From ed746e5e22193ac9959ca3ba63bf42da9f9bf00a Mon Sep 17 00:00:00 2001 From: xuexihuang <1339326187@qq.com> Date: Wed, 4 Oct 2023 01:11:57 +0800 Subject: [PATCH] add kafka&minio charts --- manifest/helm-charts/infra/kafka/.helmignore | 21 + manifest/helm-charts/infra/kafka/Chart.lock | 9 + manifest/helm-charts/infra/kafka/Chart.yaml | 44 + manifest/helm-charts/infra/kafka/README.md | 1433 ++++++++++ .../infra/kafka/charts/common/.helmignore | 22 + .../infra/kafka/charts/common/Chart.yaml | 23 + .../infra/kafka/charts/common/README.md | 235 ++ .../charts/common/templates/_affinities.tpl | 139 + .../charts/common/templates/_capabilities.tpl | 185 ++ .../kafka/charts/common/templates/_errors.tpl | 28 + .../kafka/charts/common/templates/_images.tpl | 101 + .../charts/common/templates/_ingress.tpl | 73 + .../kafka/charts/common/templates/_labels.tpl | 40 + .../kafka/charts/common/templates/_names.tpl | 71 + .../charts/common/templates/_secrets.tpl | 172 ++ .../charts/common/templates/_storage.tpl | 28 + .../charts/common/templates/_tplvalues.tpl | 38 + .../kafka/charts/common/templates/_utils.tpl | 67 + .../charts/common/templates/_warnings.tpl | 19 + .../templates/validations/_cassandra.tpl | 77 + .../common/templates/validations/_mariadb.tpl | 108 + .../common/templates/validations/_mongodb.tpl | 113 + .../common/templates/validations/_mysql.tpl | 108 + .../templates/validations/_postgresql.tpl | 134 + .../common/templates/validations/_redis.tpl | 81 + .../templates/validations/_validations.tpl | 51 + .../kafka/charts/common/values.schema.json | 11 + .../infra/kafka/charts/common/values.yaml | 8 + .../infra/kafka/charts/zookeeper/.helmignore | 21 + .../infra/kafka/charts/zookeeper/Chart.lock | 6 + .../infra/kafka/charts/zookeeper/Chart.yaml | 29 + .../infra/kafka/charts/zookeeper/README.md | 528 ++++ .../zookeeper/charts/common/.helmignore | 22 + .../charts/zookeeper/charts/common/Chart.yaml | 23 + .../charts/zookeeper/charts/common/README.md | 235 ++ .../charts/common/templates/_affinities.tpl | 139 + .../charts/common/templates/_capabilities.tpl | 185 ++ .../charts/common/templates/_errors.tpl | 28 + .../charts/common/templates/_images.tpl | 85 + .../charts/common/templates/_ingress.tpl | 73 + .../charts/common/templates/_labels.tpl | 55 + .../charts/common/templates/_names.tpl | 71 + .../charts/common/templates/_secrets.tpl | 172 ++ .../charts/common/templates/_storage.tpl | 28 + .../charts/common/templates/_tplvalues.tpl | 38 + .../charts/common/templates/_utils.tpl | 67 + .../charts/common/templates/_warnings.tpl | 19 + .../templates/validations/_cassandra.tpl | 77 + .../common/templates/validations/_mariadb.tpl | 108 + .../common/templates/validations/_mongodb.tpl | 113 + .../common/templates/validations/_mysql.tpl | 108 + .../templates/validations/_postgresql.tpl | 134 + .../common/templates/validations/_redis.tpl | 81 + .../templates/validations/_validations.tpl | 51 + .../zookeeper/charts/common/values.yaml | 8 + .../charts/zookeeper/templates/NOTES.txt | 76 + .../charts/zookeeper/templates/_helpers.tpl | 366 +++ .../charts/zookeeper/templates/configmap.yaml | 20 + .../zookeeper/templates/extra-list.yaml | 9 + .../zookeeper/templates/metrics-svc.yaml | 27 + .../zookeeper/templates/networkpolicy.yaml | 44 + .../kafka/charts/zookeeper/templates/pdb.yaml | 29 + .../zookeeper/templates/prometheusrule.yaml | 25 + .../templates/scripts-configmap.yaml | 104 + .../charts/zookeeper/templates/secrets.yaml | 70 + .../zookeeper/templates/serviceaccount.yaml | 20 + 
.../zookeeper/templates/servicemonitor.yaml | 51 + .../zookeeper/templates/statefulset.yaml | 532 ++++ .../zookeeper/templates/svc-headless.yaml | 40 + .../kafka/charts/zookeeper/templates/svc.yaml | 69 + .../zookeeper/templates/tls-secrets.yaml | 56 + .../infra/kafka/charts/zookeeper/values.yaml | 882 +++++++ .../infra/kafka/templates/NOTES.txt | 317 +++ .../infra/kafka/templates/_helpers.tpl | 1162 ++++++++ .../templates/broker/config-secrets.yaml | 25 + .../kafka/templates/broker/configmap.yaml | 47 + .../infra/kafka/templates/broker/pdb.yaml | 31 + .../kafka/templates/broker/statefulset.yaml | 455 ++++ .../templates/broker/svc-external-access.yaml | 63 + .../kafka/templates/broker/svc-headless.yaml | 38 + .../controller-eligible/config-secrets.yaml | 25 + .../controller-eligible/configmap.yaml | 46 + .../templates/controller-eligible/pdb.yaml | 31 + .../controller-eligible/statefulset.yaml | 448 ++++ .../svc-external-access.yaml | 65 + .../controller-eligible/svc-headless.yaml | 46 + .../infra/kafka/templates/extra-list.yaml | 9 + .../kafka/templates/log4j-configmap.yaml | 20 + .../kafka/templates/metrics/deployment.yaml | 174 ++ .../templates/metrics/jmx-configmap.yaml | 70 + .../templates/metrics/jmx-servicemonitor.yaml | 49 + .../kafka/templates/metrics/jmx-svc.yaml | 31 + .../templates/metrics/prometheusrule.yaml | 21 + .../templates/metrics/serviceaccount.yaml | 20 + .../templates/metrics/servicemonitor.yaml | 50 + .../infra/kafka/templates/metrics/svc.yaml | 34 + .../network-policy/networkpolicy-egress.yaml | 23 + .../network-policy/networkpolicy-ingress.yaml | 53 + .../kafka/templates/provisioning/job.yaml | 267 ++ .../provisioning/serviceaccount.yaml | 17 + .../templates/provisioning/tls-secret.yaml | 21 + .../infra/kafka/templates/rbac/role.yaml | 26 + .../kafka/templates/rbac/rolebinding.yaml | 25 + .../kafka/templates/rbac/serviceaccount.yaml | 19 + .../kafka/templates/scripts-configmap.yaml | 367 +++ .../infra/kafka/templates/secrets.yaml | 121 + .../infra/kafka/templates/svc.yaml | 63 + .../infra/kafka/templates/tls-secret.yaml | 82 + manifest/helm-charts/infra/kafka/values.yaml | 2352 +++++++++++++++++ manifest/helm-charts/infra/minio/.helmignore | 21 + manifest/helm-charts/infra/minio/Chart.lock | 6 + manifest/helm-charts/infra/minio/Chart.yaml | 36 + manifest/helm-charts/infra/minio/README.md | 531 ++++ .../infra/minio/charts/common/.helmignore | 22 + .../infra/minio/charts/common/Chart.yaml | 23 + .../infra/minio/charts/common/README.md | 235 ++ .../charts/common/templates/_affinities.tpl | 139 + .../charts/common/templates/_capabilities.tpl | 185 ++ .../minio/charts/common/templates/_errors.tpl | 28 + .../minio/charts/common/templates/_images.tpl | 101 + .../charts/common/templates/_ingress.tpl | 73 + .../minio/charts/common/templates/_labels.tpl | 40 + .../minio/charts/common/templates/_names.tpl | 71 + .../charts/common/templates/_secrets.tpl | 172 ++ .../charts/common/templates/_storage.tpl | 28 + .../charts/common/templates/_tplvalues.tpl | 38 + .../minio/charts/common/templates/_utils.tpl | 67 + .../charts/common/templates/_warnings.tpl | 19 + .../templates/validations/_cassandra.tpl | 77 + .../common/templates/validations/_mariadb.tpl | 108 + .../common/templates/validations/_mongodb.tpl | 113 + .../common/templates/validations/_mysql.tpl | 108 + .../templates/validations/_postgresql.tpl | 134 + .../common/templates/validations/_redis.tpl | 81 + .../templates/validations/_validations.tpl | 51 + .../minio/charts/common/values.schema.json | 11 + 
.../infra/minio/charts/common/values.yaml | 8 + .../infra/minio/templates/NOTES.txt | 76 + .../infra/minio/templates/_helpers.tpl | 244 ++ .../infra/minio/templates/api-ingress.yaml | 60 + .../templates/distributed/headless-svc.yaml | 30 + .../minio/templates/distributed/pdb.yaml | 26 + .../templates/distributed/statefulset.yaml | 338 +++ .../infra/minio/templates/extra-list.yaml | 9 + .../infra/minio/templates/ingress.yaml | 60 + .../infra/minio/templates/networkpolicy.yaml | 36 + .../infra/minio/templates/prometheusrule.yaml | 24 + .../templates/provisioning-configmap.yaml | 75 + .../minio/templates/provisioning-job.yaml | 324 +++ .../infra/minio/templates/pvc.yaml | 26 + .../infra/minio/templates/secrets.yaml | 20 + .../infra/minio/templates/service.yaml | 51 + .../infra/minio/templates/serviceaccount.yaml | 20 + .../infra/minio/templates/servicemonitor.yaml | 53 + .../templates/standalone/deployment.yaml | 255 ++ .../infra/minio/templates/tls-secrets.yaml | 69 + manifest/helm-charts/infra/minio/values.yaml | 1092 ++++++++ 157 files changed, 20490 insertions(+) create mode 100644 manifest/helm-charts/infra/kafka/.helmignore create mode 100644 manifest/helm-charts/infra/kafka/Chart.lock create mode 100644 manifest/helm-charts/infra/kafka/Chart.yaml create mode 100644 manifest/helm-charts/infra/kafka/README.md create mode 100644 manifest/helm-charts/infra/kafka/charts/common/.helmignore create mode 100644 manifest/helm-charts/infra/kafka/charts/common/Chart.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/common/README.md create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_affinities.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_capabilities.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_errors.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_images.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_ingress.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_labels.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_names.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_secrets.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_storage.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_tplvalues.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_utils.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/_warnings.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/validations/_cassandra.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mariadb.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mongodb.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mysql.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/validations/_postgresql.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/validations/_redis.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/templates/validations/_validations.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/common/values.schema.json create mode 100644 manifest/helm-charts/infra/kafka/charts/common/values.yaml create mode 100644 
manifest/helm-charts/infra/kafka/charts/zookeeper/.helmignore create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/Chart.lock create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/Chart.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/README.md create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/.helmignore create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/Chart.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/README.md create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_errors.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_images.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_labels.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_names.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_storage.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_utils.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/values.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/NOTES.txt create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/_helpers.tpl create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/configmap.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/extra-list.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/metrics-svc.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/networkpolicy.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/pdb.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/prometheusrule.yaml create mode 100644 
manifest/helm-charts/infra/kafka/charts/zookeeper/templates/scripts-configmap.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/secrets.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/serviceaccount.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/servicemonitor.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/statefulset.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/svc-headless.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/svc.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/templates/tls-secrets.yaml create mode 100644 manifest/helm-charts/infra/kafka/charts/zookeeper/values.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/NOTES.txt create mode 100644 manifest/helm-charts/infra/kafka/templates/_helpers.tpl create mode 100644 manifest/helm-charts/infra/kafka/templates/broker/config-secrets.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/broker/configmap.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/broker/pdb.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/broker/statefulset.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/broker/svc-external-access.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/broker/svc-headless.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/controller-eligible/config-secrets.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/controller-eligible/configmap.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/controller-eligible/pdb.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/controller-eligible/statefulset.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/controller-eligible/svc-external-access.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/controller-eligible/svc-headless.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/extra-list.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/log4j-configmap.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/metrics/deployment.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/metrics/jmx-configmap.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/metrics/jmx-servicemonitor.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/metrics/jmx-svc.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/metrics/prometheusrule.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/metrics/serviceaccount.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/metrics/servicemonitor.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/metrics/svc.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/network-policy/networkpolicy-egress.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/network-policy/networkpolicy-ingress.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/provisioning/job.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/provisioning/serviceaccount.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/provisioning/tls-secret.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/rbac/role.yaml create mode 100644 
manifest/helm-charts/infra/kafka/templates/rbac/rolebinding.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/rbac/serviceaccount.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/scripts-configmap.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/secrets.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/svc.yaml create mode 100644 manifest/helm-charts/infra/kafka/templates/tls-secret.yaml create mode 100644 manifest/helm-charts/infra/kafka/values.yaml create mode 100644 manifest/helm-charts/infra/minio/.helmignore create mode 100644 manifest/helm-charts/infra/minio/Chart.lock create mode 100644 manifest/helm-charts/infra/minio/Chart.yaml create mode 100644 manifest/helm-charts/infra/minio/README.md create mode 100644 manifest/helm-charts/infra/minio/charts/common/.helmignore create mode 100644 manifest/helm-charts/infra/minio/charts/common/Chart.yaml create mode 100644 manifest/helm-charts/infra/minio/charts/common/README.md create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_affinities.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_capabilities.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_errors.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_images.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_ingress.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_labels.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_names.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_secrets.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_storage.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_tplvalues.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_utils.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/_warnings.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/validations/_cassandra.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/validations/_mariadb.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/validations/_mongodb.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/validations/_mysql.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/validations/_postgresql.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/validations/_redis.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/templates/validations/_validations.tpl create mode 100644 manifest/helm-charts/infra/minio/charts/common/values.schema.json create mode 100644 manifest/helm-charts/infra/minio/charts/common/values.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/NOTES.txt create mode 100644 manifest/helm-charts/infra/minio/templates/_helpers.tpl create mode 100644 manifest/helm-charts/infra/minio/templates/api-ingress.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/distributed/headless-svc.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/distributed/pdb.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/distributed/statefulset.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/extra-list.yaml create mode 100644 
manifest/helm-charts/infra/minio/templates/ingress.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/networkpolicy.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/prometheusrule.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/provisioning-configmap.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/provisioning-job.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/pvc.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/secrets.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/service.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/serviceaccount.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/servicemonitor.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/standalone/deployment.yaml create mode 100644 manifest/helm-charts/infra/minio/templates/tls-secrets.yaml create mode 100644 manifest/helm-charts/infra/minio/values.yaml diff --git a/manifest/helm-charts/infra/kafka/.helmignore b/manifest/helm-charts/infra/kafka/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifest/helm-charts/infra/kafka/Chart.lock b/manifest/helm-charts/infra/kafka/Chart.lock new file mode 100644 index 000000000..4c554cbaf --- /dev/null +++ b/manifest/helm-charts/infra/kafka/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: zookeeper + repository: oci://registry-1.docker.io/bitnamicharts + version: 12.1.3 +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.11.1 +digest: sha256:7008d8cb5f82e0a99fbc97b8c42c87e5f7b45a90c7ff868d9ecb2778efa477ad +generated: "2023-09-18T14:18:33.491894+02:00" diff --git a/manifest/helm-charts/infra/kafka/Chart.yaml b/manifest/helm-charts/infra/kafka/Chart.yaml new file mode 100644 index 000000000..9be2ff8f0 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/Chart.yaml @@ -0,0 +1,44 @@ +annotations: + category: Infrastructure + images: | + - name: jmx-exporter + image: docker.io/bitnami/jmx-exporter:0.19.0-debian-11-r66 + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.7.0-debian-11-r102 + - name: kafka + image: docker.io/bitnami/kafka:3.5.1-debian-11-r44 + - name: kubectl + image: docker.io/bitnami/kubectl:1.25.13-debian-11-r11 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r60 + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 3.5.1 +dependencies: +- condition: zookeeper.enabled + name: zookeeper + repository: oci://registry-1.docker.io/bitnamicharts + version: 12.x.x +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + tags: + - bitnami-common + version: 2.x.x +description: Apache Kafka is a distributed streaming platform designed to build real-time + pipelines and can be used as a message broker or as a replacement for a log aggregation + solution for big data applications. 
+home: https://bitnami.com
+icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png
+keywords:
+- kafka
+- zookeeper
+- streaming
+- producer
+- consumer
+maintainers:
+- name: VMware, Inc.
+  url: https://github.com/bitnami/charts
+name: kafka
+sources:
+- https://github.com/bitnami/charts/tree/main/bitnami/kafka
+version: 25.1.11
diff --git a/manifest/helm-charts/infra/kafka/README.md b/manifest/helm-charts/infra/kafka/README.md
new file mode 100644
index 000000000..ee4a7f42b
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/README.md
@@ -0,0 +1,1433 @@
+
+
+# Apache Kafka packaged by Bitnami
+
+Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications.
+
+[Overview of Apache Kafka](http://kafka.apache.org/)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/kafka
+```
+
+## Introduction
+
+This chart bootstraps a [Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+Looking to use Apache Kafka in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/kafka
+```
+
+This command deploys Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
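+
+Individual parameters can be overridden at install time with `--set`, or collected in a custom values file passed via `-f`. As a minimal sketch (the parameter name and its allowed values are listed in the tables below), switching the client listener to plain text:
+
+```console
+helm install my-release \
+  --set listeners.client.protocol=PLAINTEXT \
+  oci://registry-1.docker.io/bitnamicharts/kafka
+```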
+
+## Parameters
+
+### Global parameters
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `global.imageRegistry` | Global Docker image registry | `""` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
+| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
+
+### Common parameters
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `kubeVersion` | Override Kubernetes version | `""` |
+| `nameOverride` | String to partially override common.names.fullname | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname | `""` |
+| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
+| `commonLabels` | Labels to add to all deployed objects | `{}` |
+| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
+| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
+| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
+| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` |
+| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` |
+
+### Kafka parameters
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `image.registry` | Kafka image registry | `docker.io` |
+| `image.repository` | Kafka image repository | `bitnami/kafka` |
+| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.5.1-debian-11-r44` |
+| `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `extraInit` | Additional content for the kafka init script, rendered as a template. | `""` |
+| `config` | Configuration file for Kafka, rendered as a template. Auto-generated based on chart values when not specified. | `""` |
+| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` |
+| `extraConfig` | Additional configuration to be appended at the end of the generated Kafka configuration file. | `""` |
+| `secretConfig` | Additional configuration to be appended at the end of the generated Kafka configuration file. | `""` |
+| `existingSecretConfig` | Secret with additional configuration that will be appended to the end of the generated Kafka configuration file | `""` |
+| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers | `""` |
+| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file | `""` |
+| `heapOpts` | Kafka Java Heap size | `-Xmx1024m -Xms1024m` |
+| `interBrokerProtocolVersion` | Override the setting 'inter.broker.protocol.version' during the ZK migration. | `""` |
+| `listeners.client.name` | Name for the Kafka client listener | `CLIENT` |
+| `listeners.client.containerPort` | Port for the Kafka client listener | `9092` |
+| `listeners.client.protocol` | Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` |
+| `listeners.client.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` |
+| `listeners.controller.name` | Name for the Kafka controller listener | `CONTROLLER` |
+| `listeners.controller.containerPort` | Port for the Kafka controller listener | `9093` |
+| `listeners.controller.protocol` | Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` |
+| `listeners.controller.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` |
+| `listeners.interbroker.name` | Name for the Kafka inter-broker listener | `INTERNAL` |
+| `listeners.interbroker.containerPort` | Port for the Kafka inter-broker listener | `9094` |
+| `listeners.interbroker.protocol` | Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` |
+| `listeners.interbroker.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` |
+| `listeners.external.containerPort` | Port for the Kafka external listener | `9095` |
+| `listeners.external.protocol` | Security protocol for the Kafka external listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` |
+| `listeners.external.name` | Name for the Kafka external listener | `EXTERNAL` |
+| `listeners.external.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS authentication type. If SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required' | `""` |
+| `listeners.extraListeners` | Array of listener objects to be appended to already existing listeners | `[]` |
+| `listeners.overrideListeners` | Overrides the Kafka 'listeners' configuration setting. | `""` |
+| `listeners.advertisedListeners` | Overrides the Kafka 'advertised.listeners' configuration setting. | `""` |
+| `listeners.securityProtocolMap` | Overrides the Kafka 'listener.security.protocol.map' configuration setting. | `""` |
+
+### Kafka SASL parameters
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `sasl.enabledMechanisms` | Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512` | `PLAIN,SCRAM-SHA-256,SCRAM-SHA-512` |
+| `sasl.interBrokerMechanism` | SASL mechanism for inter-broker communication. | `PLAIN` |
+| `sasl.controllerMechanism` | SASL mechanism for controller communications. | `PLAIN` |
+| `sasl.interbroker.user` | Username for inter-broker communications when SASL is enabled | `inter_broker_user` |
+| `sasl.interbroker.password` | Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. | `""` |
+| `sasl.controller.user` | Username for controller communications when SASL is enabled | `controller_user` |
+| `sasl.controller.password` | Password for controller communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. | `""` |
+| `sasl.client.users` | Comma-separated list of usernames for client communications when SASL is enabled | `["user1"]` |
+| `sasl.client.passwords` | Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users | `""` |
+| `sasl.zookeeper.user` | Username for zookeeper communications when SASL is enabled. | `""` |
+| `sasl.zookeeper.password` | Password for zookeeper communications when SASL is enabled. | `""` |
+| `sasl.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser, controllerUser and zookeeperUser | `""` |
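+
+As a sketch, SCRAM-only authentication with a single client user could be configured as follows (the username and password shown are placeholders):
+
+```yaml
+sasl:
+  enabledMechanisms: SCRAM-SHA-512
+  interBrokerMechanism: SCRAM-SHA-512
+  controllerMechanism: SCRAM-SHA-512
+  client:
+    users:
+      - app1
+    passwords: app1-password # must match the number of client users
+```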
| `""` | +| `tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the one in the `tls.existingSecret` | `""` | +| `tls.jksTruststoreKey` | The secret key from the `tls.existingSecret` or `tls.jksTruststoreSecret` containing the truststore | `""` | +| `tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `tls.sslClientAuth` | Sets the default value for the ssl.client.auth Kafka setting. | `required` | +| `tls.zookeeper.enabled` | Enable TLS for Zookeeper client connections. | `false` | +| `tls.zookeeper.verifyHostname` | Hostname validation. | `true` | +| `tls.zookeeper.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications. | `""` | +| `tls.zookeeper.existingSecretKeystoreKey` | The secret key from the tls.zookeeper.existingSecret containing the Keystore. | `zookeeper.keystore.jks` | +| `tls.zookeeper.existingSecretTruststoreKey` | The secret key from the tls.zookeeper.existingSecret containing the Truststore. | `zookeeper.truststore.jks` | +| `tls.zookeeper.passwordsSecret` | Existing secret containing Keystore and Truststore passwords. | `""` | +| `tls.zookeeper.passwordsSecretKeystoreKey` | The secret key from the tls.zookeeper.passwordsSecret containing the password for the Keystore. | `keystore-password` | +| `tls.zookeeper.passwordsSecretTruststoreKey` | The secret key from the tls.zookeeper.passwordsSecret containing the password for the Truststore. | `truststore-password` | +| `tls.zookeeper.keystorePassword` | Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | +| `tls.zookeeper.truststorePassword` | Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | +| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | + +### Controller-eligible statefulset parameters + +| Name | Description | Value | +| -------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `controller.replicaCount` | Number of Kafka controller-eligible nodes | `3` | +| `controller.controllerOnly` | If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. | `false` | +| `controller.minId` | Minimal node.id values for controller-eligible nodes. Do not change after first initialization. | `0` | +| `controller.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` | +| `controller.config` | Configuration file for Kafka controller-eligible nodes, rendered as a template. 
+
+### Controller-eligible statefulset parameters
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `controller.replicaCount` | Number of Kafka controller-eligible nodes | `3` |
+| `controller.controllerOnly` | If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. | `false` |
+| `controller.minId` | Minimal node.id values for controller-eligible nodes. Do not change after first initialization. | `0` |
+| `controller.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` |
+| `controller.config` | Configuration file for Kafka controller-eligible nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` |
+| `controller.existingConfigmap` | ConfigMap with Kafka Configuration for controller-eligible nodes. | `""` |
+| `controller.extraConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` |
+| `controller.secretConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` |
+| `controller.existingSecretConfig` | Secret with additional configuration that will be appended to the end of the generated Kafka controller-eligible nodes configuration file | `""` |
+| `controller.heapOpts` | Kafka Java Heap size for controller-eligible nodes | `-Xmx1024m -Xms1024m` |
+| `controller.command` | Override Kafka container command | `[]` |
+| `controller.args` | Override Kafka container arguments | `[]` |
+| `controller.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` |
+| `controller.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` |
+| `controller.extraEnvVarsSecret` | Secret with extra environment variables | `""` |
+| `controller.extraContainerPorts` | Kafka controller-eligible extra containerPorts. | `[]` |
+| `controller.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` |
+| `controller.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` |
+| `controller.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `controller.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `controller.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
+| `controller.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `controller.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` |
+| `controller.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `controller.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `controller.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `controller.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `controller.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `controller.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` |
+| `controller.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `controller.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `controller.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `controller.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `controller.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `controller.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `controller.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `controller.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `controller.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` |
+| `controller.resources.limits` | The resources limits for the container | `{}` |
+| `controller.resources.requests` | The requested resources for the container | `{}` |
+| `controller.podSecurityContext.enabled` | Enable security context for the pods | `true` |
+| `controller.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` |
+| `controller.podSecurityContext.seccompProfile.type` | Set Kafka pod's Security Context seccomp profile | `RuntimeDefault` |
+| `controller.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` |
+| `controller.containerSecurityContext.runAsUser` | Set Kafka containers' Security Context runAsUser | `1001` |
+| `controller.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` |
+| `controller.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` |
+| `controller.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` |
+| `controller.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` |
+| `controller.hostAliases` | Kafka pods host aliases | `[]` |
+| `controller.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` |
+| `controller.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` |
+| `controller.podLabels` | Extra labels for Kafka pods | `{}` |
+| `controller.podAnnotations` | Extra annotations for Kafka pods | `{}` |
+| `controller.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `controller.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `controller.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `controller.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
+| `controller.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `controller.affinity` | Affinity for pod assignment | `{}` |
+| `controller.nodeSelector` | Node labels for pod assignment | `{}` |
+| `controller.tolerations` | Tolerations for pod assignment | `[]` |
+| `controller.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `controller.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` |
+| `controller.podManagementPolicy` | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` |
+| `controller.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` |
+| `controller.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` |
+| `controller.enableServiceLinks` | Whether information about services should be injected into pod's environment variables | `true` |
+| `controller.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
+| `controller.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` |
+| `controller.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` |
+| `controller.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` |
+| `controller.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` |
+| `controller.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` |
+| `controller.pdb.create` | Deploy a pdb object for the Kafka pod | `false` |
+| `controller.pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` |
+| `controller.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` |
+| `controller.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` |
+| `controller.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` |
+| `controller.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` |
+| `controller.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
+| `controller.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` |
+| `controller.persistence.annotations` | Annotations for the PVC | `{}` |
+| `controller.persistence.labels` | Labels for the PVC | `{}` |
+| `controller.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` |
+| `controller.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` |
+| `controller.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` |
+| `controller.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` |
+| `controller.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` |
+| `controller.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
+| `controller.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` |
+| `controller.logPersistence.annotations` | Annotations for the PVC | `{}` |
+| `controller.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` |
+| `controller.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` |
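+
+A sizing sketch for the controller-eligible nodes, using the persistence and resource parameters above (the figures are illustrative placeholders, not recommendations):
+
+```yaml
+controller:
+  replicaCount: 3
+  heapOpts: -Xmx2048m -Xms2048m
+  persistence:
+    size: 20Gi
+  resources:
+    requests:
+      cpu: 500m
+      memory: 2560Mi
+    limits:
+      memory: 4Gi
+```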
+
+### Broker-only statefulset parameters
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `broker.replicaCount` | Number of Kafka broker-only nodes | `0` |
+| `broker.minId` | Minimal node.id values for broker-only nodes. Do not change after first initialization. | `100` |
+| `broker.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` |
+| `broker.config` | Configuration file for Kafka broker-only nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` |
+| `broker.existingConfigmap` | ConfigMap with Kafka Configuration for broker-only nodes. | `""` |
+| `broker.extraConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` |
+| `broker.secretConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` |
+| `broker.existingSecretConfig` | Secret with additional configuration that will be appended to the end of the generated Kafka broker-only nodes configuration file | `""` |
+| `broker.heapOpts` | Kafka Java Heap size for broker-only nodes | `-Xmx1024m -Xms1024m` |
+| `broker.command` | Override Kafka container command | `[]` |
+| `broker.args` | Override Kafka container arguments | `[]` |
+| `broker.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` |
+| `broker.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` |
+| `broker.extraEnvVarsSecret` | Secret with extra environment variables | `""` |
+| `broker.extraContainerPorts` | Kafka broker-only extra containerPorts. | `[]` |
+| `broker.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` |
+| `broker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` |
+| `broker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `broker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `broker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
+| `broker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `broker.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` |
+| `broker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `broker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `broker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `broker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `broker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `broker.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` |
+| `broker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `broker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `broker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `broker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `broker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `broker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `broker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `broker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `broker.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` |
+| `broker.resources.limits` | The resources limits for the container | `{}` |
+| `broker.resources.requests` | The requested resources for the container | `{}` |
+| `broker.podSecurityContext.enabled` | Enable security context for the pods | `true` |
+| `broker.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` |
+| `broker.podSecurityContext.seccompProfile.type` | Set Kafka pod's Security Context seccomp profile | `RuntimeDefault` |
+| `broker.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` |
+| `broker.containerSecurityContext.runAsUser` | Set Kafka containers' Security Context runAsUser | `1001` |
+| `broker.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` |
+| `broker.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` |
+| `broker.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` |
+| `broker.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` |
+| `broker.hostAliases` | Kafka pods host aliases | `[]` |
+| `broker.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` |
+| `broker.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` |
+| `broker.podLabels` | Extra labels for Kafka pods | `{}` |
+| `broker.podAnnotations` | Extra annotations for Kafka pods | `{}` |
+| `broker.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `broker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `broker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `broker.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
+| `broker.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `broker.affinity` | Affinity for pod assignment | `{}` |
+| `broker.nodeSelector` | Node labels for pod assignment | `{}` |
+| `broker.tolerations` | Tolerations for pod assignment | `[]` |
+| `broker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `broker.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` |
+| `broker.podManagementPolicy` | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` |
+| `broker.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` |
+| `broker.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` |
+| `broker.enableServiceLinks` | Whether information about services should be injected into pod's environment variables | `true` |
+| `broker.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
+| `broker.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` |
+| `broker.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` |
+| `broker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` |
+| `broker.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` |
+| `broker.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` |
+| `broker.pdb.create` | Deploy a pdb object for the Kafka pod | `false` |
+| `broker.pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` |
+| `broker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` |
+| `broker.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` |
+| `broker.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` |
+| `broker.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` |
+| `broker.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
+| `broker.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` |
+| `broker.persistence.annotations` | Annotations for the PVC | `{}` |
+| `broker.persistence.labels` | Labels for the PVC | `{}` |
+| `broker.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` |
+| `broker.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` |
+| `broker.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` |
+| `broker.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` |
+| `broker.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` |
+| `broker.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
+| `broker.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` |
+| `broker.logPersistence.annotations` | Annotations for the PVC | `{}` |
+| `broker.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` |
+| `broker.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` |
| `{}` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.13-debian-11-r11` | +| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resources.limits` | The resources limits for the auto-discovery init container | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | The requested resources for the auto-discovery init container | `{}` | +| `externalAccess.controller.forceExpose` | If set to true, force exposing controller-eligible nodes even if they are configured as controller-only nodes | `false` | +| `externalAccess.controller.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.controller.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.controller.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerNames` | Array of load balancer names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.controller.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.controller.service.usePodIPs` | Use the MY_POD_IP address for external access. 
| `false` | +| `externalAccess.controller.service.domain` | Domain or external IP used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.controller.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.controller.service.labels` | Service labels for external access | `{}` | +| `externalAccess.controller.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.controller.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `externalAccess.broker.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.broker.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.broker.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerNames` | Array of load balancer names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.broker.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.broker.service.usePodIPs` | Use the MY_POD_IP address for external access. 
| `false` | +| `externalAccess.broker.service.domain` | Domain or external IP used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.broker.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.broker.service.labels` | Service labels for external access | `{}` | +| `externalAccess.broker.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.broker.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.externalAccess.from` | Customize the `from` section for External Access on the tcp-external port | `[]` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r60` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + +### Metrics parameters + +| Name | Description | Value | +| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` | +| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.7.0-debian-11-r102` | +| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | +| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | +| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | +| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | +| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | +| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | +| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | +| `metrics.kafka.resources.limits` | The resources limits for the container | `{}` | +| `metrics.kafka.resources.requests` | The requested resources for the container | `{}` | +| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | +| `metrics.kafka.podSecurityContext.seccompProfile.type` | Set Kafka exporter pod's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | +| `metrics.kafka.containerSecurityContext.runAsUser` | Set Kafka exporter containers' Security Context runAsUser | `1001` | +| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.kafka.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka exporter containers' Security Context 
allowPrivilegeEscalation | `false` | +| `metrics.kafka.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka exporter containers' Security Context readOnlyRootFilesystem | `true` | +| `metrics.kafka.containerSecurityContext.capabilities.drop` | Set Kafka exporter containers' Security Context capabilities to be dropped | `["ALL"]` | +| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | +| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | +| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | +| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match. Ignored if `metrics.kafka.affinity` is set. | `""` | +| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. | `[]` | +| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | +| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | +| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | +| `metrics.kafka.enableServiceLinks` | Whether information about services should be injected into pod's environment variables | `true` | +| `metrics.kafka.priorityClassName` | Kafka exporter pods' priorityClassName | `""` | +| `metrics.kafka.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | +| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | +| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | +| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | +| `metrics.kafka.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.kafkaJmxPort` | JMX port where the exporter will collect metrics, exposed in the Kafka container. 
| `5555` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.19.0-debian-11-r66` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set Prometheus JMX exporter containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation | `false` | +| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem | `true` | +| `metrics.jmx.containerSecurityContext.capabilities.drop` | Set Prometheus JMX exporter containers' Security Context capabilities to be dropped | `["ALL"]` | +| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | +| `metrics.jmx.resources.limits` | The resources limits for the JMX exporter container | `{}` | +| `metrics.jmx.resources.requests` | The requested resources for the JMX exporter container | `{}` | +| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose via JMX stats to the JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before 
ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus. | `""` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | + +### Kafka provisioning parameters + +| Name | Description | Value | +| ---------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `provisioning.enabled` | Enable Kafka provisioning Job | `false` | +| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | +| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | +| `provisioning.topics` | Kafka topics to provision | `[]` | +| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | +| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | +| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | +| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | +| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is the path to a properties file with the most needed configurations | `""` | +| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is the path to a properties file with the most needed configurations | `""` | +| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM`. | `jks` | +| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | +| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | +| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | +| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | +| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | +| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | +| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. 
| `""` | +| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | +| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | +| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | +| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | +| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `false` | +| `provisioning.serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `provisioning.resources.limits` | The resources limits for the Kafka provisioning container | `{}` | +| `provisioning.resources.requests` | The requested resources for the Kafka provisioning container | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.podSecurityContext.seccompProfile.type` | Set Kafka provisioning pod's Security Context seccomp profile | `RuntimeDefault` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.runAsUser` | Set Kafka provisioning containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka provisioning containers' Security Context allowPrivilegeEscalation | `false` | +| `provisioning.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka provisioning containers' Security Context readOnlyRootFilesystem | `true` | +| `provisioning.containerSecurityContext.capabilities.drop` | Set Kafka provisioning containers' Security Context capabilities to be dropped | `["ALL"]` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | +| `provisioning.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.waitForKafka` | If true use an init container to wait until kafka is ready before starting provisioning | `true` | + +### KRaft chart parameters + +| Name | Description | Value | +| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | +| `kraft.enabled` | Switch to enable or disable the KRaft mode for Kafka | `true` | +| `kraft.clusterId` | Kafka Kraft cluster ID. If not set, a random cluster ID will be generated the first time Kraft is initialized. | `""` | +| `kraft.controllerQuorumVoters` | Override the Kafka controller quorum voters of the Kafka Kraft cluster. If not set, it will be automatically configured to use all controller-elegible nodes. 
| `""` | + +### ZooKeeper chart parameters + +| Name | Description | Value | +| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` | +| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart. Must be false if you use KRaft mode. | `false` | +| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | +| `zookeeper.auth.client.enabled` | Enable ZooKeeper auth | `false` | +| `zookeeper.auth.client.clientUser` | User that will use ZooKeeper client (zkCli.sh) to authenticate. Must exist in the serverUsers comma-separated list. | `""` | +| `zookeeper.auth.client.clientPassword` | Password that will use ZooKeeper client (zkCli.sh) to authenticate. Must exist in the serverPasswords comma-separated list. | `""` | +| `zookeeper.auth.client.serverUsers` | Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | +| `zookeeper.auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` | +| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `true` | +| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | +| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | +| `externalZookeeper.servers` | List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. Must be empty if you use KRaft mode. | `[]` | + +```console +helm install my-release \ + --set replicaCount=3 \ + oci://registry-1.docker.io/bitnamicharts/kafka +``` + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/kafka +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Setting custom parameters + +Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property. + +Using `extraEnvVars` with `KAFKA_CFG_` is the preferred and simplest way to add custom Kafka parameters not otherwise specified in this chart. Alternatively, you can provide a *full* Kafka configuration using `config` or `existingConfigmap`. 
+Alternatively, you can provide a *full* Kafka configuration using `config` or `existingConfigmap`. Setting either `config` or `existingConfigmap` will cause the chart to disregard `KAFKA_CFG_` settings, which are used by many other Kafka-related chart values described above, as well as dynamically generated parameters such as `zookeeper.connect`. This can cause unexpected behavior. + +### Listeners configuration + +This chart allows you to automatically configure Kafka with 3 listeners: + +- One for inter-broker communications. +- A second one for communications with clients within the K8s cluster. +- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-cluster) for more information. + +For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed. + +### Enable security for Kafka and Zookeeper + +You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. This table shows the available protocols and the security they provide: + +| Method | Authentication | Encryption via TLS | +|-----------|------------------------------|--------------------| +| plaintext | None | No | +| tls | None | Yes | +| mtls | Yes (two-way authentication) | Yes | +| sasl | Yes (via SASL) | No | +| sasl_tls | Yes (via SASL) | Yes | + +Learn more about how to configure Kafka to use the different authentication protocols in the [chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/kafka/administration/enable-security/). + +If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below: + +- `auth.sasl.jaas.clientUsers`/`auth.sasl.jaas.clientPasswords`: when enabling SASL authentication for communications with clients. +- `auth.sasl.jaas.interBrokerUser`/`auth.sasl.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications. +- `auth.jaas.zookeeperUser`/`auth.jaas.zookeeperPassword`: in case the Zookeeper chart is deployed with SASL authentication enabled. + +In order to configure TLS authentication/encryption, you **can** create a secret per Kafka broker you have in the cluster containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and the keystore (`kafka.keystore.jks`). Then, you need to pass the secret names with the `tls.existingSecret` parameter when deploying the chart. + +> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `tls.password` parameter to provide your password. + +For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers, use the commands below to create the secrets: + +```console +kubectl create secret generic kafka-jks-0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks +kubectl create secret generic kafka-jks-1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks +``` + +> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS files generation. 
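+
+With those secrets in place, an install sketch could reference them as follows (the release name and password are illustrative; the full authentication examples below combine this with SASL and listener settings):
+
+```console
+# Assumes the kafka-jks-* secrets created above and a TLS-enabled listener
+helm install my-release \
+  --set tls.existingSecret=kafka-jks \
+  --set tls.password=jksPassword \
+  oci://registry-1.docker.io/bitnamicharts/kafka
+```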
+ +If, for some reason (like using cert-manager), you cannot use the default JKS secret scheme, you can use the additional parameters: + +- `tls.jksTruststoreSecret` to define an additional secret where the `kafka.truststore.jks` is kept. The truststore password **must** be the same as in `tls.password`. +- `tls.jksTruststore` to overwrite the default value of the truststore key (`kafka.truststore.jks`). + +> **Note**: If you are using cert-manager, particularly when an ACME issuer is used, the `ca.crt` field is not put in the `Secret` that cert-manager creates. To handle this, the `tls.pemChainIncluded` property can be set to `true` and the initContainer created by this Chart will attempt to extract the intermediate certs from the `tls.crt` field of the secret (which is a PEM chain). +> **Note**: The truststore/keystore from above **must** be protected with the same password as in `tls.password`. + +You can deploy the chart with authentication using the following parameters: + +```console +replicaCount=2 +listeners.client.protocol=SASL +listeners.interbroker.protocol=TLS +tls.existingSecret=kafka-jks +tls.password=jksPassword +sasl.client.users[0]=brokerUser +sasl.client.passwords[0]=brokerPassword +sasl.zookeeper.user=zookeeperUser +sasl.zookeeper.password=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +``` + +You can deploy the chart with AclAuthorizer using the following parameters: + +```console +replicaCount=2 +listeners.client.protocol=SASL +listeners.interbroker.protocol=SASL_TLS +tls.existingSecret=kafka-jks-0 +tls.password=jksPassword +sasl.client.users[0]=brokerUser +sasl.client.passwords[0]=brokerPassword +sasl.zookeeper.user=zookeeperUser +sasl.zookeeper.password=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +authorizerClassName=kafka.security.authorizer.AclAuthorizer +allowEveryoneIfNoAclFound=false +superUsers=User:admin +``` + +If you are using Kafka ACLs, you might encounter in kafka-authorizer.log the following event: `[...] Principal = User:ANONYMOUS is Allowed Operation [...]`. + +By setting the following parameters: `listeners.client.protocol=SSL` and `listeners.client.sslClientAuth=required`, Kafka will require the clients to authenticate to Kafka brokers via certificate. + +As a result, we will be able to see in kafka-authorizer.log the events with the specific Subject: `[...] Principal = User:CN=kafka,OU=...,O=...,L=...,C=..,ST=... is [...]`. + +If you also enable exposing metrics using the Kafka exporter, and you are using `SSL` or `SASL_SSL` security protocols, you need to mount the CA certificate used to sign the broker certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.kafka.certificatesSecret` parameter (see the sketch below). As an alternative, you can skip TLS validation using extra flags: + +```console +metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""} +```
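+
+For the CA-based approach, a sketch could look like the following (the secret name is illustrative; `ca-file` is the default key expected per the `metrics.kafka.tlsCaCert` parameter above):
+
+```console
+# Secret holding the CA that signed the broker certificates
+kubectl create secret generic kafka-exporter-ca --from-file=ca-file=./ca.crt
+helm install my-release \
+  --set metrics.kafka.enabled=true \
+  --set metrics.kafka.tlsCaSecret=kafka-exporter-ca \
+  oci://registry-1.docker.io/bitnamicharts/kafka
+```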
+ +### Accessing Kafka brokers from outside the cluster + +In order to access Kafka brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per Kafka pod will be created. + +There are three ways of configuring external access: using LoadBalancer services, using NodePort services, or using ClusterIP services. + +#### Using LoadBalancer services + +You have two alternatives to use LoadBalancer services: + +- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discovers them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.broker.type=LoadBalancer +externalAccess.service.controller.type=LoadBalancer +externalAccess.service.broker.ports.external=9094 +externalAccess.service.controller.containerPorts.external=9094 +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the load balancer IPs: + +```console +externalAccess.enabled=true +externalAccess.service.controller.type=LoadBalancer +externalAccess.service.controller.containerPorts.external=9094 +externalAccess.service.controller.loadBalancerIPs[0]='external-ip-1' +externalAccess.service.controller.loadBalancerIPs[1]='external-ip-2' +externalAccess.service.broker.type=LoadBalancer +externalAccess.service.broker.ports.external=9094 +externalAccess.service.broker.loadBalancerIPs[0]='external-ip-3' +externalAccess.service.broker.loadBalancerIPs[1]='external-ip-4' +``` + +Note: You need to know in advance the load balancer IPs so each Kafka broker advertised listener is configured with it. + +Following the aforementioned steps will also allow you to connect to the brokers from the outside using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections. + +#### Using NodePort services + +You have three alternatives to use NodePort services: + +- Option A) Use random node ports using an **initContainer** that discovers them automatically. + + ```console + externalAccess.enabled=true + externalAccess.controller.service.type=NodePort + externalAccess.broker.service.type=NodePort + externalAccess.autoDiscovery.enabled=true + serviceAccount.create=true + rbac.create=true + ``` + + Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the node ports (see the `values.yaml` sketch after these options): + + ```console + externalAccess.enabled=true + externalAccess.controller.service.type=NodePort + externalAccess.controller.service.nodePorts[0]='node-port-1' + externalAccess.controller.service.nodePorts[1]='node-port-2' + ``` + + Note: You need to know in advance the node ports that will be exposed so each Kafka broker advertised listener is configured with it. + + The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` or `externalAccess.service.useHostIPs` is provided. + +- Option C) Manually specify distinct external IPs (using controller+broker nodes) + + ```console + externalAccess.enabled=true + externalAccess.controller.service.type=NodePort + externalAccess.controller.service.externalIPs[0]='172.16.0.20' + externalAccess.controller.service.externalIPs[1]='172.16.0.21' + externalAccess.controller.service.externalIPs[2]='172.16.0.22' + ``` + + Note: You need to know in advance the available IPs of your cluster that will be exposed so each Kafka broker advertised listener is configured with it. 
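+
+For reference, the `--set`-style flags in Option B above map onto a `values.yaml` fragment like this sketch (the node port numbers are illustrative):
+
+```yaml
+externalAccess:
+  enabled: true
+  controller:
+    service:
+      type: NodePort
+      # One node port per Kafka replica
+      nodePorts:
+        - 30001
+        - 30002
+```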
+ +#### Using ClusterIP services + +Note: This option requires that an ingress is deployed within your cluster + +```console +externalAccess.enabled=true +externalAccess.controller.service.type=ClusterIP +externalAccess.controller.service.ports.external=9094 +externalAccess.controller.service.domain='ingress-ip' +externalAccess.broker.service.type=ClusterIP +externalAccess.broker.service.ports.external=9094 +externalAccess.broker.service.domain='ingress-ip' +``` + +Note: the deployed ingress must contain the following block: + +```console +tcp: + 9094: "{{ include "common.names.namespace" . }}/{{ include "common.names.fullname" . }}-0-external:9094" + 9095: "{{ include "common.names.namespace" . }}/{{ include "common.names.fullname" . }}-1-external:9094" + 9096: "{{ include "common.names.namespace" . }}/{{ include "common.names.fullname" . }}-2-external:9094" +``` + +#### Name resolution with External-DNS + +You can use the following values to generate External-DNS annotations which automatically create DNS records for each ReplicaSet pod: + +```yaml +externalAccess: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com" +``` + +### Sidecars + +If you need additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such as Kafka Connect. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB®: + +```yaml +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: + - | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "common.names.fullname" . 
}}-connect + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: connector + spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: connector + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: connector + spec: + containers: + - name: connect + image: KAFKA-CONNECT-IMAGE + imagePullPolicy: IfNotPresent + ports: + - name: connector + containerPort: 8083 + volumeMounts: + - name: configuration + mountPath: /bitnami/kafka/config + volumes: + - name: configuration + configMap: + name: {{ include "common.names.fullname" . }}-connect + - | + apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ include "common.names.fullname" . }}-connect + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: connector + data: + connect-standalone.properties: |- + bootstrap.servers = {{ include "common.names.fullname" . }}-0.{{ include "common.names.fullname" . }}-headless.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }} + ... + mongodb.properties: |- + connection.uri=mongodb://root:password@mongodb-hostname:27017 + ... + - | + apiVersion: v1 + kind: Service + metadata: + name: {{ include "common.names.fullname" . }}-connect + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: connector + spec: + ports: + - protocol: TCP + port: 8083 + targetPort: connector + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: connector +``` + +You can create the Kafka Connect image using the Dockerfile below: + +```Dockerfile +FROM bitnami/kafka:latest +# Download MongoDB® Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb +RUN mkdir -p /opt/bitnami/kafka/plugins && \ + cd /opt/bitnami/kafka/plugins && \ + curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar +CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties +``` + +## Persistence + +The [Bitnami Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. + +### Adjust permissions of persistent volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. 
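+
+For example, a minimal sketch enabling it at install time (the release name is illustrative):
+
+```console
+helm install my-release \
+  --set volumePermissions.enabled=true \
+  oci://registry-1.docker.io/bitnamicharts/kafka
+```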
+ +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Migrating from Zookeeper (Early access) + +This guide is an adaptation from upstream documentation: [Migrate from ZooKeeper to KRaft](https://docs.confluent.io/platform/current/installation/migrate-zk-kraft.html) + +1. Retrieve the cluster ID from Zookeeper: + + ```console + $ kubectl exec -it <your-zookeeper-pod> -- zkCli.sh get /cluster/id + /opt/bitnami/java/bin/java + Connecting to localhost:2181 + + WATCHER:: + + WatchedEvent state:SyncConnected type:None path:null + {"version":"1","id":"TEr3HVPvTqSWixWRHngP5g"} + ``` + +2. Deploy at least one KRaft controller-only node in your deployment and enable `zookeeperMigrationMode=true`. The KRaft controllers will migrate the data from your Kafka ZK brokers to KRaft mode. + + To do so, add the following values to your Kafka deployment when upgrading: + + ```yaml + controller: + replicaCount: 1 + controllerOnly: true + zookeeperMigrationMode: true + # If needed, set the controllers' minID to avoid conflicts with your ZK brokers' ids. + # minID: 0 + broker: + zookeeperMigrationMode: true + kraft: + enabled: true + clusterId: "<your-cluster-id>" + ``` + +3. Wait until all brokers are ready. You should see the following log in the broker logs: + + ```console + INFO [KafkaServer id=100] Finished catching up on KRaft metadata log, requesting that the KRaft controller unfence this broker (kafka.server.KafkaServer) + INFO [BrokerLifecycleManager id=100 isZkBroker=true] The broker has been unfenced. Transitioning from RECOVERY to RUNNING. (kafka.server.BrokerLifecycleManager) + ``` + + In the controllers, the following message should show up: + + ```console + Transitioning ZK migration state from PRE_MIGRATION to MIGRATION (org.apache.kafka.controller.FeatureControlManager) + ``` + +4. Once all brokers have been successfully migrated, set `broker.zookeeperMigrationMode=false` to fully migrate them. + + ```yaml + broker: + zookeeperMigrationMode: false + ``` + +5. To conclude the migration, switch off migration mode on controllers and stop Zookeeper: + + ```yaml + controller: + zookeeperMigrationMode: false + zookeeper: + enabled: false + ``` + + After migration is complete, you should see the following message in your controllers: + + ```console + [2023-07-13 13:07:45,226] INFO [QuorumController id=1] Transitioning ZK migration state from MIGRATION to POST_MIGRATION (org.apache.kafka.controller.FeatureControlManager) + ``` + +6. (**Optional**) If you would like to switch to a non-dedicated cluster, set `controller.controllerOnly=false`. This will cause controller-only nodes to switch to controller+broker nodes. + + At that point, you could manually decommission broker-only nodes by reassigning their partitions to controller-eligible nodes. + + For more information about decommissioning Kafka brokers, check the [Kafka documentation](https://www.confluent.io/blog/remove-kafka-brokers-from-any-cluster-the-easy-way/). + +## Upgrading + +### To 25.0.0 + +This major updates the Zookeeper subchart to its newest major, 12.0.0. For more information on this subchart's major version, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1200). 
+ +### To 24.0.0 + +This major version is a refactor of the Kafka chart and its architecture, to better adapt to the KRaft features introduced in version 22.0.0. + +The changes introduced in this version are: + +- New architecture. The chart now has two statefulsets, one for controller-eligible nodes (controller or controller+broker) and another one for broker-only nodes. Please take a look at the subsections [Upgrading from Kraft mode](#upgrading-from-kraft-mode) and [Upgrading from Zookeeper mode](#upgrading-from-zookeeper-mode) for more information about how to upgrade this chart depending on which mode you were using. + + The new architecture is designed to support two main features: + - Deployment of dedicated nodes + - Support for Zookeeper to Kraft migration + +- Adds compatibility with `securityContext.readOnlyRootFs=true`, which is now the execution default. + - The Kafka configuration is now mounted as a ConfigMap instead of generated at runtime. + - Due to the implementation of readOnlyRootFs support, the following settings have been removed and will now rely on Kafka defaults. To override them, please use `extraConfig` to extend your Kafka configuration instead. + - `deleteTopicEnable` + - `autoCreateTopicsEnable` + - `logFlushIntervalMessages` + - `logFlushIntervalMs` + - `logRetentionBytes` + - `logRetentionCheckIntervalMs` + - `logRetentionHours` + - `logSegmentBytes` + - `logsDirs` + - `maxMessageBytes` + - `defaultReplicationFactor` + - `offsetsTopicReplicationFactor` + - `transactionStateLogReplicationFactor` + - `transactionStateLogMinIsr` + - `numIoThreads` + - `numNetworkThreads` + - `numPartitions` + - `numRecoveryThreadsPerDataDir` + - `socketReceiveBufferBytes` + - `socketRequestMaxBytes` + - `socketSendBufferBytes` + - `zookeeperConnectionTimeoutMs` + - `authorizerClassName` + - `allowEveryoneIfNoAclFound` + - `superUsers` +- All listeners are configured with protocol 'SASL_PLAINTEXT' by default. +- Support for SCRAM authentication in KRaft mode +- All statefulset settings have been moved from the values' root to `controller.*` and `broker.*`. +- Refactor of listeners configuration: + - The settings `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` have been replaced with the `listeners.*` object, which includes default listeners; each listener can be configured individually and extended using `listeners.extraListeners`. + - The values `interBrokerListenerName` and `allowPlaintextListener` have been removed. +- Refactor of SASL, SSL and ACL settings: + - The authentication nomenclature `plaintext,tls,mtls,sasl,sasl_tls` has been removed. Listeners are now configured using the Kafka nomenclature `PLAINTEXT,SASL_PLAINTEXT,SASL_SSL,SSL` in `listeners.*.protocol`. + - mTLS is configured by default for SSL protocol listeners, while it can now also be configured for SASL_SSL listeners if `listeners.*.sslClientAuth` is set. + - All SASL settings are now grouped under `sasl.*`. 
- `auth.sasl.mechanisms` -> `sasl.enabledMechanisms` + - `auth.interBrokerMechanism` -> `sasl.interBrokerMechanism` + - `auth.sasl.jaas.clientUsers` -> `sasl.client.users` + - `auth.sasl.jaas.clientPasswords` -> `sasl.client.passwords` + - `auth.sasl.jaas.interBrokerUser` -> `sasl.interbroker.user` + - `auth.sasl.jaas.interBrokerPassword` -> `sasl.interbroker.password` + - `auth.sasl.jaas.zookeeperUser` -> `sasl.zookeeper.user` + - `auth.sasl.jaas.zookeeperPassword` -> `sasl.zookeeper.password` + - `auth.sasl.jaas.existingSecret` -> `sasl.existingSecret` + - Added support for Controller listener protocols other than PLAINTEXT. + - TLS settings have been moved from `auth.tls.*` to `tls.*`. + - Zookeeper TLS settings have been moved from `auth.zookeeper*` to `tls.zookeeper.*` +- Refactor externalAccess to support the new architecture: + - `externalAccess.service.*` have been renamed to `externalAccess.controller.service.*` and `externalAccess.broker.service.*`. + - Controller pods will not configure externalAccess unless: + - `controller.controllerOnly=false` (default), meaning the pods are running as 'controller+broker' nodes. + - `externalAccess.controller.service.forceExpose=true`, for use cases where controller-only nodes should be exposed externally. + +#### Upgrading from Kraft mode + +If upgrading from Kraft mode, existing PVCs from Kafka containers should be reattached to 'controller' pods. + +#### Upgrading from Zookeeper mode + +If upgrading from Zookeeper mode, make sure you set 'controller.replicaCount=0' and reattach the existing PVCs to 'broker' pods. +This will allow you to perform a migration to Kraft mode in the future by following the 'Migrating from Zookeeper' section of this documentation. + +#### Retaining PersistentVolumes + +When upgrading the Kafka chart, you may want to retain your existing data. To do so, we recommend following this guide: + +**NOTE**: This guide requires the binaries 'kubectl' and 'jq'. + +```console +# Env variables +REPLICA=0 +OLD_PVC="data-<your-release-name>-kafka-${REPLICA}" +NEW_PVC="data-<your-release-name>-kafka-<controller|broker>-${REPLICA}" +PV_NAME=$(kubectl get pvc $OLD_PVC -o jsonpath="{.spec.volumeName}") +NEW_PVC_MANIFEST_FILE="$NEW_PVC.yaml" + +# Modify PV reclaim policy +kubectl patch pv $PV_NAME -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' +# Manually check field 'RECLAIM POLICY' +kubectl get pv $PV_NAME + +# Create new PVC manifest +kubectl get pvc $OLD_PVC -o json | jq " + .metadata.name = \"$NEW_PVC\" + | with_entries( + select([.key] | + inside([\"metadata\", \"spec\", \"apiVersion\", \"kind\"])) + ) + | del( + .metadata.annotations, .metadata.creationTimestamp, + .metadata.finalizers, .metadata.resourceVersion, + .metadata.selfLink, .metadata.uid + ) + " > $NEW_PVC_MANIFEST_FILE +# Check manifest +cat $NEW_PVC_MANIFEST_FILE + +# Delete your old Statefulset and PVC +kubectl delete sts "<your-release-name>-kafka" +kubectl delete pvc $OLD_PVC +# Make PV available again and create the new PVC +kubectl patch pv $PV_NAME -p '{"spec":{"claimRef": null}}' +kubectl apply -f $NEW_PVC_MANIFEST_FILE +``` + +Repeat this process for each replica you had in your Kafka cluster. Once completed, upgrade the cluster and the new Statefulset should reuse the existing PVCs. + +### To 23.0.0 + +This major updates Kafka to its newest version, 3.5.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/35/documentation.html#upgrade). + +### To 22.0.0 + +This major updates Kafka's configuration to use Kraft by default. 
+
+### To 23.0.0
+
+This major updates Kafka to its newest version, 3.5.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/35/documentation.html#upgrade).
+
+### To 22.0.0
+
+This major updates Kafka's configuration to use Kraft by default. You can learn more about this configuration [here](https://developer.confluent.io/learn/kraft). Apart from setting the `kraft.enabled` parameter to `true`, we also made the following changes:
+
+- Renamed the `minBrokerId` parameter to `minId` to set the minimum ID to use when configuring the `node.id` or `broker.id` parameter, depending on Kafka's configuration. This parameter sets the `KAFKA_CFG_NODE_ID` env var in the container.
+- Updated the `containerPorts` and `service.ports` parameters to include the new controller port.
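+
+For example, if you prefer to stay on Zookeeper when crossing this version, the override could look like the sketch below (`zookeeper.enabled` is the usual subchart toggle and is an assumption here; check `values.yaml`):
+
+```console
+helm upgrade kafka oci://registry-1.docker.io/bitnamicharts/kafka \
+  --set kraft.enabled=false \
+  --set zookeeper.enabled=true
+```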
+
+### To 21.0.0
+
+This major updates Kafka to its newest version, 3.4.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/34/documentation.html#upgrade).
+
+### To 20.0.0
+
+This major updates the Zookeeper subchart to its newest major, 11.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100).
+
+### To 19.0.0
+
+This major updates Kafka to its newest version, 3.3.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/33/documentation.html#upgrade).
+
+### To 18.0.0
+
+This major updates the Zookeeper subchart to its newest major, 10.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1000).
+
+### To 16.0.0
+
+This major updates the Zookeeper subchart to its newest major, 9.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-900).
+
+### To 15.0.0
+
+This major release bumps the Kafka major version to the `3.x` series.
+It also renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. Some affected values are:
+
+- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map.
+- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map.
+- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- Several parameters marked as deprecated in `14.x.x` are no longer supported.
+
+Additionally, this version updates the ZooKeeper subchart to its newest major, `8.0.0`, which contains similar changes.
+
+### To 14.0.0
+
+In this version, the `image` block is defined once and is used in the different templates, while in the previous version, the `image` block was duplicated for the main container and the provisioning one:
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.8.0
+```
+
+VS
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.8.0
+...
+provisioning:
+  image:
+    registry: docker.io
+    repository: bitnami/kafka
+    tag: 2.8.0
+```
+
+See [PR#7114](https://github.com/bitnami/charts/pull/7114) for more info about the implemented changes.
+
+### To 13.0.0
+
+This major updates the Zookeeper subchart to its newest major, 7.0.0, which renames all TLS-related settings. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-700).
+
+### To 12.2.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 12.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart so it can incorporate the different features added in Helm v3 and stay consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Dependency information was moved from the *requirements.yaml* to the *Chart.yaml*.
+- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+### To 11.8.0
+
+External access to brokers can now be achieved through the cluster's Kafka service.
+
+- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external`
+
+### To 11.7.0
+
+The way to configure users and passwords has changed. It is now possible to create multiple users during the installation by providing the lists of users and passwords.
+
+- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array).
+- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array).
+
+### To 11.0.0
+
+The way to configure listeners and authentication on Kafka has been totally refactored, allowing users to configure different authentication protocols on different listeners. Please check the [Listeners Configuration](#listeners-configuration) section for more information.
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or disappeared in favor of new ones in this major version:
+
+- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.certificatesSecret` -> renamed to `auth.jksSecret`.
+- `auth.certificatesPassword` -> renamed to `auth.jksPassword`.
+- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`.
+- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser`.
+- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword`.
+- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser`.
+- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword`.
+- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret`.
+- `service.sslPort` -> deprecated in favor of `service.internalPort`.
+- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort`.
+- `metrics.kafka.extraFlag` -> new parameter.
+- `metrics.kafka.certificatesSecret` -> new parameter.
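+
+A hypothetical `values.yaml` fragment using the renamed parameters could look like this (user names, passwords and protocol choices are placeholders):
+
+```yaml
+auth:
+  clientProtocol: sasl        # old nomenclature: plaintext, tls, mtls, sasl or sasl_tls
+  interBrokerProtocol: tls
+  jaas:
+    clientUsers:
+      - user1
+    clientPasswords:
+      - password1
+    interBrokerUser: admin
+```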
+
+### To 10.0.0
+
+If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later.
+
+### To 9.0.0
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed in this major version:
+
+```diff
+- securityContext.enabled
+- securityContext.fsGroup
+- securityContext.runAsUser
++ podSecurityContext
+- externalAccess.service.loadBalancerIP
++ externalAccess.service.loadBalancerIPs
+- externalAccess.service.nodePort
++ externalAccess.service.nodePorts
+- metrics.jmx.configMap.enabled
+- metrics.jmx.configMap.overrideConfig
++ metrics.jmx.config
+- metrics.jmx.configMap.overrideName
++ metrics.jmx.existingConfigmap
+```
+
+Port names were prefixed with the protocol to comply with Istio's port naming conventions.
+
+### To 8.0.0
+
+There is no backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
+
+### To 7.0.0
+
+Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments.
+Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka:
+
+```console
+helm upgrade kafka oci://registry-1.docker.io/bitnamicharts/kafka --version 6.1.8 --set metrics.kafka.enabled=false
+helm upgrade kafka oci://registry-1.docker.io/bitnamicharts/kafka --version 7.0.0 --set metrics.kafka.enabled=true
+```
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+## License
+
+Copyright © 2023 VMware, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/manifest/helm-charts/infra/kafka/charts/common/.helmignore b/manifest/helm-charts/infra/kafka/charts/common/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/manifest/helm-charts/infra/kafka/charts/common/Chart.yaml b/manifest/helm-charts/infra/kafka/charts/common/Chart.yaml
new file mode 100644
index 000000000..3be88e6aa
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.11.1
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://bitnami.com
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: VMware, Inc.
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+type: library
+version: 2.11.1
diff --git a/manifest/helm-charts/infra/kafka/charts/common/README.md b/manifest/helm-charts/infra/kafka/charts/common/README.md
new file mode 100644
index 000000000..fe6a01000
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/common/README.md
@@ -0,0 +1,235 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 2.x.x
+    repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information on logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Ghost data Persistent Volume Storage Class. If set to "-", storageClassName: "" which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts:
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart so it can incorporate the different features added in Helm v3 and stay consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+## License
+
+Copyright © 2023 VMware, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_affinities.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_affinities.tpl
new file mode 100644
index 000000000..e85b1df45
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_affinities.tpl
@@ -0,0 +1,139 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ .
| quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict 
"customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_capabilities.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..c6d115fe5 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_capabilities.tpl @@ -0,0 +1,185 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. +*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimum Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_errors.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..07ded6f64 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_errors.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Throw an error when upgrading using empty password values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_images.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_images.tpl new file mode 100644 index 000000000..e248d6d08 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_images.tpl @@ -0,0 +1,101 @@ +{{/* +Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper image version (ignores image revision/prerelease info & falls back to chart appVersion) +{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }} +*/}} +{{- define "common.images.version" -}} +{{- $imageTag := .imageRoot.tag | toString -}} +{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}} +{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}} + {{- $version := semver $imageTag -}} + {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}} +{{- else -}} + {{- print .chart.AppVersion -}} +{{- end -}} +{{- end -}} + diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_ingress.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..efa5b85c7 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_ingress.tpl @@ -0,0 +1,73 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_labels.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..a3cdc2bfd --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_labels.tpl @@ -0,0 +1,40 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) (dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service "app.kubernetes.io/version" .context.Chart.AppVersion) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_names.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_names.tpl new file mode 100644 index 000000000..a222924f1 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_names.tpl @@ -0,0 +1,71 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_secrets.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a193c46b6 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_secrets.tpl @@ -0,0 +1,172 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets. +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $failOnNew := default true .failOnNew }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else if $failOnNew }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else if .defaultValue -}} + {{- $value = .defaultValue | toString | b64enc -}} +{{- end -}} +{{- if $value -}} +{{- printf "%s" $value -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_storage.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..16405a0f8 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_storage.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_tplvalues.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..a8ed7637e --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,38 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. +Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_utils.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..c87040cd9 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_utils.tpl @@ -0,0 +1,67 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. 
+Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given a key path +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns the first .Values key with a defined value, or the first key of the list if none are defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/_warnings.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..66dffc1fe --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/_warnings.tpl @@ -0,0 +1,19 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_cassandra.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..eda9aada5 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty.
+ +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mariadb.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..17d83a2fd --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . 
-}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mongodb.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..bbb445b86 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,113 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mysql.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..ca3953f86 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_postgresql.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..8c9aa570e --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,134 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_redis.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..fc0d208dd --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,81 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_validations.tpl b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..31ceda871 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,51 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. 
The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/common/values.schema.json b/manifest/helm-charts/infra/kafka/charts/common/values.schema.json new file mode 100644 index 000000000..2124b3e4a --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/values.schema.json @@ -0,0 +1,11 @@ +{ + "title": "Chart Values", + "type": "object", + "properties": { + "exampleValue": { + "type": "string", + "description": "", + "default": "common-chart" + } + } +} \ No newline at end of file diff --git a/manifest/helm-charts/infra/kafka/charts/common/values.yaml b/manifest/helm-charts/infra/kafka/charts/common/values.yaml new file mode 100644 index 000000000..9abe0e154 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/common/values.yaml @@ -0,0 +1,8 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/.helmignore b/manifest/helm-charts/infra/kafka/charts/zookeeper/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/Chart.lock b/manifest/helm-charts/infra/kafka/charts/zookeeper/Chart.lock new file mode 100644 index 000000000..f54586cc7 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.10.0 +digest: sha256:023ded170632d04528f30332370f34fc8fb96efb2886a01d934cb3bd6e6d2e09 +generated: "2023-09-05T11:24:06.99508+02:00" diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/Chart.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/Chart.yaml new file mode 100644 index 000000000..4a7cddb2a --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Infrastructure + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r51 + - name: zookeeper + image: docker.io/bitnami/zookeeper:3.9.0-debian-11-r11 + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 3.9.0 +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + tags: + - bitnami-common + version: 2.x.x +description: Apache ZooKeeper provides a reliable, centralized register of configuration + data and services for distributed applications. +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-220x234.png +keywords: +- zookeeper +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: zookeeper +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/zookeeper +version: 12.1.3 diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/README.md b/manifest/helm-charts/infra/kafka/charts/zookeeper/README.md new file mode 100644 index 000000000..b3bc29768 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/README.md @@ -0,0 +1,528 @@ + + +# Apache ZooKeeper packaged by Bitnami + +Apache ZooKeeper provides a reliable, centralized register of configuration data and services for distributed applications. + +[Overview of Apache ZooKeeper](https://zookeeper.apache.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/zookeeper +``` + +## Introduction + +This chart bootstraps a [ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use Apache ZooKeeper in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. 
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/zookeeper +``` + +These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Extra objects to deploy (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `namespaceOverride` | Override namespace for ZooKeeper resources | `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + +### ZooKeeper chart parameters + +| Name | Description | Value | +| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `image.registry` | ZooKeeper image registry | `docker.io` | +| `image.repository` | ZooKeeper image repository | `bitnami/zookeeper` | +| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.9.0-debian-11-r11` | +| `image.digest` | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.client.enabled` | Enable ZooKeeper client-server authentication. 
It uses SASL/Digest-MD5 | `false` |
+| `auth.client.clientUser` | User that ZooKeeper clients will use to authenticate | `""` |
+| `auth.client.clientPassword` | Password that ZooKeeper clients will use to authenticate | `""` |
+| `auth.client.serverUsers` | Comma, semicolon or whitespace separated list of users to be created | `""` |
+| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` |
+| `auth.client.existingSecret` | Use existing secret (ignores previous passwords) | `""` |
+| `auth.quorum.enabled` | Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 | `false` |
+| `auth.quorum.learnerUser` | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` |
+| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` |
+| `auth.quorum.serverUsers` | Comma, semicolon or whitespace separated list of users for the quorumServers. | `""` |
+| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` |
+| `auth.quorum.existingSecret` | Use existing secret (ignores previous passwords) | `""` |
+| `tickTime` | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats | `2000` |
+| `initLimit` | Limit on the length of time (in ticks) that ZooKeeper servers in quorum have to connect to a leader | `10` |
+| `syncLimit` | How far out of date (in ticks) a server can be from a leader | `5` |
+| `preAllocSize` | Block size for the transaction log file | `65536` |
+| `snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000` |
+| `maxClientCnxns` | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble | `60` |
+| `maxSessionTimeout` | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate | `40000` |
+| `heapSize` | Size (in MB) for the Java Heap options (Xmx and Xms) | `1024` |
+| `fourlwCommandsWhitelist` | A comma-separated list of Four Letter Word commands that can be executed | `srvr, mntr, ruok` |
+| `minServerId` | Minimal SERVER_ID value; node IDs increment from this value | `1` |
+| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` |
+| `autopurge.snapRetainCount` | Number of most recent snapshots (and the corresponding transaction logs) to retain | `3` |
+| `autopurge.purgeInterval` | The time interval (in hours) at which the purge task is triggered | `0` |
+| `logLevel` | Log level for the ZooKeeper server.
ERROR by default | `ERROR` | +| `jvmFlags` | Default JVM flags for the ZooKeeper process | `""` | +| `dataLogDir` | Dedicated data log directory | `""` | +| `configuration` | Configure ZooKeeper with a custom zoo.cfg file | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for ZooKeeper | `""` | +| `extraEnvVars` | Array with extra environment variables to add to ZooKeeper nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ZooKeeper nodes | `""` | +| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | +| `args` | Override default container args (useful when using custom images) | `[]` | + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `containerPorts.client` | ZooKeeper client container port | `2181` | +| `containerPorts.tls` | ZooKeeper TLS container port | `3181` | +| `containerPorts.follower` | ZooKeeper follower container port | `2888` | +| `containerPorts.election` | ZooKeeper election container port | `3888` | +| `livenessProbe.enabled` | Enable livenessProbe on ZooKeeper containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `livenessProbe.probeCommandTimeout` | Probe command timeout for livenessProbe | `2` | +| `readinessProbe.enabled` | Enable readinessProbe on ZooKeeper containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readinessProbe.probeCommandTimeout` | Probe command timeout for readinessProbe | `2` | +| `startupProbe.enabled` | Enable startupProbe on ZooKeeper containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after startup 
| `{}` |
+| `resources.limits` | The resource limits for the ZooKeeper containers | `{}` |
+| `resources.requests.memory` | The requested memory for the ZooKeeper containers | `256Mi` |
+| `resources.requests.cpu` | The requested CPU for the ZooKeeper containers | `250m` |
+| `podSecurityContext.enabled` | Enable ZooKeeper pods' Security Context | `true` |
+| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` |
+| `containerSecurityContext.enabled` | Enable ZooKeeper containers' Security Context | `true` |
+| `containerSecurityContext.runAsUser` | Set ZooKeeper containers' Security Context runAsUser | `1001` |
+| `containerSecurityContext.runAsNonRoot` | Set ZooKeeper containers' Security Context runAsNonRoot | `true` |
+| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` |
+| `hostAliases` | ZooKeeper pods host aliases | `[]` |
+| `podLabels` | Extra labels for ZooKeeper pods | `{}` |
+| `podAnnotations` | Annotations for ZooKeeper pods | `{}` |
+| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
+| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `affinity` | Affinity for pod assignment | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `tolerations` | Tolerations for pod assignment | `[]` |
+| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment, spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `podManagementPolicy` | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees.
There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand | `""` | +| `schedulerName` | Kubernetes pod scheduler registry | `""` | +| `updateStrategy.type` | ZooKeeper statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | ZooKeeper statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the ZooKeeper pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the ZooKeeper pod(s) | `[]` | +| `initContainers` | Add additional init containers to the ZooKeeper pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the ZooKeeper pod | `false` | +| `pdb.minAvailable` | Minimum available ZooKeeper replicas | `""` | +| `pdb.maxUnavailable` | Maximum unavailable ZooKeeper replicas | `1` | + +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------- | --------------------------------------------------------------------------------------- | ----------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | ZooKeeper client service port | `2181` | +| `service.ports.tls` | ZooKeeper TLS service port | `3181` | +| `service.ports.follower` | ZooKeeper follower service port | `2888` | +| `service.ports.election` | ZooKeeper election service port | `3888` | +| `service.nodePorts.client` | Node port for clients | `""` | +| `service.nodePorts.tls` | Node port for TLS | `""` | +| `service.disableBaseClientPort` | Remove client port from service definitions. | `false` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | ZooKeeper service Cluster IP | `""` | +| `service.loadBalancerIP` | ZooKeeper service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | ZooKeeper service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | ZooKeeper service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for ZooKeeper service | `{}` | +| `service.extraPorts` | Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) | `[]` | +| `service.headless.annotations` | Annotations for the Headless Service | `{}` | +| `service.headless.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `service.headless.servicenameOverride` | String to partially override headless service name | `""` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for ZooKeeper pod | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + +### Persistence parameters + +| Name | Description | Value | +| -------------------------------------- | ------------------------------------------------------------------------------ | ------------------- | +| `persistence.enabled` | Enable ZooKeeper data persistence using PVC. If false, use emptyDir | `true` | +| `persistence.existingClaim` | Name of an existing PVC to use (only when deploying a single replica) | `""` | +| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `""` | +| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.labels` | Labels for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data PVC | `{}` | +| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's dedicated data log directory | `8Gi` | +| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""` | +| `persistence.dataLogDir.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data log PVC | `{}` | + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r51` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.enabled` | Enabled init container Security Context | `true` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + +### Metrics parameters + +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------- | +| `metrics.enabled` | Enable Prometheus to access ZooKeeper metrics endpoint | `false` | +| `metrics.containerPort` | ZooKeeper Prometheus Exporter container port | `9141` | +| `metrics.service.type` | ZooKeeper Prometheus Exporter service type | `ClusterIP` | +| `metrics.service.port` | ZooKeeper Prometheus Exporter service port | `9141` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + +### TLS/SSL parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| `tls.client.enabled` | Enable TLS for client connections | `false` | +| `tls.client.auth` | SSL Client auth. Can be "none", "want" or "need". 
| `none` |
+| `tls.client.autoGenerated` | Automatically generate self-signed TLS certificates for ZooKeeper client communications | `false` |
+| `tls.client.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications | `""` |
+| `tls.client.existingSecretKeystoreKey` | The secret key from the tls.client.existingSecret containing the Keystore. | `""` |
+| `tls.client.existingSecretTruststoreKey` | The secret key from the tls.client.existingSecret containing the Truststore. | `""` |
+| `tls.client.keystorePath` | Location of the KeyStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks` |
+| `tls.client.truststorePath` | Location of the TrustStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks` |
+| `tls.client.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` |
+| `tls.client.passwordsSecretKeystoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. | `""` |
+| `tls.client.passwordsSecretTruststoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. | `""` |
+| `tls.client.keystorePassword` | Password to access KeyStore if needed | `""` |
+| `tls.client.truststorePassword` | Password to access TrustStore if needed | `""` |
+| `tls.quorum.enabled` | Enable TLS for quorum protocol | `false` |
+| `tls.quorum.auth` | SSL Quorum Client auth. Can be "none", "want" or "need". | `none` |
+| `tls.quorum.autoGenerated` | Create self-signed TLS certificates. Currently only supports PEM certificates. | `false` |
+| `tls.quorum.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol | `""` |
+| `tls.quorum.existingSecretKeystoreKey` | The secret key from the tls.quorum.existingSecret containing the Keystore. | `""` |
+| `tls.quorum.existingSecretTruststoreKey` | The secret key from the tls.quorum.existingSecret containing the Truststore. | `""` |
+| `tls.quorum.keystorePath` | Location of the KeyStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks` |
+| `tls.quorum.truststorePath` | Location of the TrustStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks` |
+| `tls.quorum.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` |
+| `tls.quorum.passwordsSecretKeystoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. | `""` |
+| `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""` |
+| `tls.quorum.keystorePassword` | Password to access KeyStore if needed | `""` |
+| `tls.quorum.truststorePassword` | Password to access TrustStore if needed | `""` |
+| `tls.resources.limits` | The resource limits for the TLS init container | `{}` |
+| `tls.resources.requests` | The requested resources for the TLS init container | `{}` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release \
+  --set auth.client.clientUser=newUser \
+  oci://registry-1.docker.io/bitnamicharts/zookeeper
+```
+
+The above command sets the ZooKeeper client user to `newUser`.
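+
+For reference, the same setting expressed in a values file would be (a minimal sketch; `newUser` is a placeholder):
+
+```yaml
+auth:
+  client:
+    clientUser: newUser
+```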
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/zookeeper
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, significant changes are introduced, or critical vulnerabilities are fixed.
+
+### Configure log level
+
+You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the `logLevel` parameter. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs.
+
+To remove that log noise so the level can be set to `INFO`, two changes must be made.
+
+First, ensure that you are not getting metrics via the deprecated pattern of polling `mntr` on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`.
+
+Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks that direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server is started that listens on `localhost` at port `8080`. The following is an example of this use of the Admin Server for probes:
+
+```yaml
+livenessProbe:
+  enabled: false
+readinessProbe:
+  enabled: false
+customLivenessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok']
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+customReadinessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null']
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+```
+
+You can also set the log4j logging level and which log appenders are turned on by using `ZOO_LOG4J_PROP`, which is set inside `conf/log4j.properties` as `zookeeper.root.logger` and defaults to
+
+```console
+zookeeper.root.logger=INFO, CONSOLE
+```
+
+The available appenders are:
+
+- CONSOLE
+- ROLLINGFILE
+- RFAAUDIT
+- TRACEFILE
+
+## Persistence
+
+The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data to it.
+
+By default, the chart is configured to use a Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Configure the data log directory
+
+You can use a dedicated device for logs (instead of using the data directory) to help avoid contention between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and the log will be written to the data directory (ZooKeeper's default behavior).
+
+When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
+
+### Set pod affinity
+
+This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 12.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.9.x. For more information, please refer to the [ZooKeeper 3.9.0 Release Notes](https://zookeeper.apache.org/doc/r3.9.0/releasenotes.html).
+
+### To 11.0.0
+
+This major version removes `commonAnnotations` and `commonLabels` from `volumeClaimTemplates`. Annotations and labels can now be set on volume claims using the `persistence.annotations` and `persistence.labels` values. If the previous deployment already set `commonAnnotations` and/or `commonLabels`, then, to ensure a clean upgrade from the previous version without losing data, please set `persistence.annotations` and/or `persistence.labels` to the same content as the common values.
+
+### To 10.0.0
+
+This new version of the chart adds support for server-server authentication.
+The chart previously supported client-server authentication; to avoid confusion, the previous parameters have been renamed from `auth.*` to `auth.client.*`.
+
+### To 9.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.8.0.
Upgrade compatibility is not guaranteed.
+
+### To 8.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+
+- `allowAnonymousLogin` is deprecated.
+- `containerPort`, `tlsContainerPort`, `followerContainerPort` and `electionContainerPort` have been regrouped under the `containerPorts` map.
+- `service.port`, `service.tlsClientPort`, `service.followerPort`, and `service.electionPort` have been regrouped under the `service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` have been regrouped under the `updateStrategy` map.
+- `podDisruptionBudget.*` parameters are renamed to `pdb.*`.
+
+### To 7.0.0
+
+This new version renames the parameters used to configure TLS for both client and quorum.
+
+- `service.tls.disable_base_client_port` is renamed to `service.disableBaseClientPort`
+- `service.tls.client_port` is renamed to `service.tlsClientPort`
+- `service.tls.client_enable` is renamed to `tls.client.enabled`
+- `service.tls.client_keystore_path` is renamed to `tls.client.keystorePath`
+- `service.tls.client_truststore_path` is renamed to `tls.client.truststorePath`
+- `service.tls.client_keystore_password` is renamed to `tls.client.keystorePassword`
+- `service.tls.client_truststore_password` is renamed to `tls.client.truststorePassword`
+- `service.tls.quorum_enable` is renamed to `tls.quorum.enabled`
+- `service.tls.quorum_keystore_path` is renamed to `tls.quorum.keystorePath`
+- `service.tls.quorum_truststore_path` is renamed to `tls.quorum.truststorePath`
+- `service.tls.quorum_keystore_password` is renamed to `tls.quorum.keystorePassword`
+- `service.tls.quorum_truststore_password` is renamed to `tls.quorum.truststorePassword`
+
+### To 6.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 6.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/zookeeper/administration/upgrade-helm3/).
+
+### To 5.21.0
+
+A couple of parameters related to ZooKeeper metrics were renamed or removed in favor of new ones:
+
+- `metrics.port` is renamed to `metrics.containerPort`.
+- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`.
+
+### To 3.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade
+of the application, each node will need to have at least one snapshot file created in the data directory. If not, the
+new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056)
+to find ways to work around this issue if you are facing it.
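+
+Before upgrading, you can check whether each node already has a snapshot. The following is a minimal sketch, assuming the data directory is `/bitnami/zookeeper/data` (a subdirectory of the Bitnami image's `/bitnami/zookeeper` path described in the [Persistence](#persistence) section, with ZooKeeper's standard `version-2` layout) and a release named `zookeeper`; adjust the pod name for your release:
+
+```console
+kubectl exec zookeeper-0 -- ls /bitnami/zookeeper/data/version-2
+```
+
+Snapshot files are named `snapshot.<zxid>`; if a node shows none, refer to the linked issue above for workarounds before upgrading.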
+ +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`: + +```console +kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is zookeeper: + +```console +kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/.helmignore b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/Chart.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/Chart.yaml new file mode 100644 index 000000000..e35c2d0e7 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 2.9.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://bitnami.com +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +type: library +version: 2.10.0 diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/README.md b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/README.md new file mode 100644 index 000000000..fe6a01000 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/README.md @@ -0,0 +1,235 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts. 
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 2.x.x
+    repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: Image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information in logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+    env:
+      - name: PASSWORD
+        valueFrom:
+          secretKeyRef:
+            name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+            key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+#### Useful links
+
+- 
+- 
+- 
+
+## License
+
+Copyright © 2023 VMware, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..e85b1df45 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl @@ -0,0 +1,139 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight 
| default 1 -}} + {{- end -}} +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..c6d115fe5 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl @@ -0,0 +1,185 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. 
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Vertical Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.vpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_errors.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_errors.tpl
new file mode 100644
index 000000000..07ded6f64
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_errors.tpl
@@ -0,0 +1,28 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if empty, no error is thrown.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n    Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_images.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_images.tpl new file mode 100644 index 000000000..2181f3224 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_images.tpl @@ -0,0 +1,85 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..efa5b85c7 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl @@ -0,0 +1,73 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_labels.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..fac46076a --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_labels.tpl @@ -0,0 +1,55 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge + (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) + (dict + "app.kubernetes.io/name" (include "common.names.name" .context) + "helm.sh/chart" (include "common.names.chart" .context) + "app.kubernetes.io/instance" .context.Release.Name + "app.kubernetes.io/managed-by" .context.Release.Service + ) + | toYaml +}} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge + (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") + (dict + "app.kubernetes.io/name" (include "common.names.name" .context) + "app.kubernetes.io/instance" .context.Release.Name + ) + | toYaml +}} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_names.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_names.tpl new file mode 100644 index 000000000..a222924f1 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_names.tpl @@ -0,0 +1,71 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a193c46b6 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl @@ -0,0 +1,172 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. 
+ +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets. +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $failOnNew := default true .failOnNew }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else if $failOnNew }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else if .defaultValue -}} + {{- $value = .defaultValue | toString | b64enc -}} +{{- end -}} +{{- if $value -}} +{{- printf "%s" $value -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_storage.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..16405a0f8 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_storage.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..a8ed7637e --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,38 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. +Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_utils.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..c87040cd9 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_utils.tpl @@ -0,0 +1,67 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. 
+Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..66dffc1fe --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl @@ -0,0 +1,19 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..eda9aada5 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..17d83a2fd --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..bbb445b86 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,113 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..ca3953f86 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..8c9aa570e --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,134 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
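+A value set under global.postgresql.existingSecret, when present, takes
+precedence over the chart-local existingSecret value.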
+ +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..fc0d208dd --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,81 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 000000000..31ceda871
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,51 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
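+Each missing value produces an error line; callers are expected to collect the
+rendered messages and pass them to "fail", as the charts' validateValues helpers do.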
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/values.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/values.yaml
new file mode 100644
index 000000000..9abe0e154
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/charts/common/values.yaml
@@ -0,0 +1,8 @@
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+## bitnami/common
+## It is required by CI/CD tools and processes.
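+## (bitnami/common is a library chart, so this placeholder is the only value it
+## ships; nothing in the templates consumes it.)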
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/NOTES.txt b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/NOTES.txt
new file mode 100644
index 000000000..c287e1e56
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/NOTES.txt
@@ -0,0 +1,76 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if and (not .Values.auth.client.enabled) (eq .Values.service.type "LoadBalancer") }}
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "service.type=LoadBalancer" and not specifying "auth.client.enabled=true"
+    you have most likely exposed the ZooKeeper service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As an alternative, you can also specify a valid password on the
+    "auth.client.clientPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing:
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+  /opt/bitnami/scripts/zookeeper/entrypoint.sh /opt/bitnami/scripts/zookeeper/run.sh
+
+{{- else }}
+
+ZooKeeper can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster:
+
+  {{ template "common.names.fullname" . }}.{{ template "zookeeper.namespace" . }}.svc.{{ .Values.clusterDomain }}
+
+To connect to your ZooKeeper server run the following commands:
+
+  export POD_NAME=$(kubectl get pods --namespace {{ template "zookeeper.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}")
+  kubectl exec -it $POD_NAME -- zkCli.sh
+
+To connect to your ZooKeeper server from outside the cluster execute the following commands:
+
+{{- if eq .Values.service.type "NodePort" }}
+
+  export NODE_IP=$(kubectl get nodes --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  export NODE_PORT=$(kubectl get --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+  zkCli.sh $NODE_IP:$NODE_PORT
+
+{{- else if eq .Values.service.type "LoadBalancer" }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ template "zookeeper.namespace" . }} -w {{ template "common.names.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ template "zookeeper.namespace" .
}} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + zkCli.sh $SERVICE_IP:{{ .Values.service.ports.client }} + +{{- else if eq .Values.service.type "ClusterIP" }} + + kubectl port-forward --namespace {{ template "zookeeper.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.containerPorts.client }} & + zkCli.sh 127.0.0.1:{{ .Values.service.ports.client }} + +{{- end }} +{{- end }} + +{{- include "zookeeper.validateValues" . }} +{{- include "zookeeper.checkRollingTags" . }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/_helpers.tpl b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/_helpers.tpl new file mode 100644 index 000000000..0e2e8e588 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,366 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper ZooKeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "zookeeper.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Return ZooKeeper Namespace to use +*/}} +{{- define "zookeeper.namespace" -}} +{{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} +{{- else -}} + {{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper client-server authentication credentials secret +*/}} +{{- define "zookeeper.client.secretName" -}} +{{- if .Values.auth.client.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.client.existingSecret $) -}} +{{- else -}} + {{- printf "%s-client-auth" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper server-server authentication credentials secret +*/}} +{{- define "zookeeper.quorum.secretName" -}} +{{- if .Values.auth.quorum.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.quorum.existingSecret $) -}} +{{- else -}} + {{- printf "%s-quorum-auth" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper client-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.client.createSecret" -}} +{{- if and .Values.auth.client.enabled (empty .Values.auth.client.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper server-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.quorum.createSecret" -}} +{{- if and .Values.auth.quorum.enabled (empty .Values.auth.quorum.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Return the ZooKeeper configuration ConfigMap name +*/}} +{{- define "zookeeper.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ConfigMap object should be created for ZooKeeper configuration +*/}} +{{- define "zookeeper.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsSecret" -}} +{{- if and .Values.tls.quorum.enabled .Values.tls.quorum.autoGenerated (not .Values.tls.quorum.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper quorum TLS certificates +*/}} +{{- define "zookeeper.quorum.tlsSecretName" -}} +{{- $secretName := .Values.tls.quorum.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.quorum.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.quorum.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsSecret" -}} +{{- if and .Values.tls.client.enabled .Values.tls.client.autoGenerated (not .Values.tls.client.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper client TLS certificates +*/}} +{{- define "zookeeper.client.tlsSecretName" -}} +{{- $secretName := .Values.tls.client.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-crt" (include "common.names.fullname" .) 
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum keystore key to be retrieved from tls.quorum.existingSecret.
+*/}}
+{{- define "zookeeper.quorum.tlsKeystoreKey" -}}
+{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretKeystoreKey -}}
+  {{- printf "%s" .Values.tls.quorum.existingSecretKeystoreKey -}}
+{{- else -}}
+  {{- printf "zookeeper.keystore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum truststore key to be retrieved from tls.quorum.existingSecret.
+*/}}
+{{- define "zookeeper.quorum.tlsTruststoreKey" -}}
+{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretTruststoreKey -}}
+  {{- printf "%s" .Values.tls.quorum.existingSecretTruststoreKey -}}
+{{- else -}}
+  {{- printf "zookeeper.truststore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client keystore key to be retrieved from tls.client.existingSecret.
+*/}}
+{{- define "zookeeper.client.tlsKeystoreKey" -}}
+{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretKeystoreKey -}}
+  {{- printf "%s" .Values.tls.client.existingSecretKeystoreKey -}}
+{{- else -}}
+  {{- printf "zookeeper.keystore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client truststore key to be retrieved from tls.client.existingSecret.
+*/}}
+{{- define "zookeeper.client.tlsTruststoreKey" -}}
+{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretTruststoreKey -}}
+  {{- printf "%s" .Values.tls.client.existingSecretTruststoreKey -}}
+{{- else -}}
+  {{- printf "zookeeper.truststore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper client
+*/}}
+{{- define "zookeeper.client.createTlsPasswordsSecret" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.passwordsSecretName) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Keystore and Truststore password
+*/}}
+{{- define "zookeeper.client.tlsPasswordsSecret" -}}
+{{- $secretName := .Values.tls.client.passwordsSecretName -}}
+{{- if $secretName -}}
+  {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+  {{- printf "%s-client-tls-pass" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum keystore password key to be retrieved from tls.quorum.passwordsSecretName.
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordKeystoreKey" -}}
+{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretKeystoreKey -}}
+  {{- printf "%s" .Values.tls.quorum.passwordsSecretKeystoreKey -}}
+{{- else -}}
+  {{- printf "keystore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum truststore password key to be retrieved from tls.quorum.passwordsSecretName.
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordTruststoreKey" -}}
+{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretTruststoreKey -}}
+  {{- printf "%s" .Values.tls.quorum.passwordsSecretTruststoreKey -}}
+{{- else -}}
+  {{- printf "truststore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client keystore password key to be retrieved from tls.client.passwordsSecretName.
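+Falls back to the literal key "keystore-password" when no custom key is set.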
+*/}}
+{{- define "zookeeper.client.tlsPasswordKeystoreKey" -}}
+{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretKeystoreKey -}}
+  {{- printf "%s" .Values.tls.client.passwordsSecretKeystoreKey -}}
+{{- else -}}
+  {{- printf "keystore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client truststore password key to be retrieved from tls.client.passwordsSecretName.
+*/}}
+{{- define "zookeeper.client.tlsPasswordTruststoreKey" -}}
+{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretTruststoreKey -}}
+  {{- printf "%s" .Values.tls.client.passwordsSecretTruststoreKey -}}
+{{- else -}}
+  {{- printf "truststore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "zookeeper.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.tls" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Client-server authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.client.auth" -}}
+{{- if and .Values.auth.client.enabled (not .Values.auth.client.existingSecret) (or (not .Values.auth.client.clientUser) (not .Values.auth.client.serverUsers)) }}
+zookeeper: auth.client.enabled
+    In order to enable client-server authentication, you need to provide the list
+    of users to be created and the user to use for client authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Server-server authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.auth" -}}
+{{- if and .Values.auth.quorum.enabled (not .Values.auth.quorum.existingSecret) (or (not .Values.auth.quorum.learnerUser) (not .Values.auth.quorum.serverUsers)) }}
+zookeeper: auth.quorum.enabled
+    In order to enable server-server authentication, you need to provide the list
+    of users to be created and the user to use for quorum authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Client TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.client.tls" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.autoGenerated) (not .Values.tls.client.existingSecret) }}
+zookeeper: tls.client.enabled
+    In order to enable Client TLS encryption, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Quorum TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.tls" -}}
+{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.autoGenerated) (not .Values.tls.quorum.existingSecret) }}
+zookeeper: tls.quorum.enabled
+    In order to enable Quorum TLS, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/configmap.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/configmap.yaml new file mode 100644 index 000000000..1c5c60b31 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "zookeeper.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/extra-list.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/extra-list.yaml new file mode 100644 index 000000000..2d35a580e --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/metrics-svc.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/metrics-svc.yaml new file mode 100644 index 000000000..f66557c39 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,27 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }}-metrics + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/networkpolicy.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/networkpolicy.yaml new file mode 100644 index 000000000..34d36f971 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound connections to ZooKeeper + - ports: + - port: {{ .Values.containerPorts.client }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + {{- end }} + # Allow internal communications between nodes + - ports: + - port: {{ .Values.containerPorts.follower }} + - port: {{ .Values.containerPorts.election }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/pdb.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/pdb.yaml new file mode 100644 index 000000000..27b7bdcb2 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/prometheusrule.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/prometheusrule.yaml new file mode 100644 index 000000000..b5a3046a8 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }} +{{- end }} + diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/scripts-configmap.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/scripts-configmap.yaml new file mode 100644 index 000000000..87950a270 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/scripts-configmap.yaml @@ -0,0 +1,104 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . 
}}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  init-certs.sh: |-
+    #!/bin/bash
+
+    {{- if .Values.tls.client.enabled }}
+    if [[ -f "/certs/client/tls.key" ]] && [[ -f "/certs/client/tls.crt" ]] && [[ -f "/certs/client/ca.crt" ]]; then
+      if [[ -f "/opt/bitnami/zookeeper/config/certs/client/.initialized" ]]; then
+        exit 0
+      fi
+      openssl pkcs12 -export -in "/certs/client/tls.crt" \
+        -passout pass:"${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+        -inkey "/certs/client/tls.key" \
+        -out "/tmp/keystore.p12"
+      keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+        -srcstoretype PKCS12 \
+        -srcstorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+        -deststorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+        -destkeystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks"
+      rm "/tmp/keystore.p12"
+      keytool -import -file "/certs/client/ca.crt" \
+        -keystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" \
+        -storepass "${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD}" \
+        -noprompt
+      touch /opt/bitnami/zookeeper/config/certs/client/.initialized
+    {{- if .Values.tls.client.autoGenerated }}
+    else
+      echo "Couldn't find the expected PEM certificates! They are mandatory when Client encryption via TLS is enabled."
+      exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} ]]; then
+      cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks"
+      cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks"
+    else
+      echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Client encryption via TLS is enabled."
+      exit 1
+    fi
+    {{- end }}
+    {{- end }}
+    {{- if .Values.tls.quorum.enabled }}
+    if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then
+      openssl pkcs12 -export -in "/certs/quorum/tls.crt" \
+        -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+        -inkey "/certs/quorum/tls.key" \
+        -out "/tmp/keystore.p12"
+      keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+        -srcstoretype PKCS12 \
+        -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+        -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+        -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+      rm "/tmp/keystore.p12"
+      keytool -import -file "/certs/quorum/ca.crt" \
+        -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \
+        -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \
+        -noprompt
+    {{- if .Values.tls.quorum.autoGenerated }}
+    else
+      echo "Couldn't find the expected PEM certificates! They are mandatory when Quorum encryption via TLS is enabled."
+      exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .)
+    {{- if .Values.tls.quorum.enabled }}
+    if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then
+        openssl pkcs12 -export -in "/certs/quorum/tls.crt" \
+            -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -inkey "/certs/quorum/tls.key" \
+            -out "/tmp/keystore.p12"
+        keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+            -srcstoretype PKCS12 \
+            -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+        rm "/tmp/keystore.p12"
+        keytool -import -file "/certs/quorum/ca.crt" \
+            -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \
+            -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \
+            -noprompt
+    {{- if .Values.tls.quorum.autoGenerated }}
+    else
+        echo "Couldn't find the expected PEM certificates! They are mandatory when Quorum encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} ]]; then
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks"
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Quorum encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- end }}
+    {{- end }}
+  setup.sh: |-
+    #!/bin/bash
+
+    # Obtain ZOO_SERVER_ID, then execute the entrypoint as usual:
+    # reuse the ID stored in the persistent volume (myid file) if present;
+    # otherwise derive it from the ordinal in the pod hostname.
+    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
+        export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
+    else
+        HOSTNAME="$(hostname -s)"
+        if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+            ORD=${BASH_REMATCH[2]}
+            export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))"
+        else
+            echo "Failed to get index from hostname $HOSTNAME"
+            exit 1
+        fi
+    fi
+    exec /entrypoint.sh /run.sh
diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/secrets.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/secrets.yaml
new file mode 100644
index 000000000..40b354413
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/secrets.yaml
@@ -0,0 +1,70 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if (include "zookeeper.client.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-client-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + quorum-learner-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-learner-password" "providedValues" (list "auth.quorum.learnerPassword") "context" $) }} + quorum-server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-server-password" "providedValues" (list "auth.quorum.serverPasswords") "context" $) }} +{{- end }} +{{- if (include "zookeeper.client.createTlsPasswordsSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-client-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.client.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.client.truststorePassword | b64enc | quote }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-quorum-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.truststorePassword | b64enc | quote }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/serviceaccount.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/serviceaccount.yaml new file mode 100644 index 000000000..8e6d79cdd --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/servicemonitor.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/servicemonitor.yaml new file mode 100644 index 000000000..8cc66ec0c --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ template "zookeeper.namespace" . }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/statefulset.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/statefulset.yaml new file mode 100644 index 000000000..9c9b5dfcf --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,532 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: zookeeper + serviceName: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "zookeeper.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.quorum.createSecret" .) (include "zookeeper.client.createSecret" .) (include "zookeeper.client.createTlsPasswordsSecret" .) (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.client.createTlsSecret" .) (include "zookeeper.quorum.createTlsSecret" .) }} + checksum/tls-secrets: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: zookeeper + spec: + serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} + {{- include "zookeeper.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "zookeeper" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "zookeeper" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/zookeeper + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /bitnami/zookeeper + find /bitnami/zookeeper -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if .Values.dataLogDir }} + mkdir -p {{ .Values.dataLogDir }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.dataLogDir }} + find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- end }} + {{- if .Values.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- end }} + {{- if or .Values.tls.client.enabled .Values.tls.quorum.enabled }} + - name: init-certs + image: {{ include "zookeeper.image" . 
}}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /scripts/init-certs.sh
+          env:
+            - name: MY_POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+            {{- if or .Values.tls.client.passwordsSecretName (include "zookeeper.client.createTlsPasswordsSecret" .) }}
+            - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }}
+            - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+            {{- if or .Values.tls.quorum.passwordsSecretName (include "zookeeper.quorum.createTlsPasswordsSecret" .) }}
+            - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }}
+            - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+          {{- if .Values.tls.resources }}
+          resources: {{- toYaml .Values.tls.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: scripts
+              mountPath: /scripts/init-certs.sh
+              subPath: init-certs.sh
+            {{- if .Values.tls.client.enabled }}
+            - name: client-certificates
+              mountPath: /certs/client
+            - name: client-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/client
+            {{- end }}
+            {{- if .Values.tls.quorum.enabled }}
+            - name: quorum-certificates
+              mountPath: /certs/quorum
+            - name: quorum-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/quorum
+            {{- end }}
+        {{- end }}
+        {{- if .Values.initContainers }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }}
+        {{- end }}
+      containers:
+        - name: zookeeper
+          image: {{ template "zookeeper.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ZOO_DATA_LOG_DIR + value: {{ .Values.dataLogDir | quote }} + - name: ZOO_PORT_NUMBER + value: {{ .Values.containerPorts.client | quote }} + - name: ZOO_TICK_TIME + value: {{ .Values.tickTime | quote }} + - name: ZOO_INIT_LIMIT + value: {{ .Values.initLimit | quote }} + - name: ZOO_SYNC_LIMIT + value: {{ .Values.syncLimit | quote }} + - name: ZOO_PRE_ALLOC_SIZE + value: {{ .Values.preAllocSize | quote }} + - name: ZOO_SNAPCOUNT + value: {{ .Values.snapCount | quote }} + - name: ZOO_MAX_CLIENT_CNXNS + value: {{ .Values.maxClientCnxns | quote }} + - name: ZOO_4LW_COMMANDS_WHITELIST + value: {{ .Values.fourlwCommandsWhitelist | quote }} + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }} + - name: ZOO_AUTOPURGE_INTERVAL + value: {{ .Values.autopurge.purgeInterval | quote }} + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: {{ .Values.autopurge.snapRetainCount | quote }} + - name: ZOO_MAX_SESSION_TIMEOUT + value: {{ .Values.maxSessionTimeout | quote }} + - name: ZOO_SERVERS + {{- $replicaCount := int .Values.replicaCount }} + {{- $minServerId := int .Values.minServerId }} + {{- $followerPort := int .Values.containerPorts.follower }} + {{- $electionPort := int .Values.containerPorts.election }} + {{- $releaseNamespace := include "zookeeper.namespace" . }} + {{- $zookeeperFullname := include "common.names.fullname" . }} + {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }} + {{- $clusterDomain := .Values.clusterDomain }} + value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }}::{{ add $e $minServerId }} {{ end }} + - name: ZOO_ENABLE_AUTH + value: {{ ternary "yes" "no" .Values.auth.client.enabled | quote }} + {{- if .Values.auth.client.enabled }} + - name: ZOO_CLIENT_USER + value: {{ .Values.auth.client.clientUser | quote }} + - name: ZOO_CLIENT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.secretName" . }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.client.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.secretName" . 
}} + key: server-password + {{- end }} + - name: ZOO_ENABLE_QUORUM_AUTH + value: {{ ternary "yes" "no" .Values.auth.quorum.enabled | quote }} + {{- if .Values.auth.quorum.enabled }} + - name: ZOO_QUORUM_LEARNER_USER + value: {{ .Values.auth.quorum.learnerUser | quote }} + - name: ZOO_QUORUM_LEARNER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . }} + key: quorum-learner-password + - name: ZOO_QUORUM_SERVER_USERS + value: {{ .Values.auth.quorum.serverUsers | quote }} + - name: ZOO_QUORUM_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . }} + key: quorum-server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "no" "yes" .Values.auth.client.enabled | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: ZOO_TLS_PORT_NUMBER + value: {{ .Values.containerPorts.tls | quote }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.tls.client.enabled | quote }} + - name: ZOO_TLS_CLIENT_AUTH + value: {{ .Values.tls.client.auth | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.tls.client.keystorePath | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.tls.client.truststorePath | quote }} + {{- if or .Values.tls.client.keystorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.client.truststorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.tls.quorum.enabled | quote }} + - name: ZOO_TLS_QUORUM_CLIENT_AUTH + value: {{ .Values.tls.quorum.auth | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.tls.quorum.keystorePath | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.tls.quorum.truststorePath | quote }} + {{- if or .Values.tls.quorum.keystorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.truststorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . 
}} + {{- end }} + {{- end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: client + containerPort: {{ .Values.containerPorts.client }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-tls + containerPort: {{ .Values.containerPorts.tls }} + {{- end }} + - name: follower + containerPort: {{ .Values.containerPorts.follower }} + - name: election + containerPort: {{ .Values.containerPorts.election }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client 
-quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + {{- if not .Values.service.disableBaseClientPort }} + port: client + {{- else }} + port: follower + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + readOnly: true + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "zookeeper.configmapName" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-certificates + secret: + secretName: {{ include "zookeeper.client.tlsSecretName" . }} + defaultMode: 256 + - name: client-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-certificates + secret: + secretName: {{ include "zookeeper.quorum.tlsSecretName" . 
}} + defaultMode: 256 + - name: quorum-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) ) }} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.dataLogDir.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.dataLogDir.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/svc-headless.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/svc-headless.yaml new file mode 100644 index 000000000..d571b0af4 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/svc-headless.yaml @@ -0,0 +1,40 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if or .Values.commonAnnotations .Values.service.headless.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/svc.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/svc.yaml new file mode 100644 index 000000000..bfa8b928e --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/svc.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if or .Values.commonAnnotations .Values.service.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tls)) }} + nodePort: {{ .Values.service.nodePorts.tls }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/tls-secrets.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/tls-secrets.yaml new file mode 100644 index 000000000..373bc8626 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/templates/tls-secrets.yaml @@ -0,0 +1,56 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "zookeeper.client.createTlsSecret" .) }} +{{- $secretName := printf "%s-client-crt" (include "common.names.fullname" .) }} +{{- $ca := genCA "zookeeper-client-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . 
}} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsSecret" .) }} +{{- $secretName := printf "%s-quorum-crt" (include "common.names.fullname" .) }} +{{- $ca := genCA "zookeeper-quorum-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/charts/zookeeper/values.yaml b/manifest/helm-charts/infra/kafka/charts/zookeeper/values.yaml new file mode 100644 index 000000000..7e6ebbe11 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/charts/zookeeper/values.yaml @@ -0,0 +1,882 @@ +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0
+
+## @section Global parameters
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+  imageRegistry: ""
+  ## E.g.
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Extra objects to deploy (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## @param namespaceOverride Override namespace for ZooKeeper resources
+## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent
+##
+namespaceOverride: ""
+
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+  ##
+  enabled: false
+  ## @param diagnosticMode.command Command to override all containers in the statefulset
+  ##
+  command:
+    - sleep
+  ## @param diagnosticMode.args Args to override all containers in the statefulset
+  ##
+  args:
+    - infinity
+
+## @section ZooKeeper chart parameters
+
+## Bitnami ZooKeeper image version
+## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/
+## @param image.registry ZooKeeper image registry
+## @param image.repository ZooKeeper image repository
+## @param image.tag ZooKeeper image tag (immutable tags are recommended)
+## @param image.digest ZooKeeper image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy ZooKeeper image pull policy
+## @param image.pullSecrets Specify docker-registry secret names as an array
+## @param image.debug Specify if debug values should be set
+##
+image:
+  registry: docker.io
+  repository: bitnami/zookeeper
+  tag: 3.9.0-debian-11-r11
+  digest: ""
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Example:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Set to true if you would like to see extra information on logs
+  ##
+  debug: false
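+## For illustration only, a private-registry override might look like the
+## following sketch (the registry name and pull secret are placeholders):
+## image:
+##   registry: my-registry.example.com
+##   repository: bitnami/zookeeper
+##   pullSecrets:
+##     - my-registry-secret
+##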
+## Authentication parameters
+##
+auth:
+  client:
+    ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5
+    ##
+    enabled: false
+    ## @param auth.client.clientUser User that will be used by ZooKeeper clients to authenticate
+    ##
+    clientUser: ""
+    ## @param auth.client.clientPassword Password that will be used by ZooKeeper clients to authenticate
+    ##
+    clientPassword: ""
+    ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created
+    ## Specify them as a string, for example: "user1,user2,admin"
+    ##
+    serverUsers: ""
+    ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
+    ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+    ##
+    serverPasswords: ""
+    ## @param auth.client.existingSecret Use existing secret (ignores previous passwords)
+    ##
+    existingSecret: ""
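+  ## For reference, enabling SASL client-server authentication could look
+  ## like this sketch (every user and password below is a placeholder):
+  ## client:
+  ##   enabled: true
+  ##   clientUser: zkclient
+  ##   clientPassword: change-me
+  ##   serverUsers: "zkclient"
+  ##   serverPasswords: "change-me"
+  ##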
+  quorum:
+    ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5
+    ##
+    enabled: false
+    ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
+    ## Note: Make sure the user is included in auth.quorum.serverUsers
+    ##
+    learnerUser: ""
+    ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
+    ##
+    learnerPassword: ""
+    ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers.
+    ## Specify them as a string, for example: "user1,user2,admin"
+    ##
+    serverUsers: ""
+    ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
+    ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+    ##
+    serverPasswords: ""
+    ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords)
+    ##
+    existingSecret: ""
+## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats
+##
+tickTime: 2000
+## @param initLimit Limits the length of time (in ticks) that the ZooKeeper servers in quorum have to connect to a leader
+##
+initLimit: 10
+## @param syncLimit How far out of date a server can be from a leader
+##
+syncLimit: 5
+## @param preAllocSize Block size for transaction log file
+##
+preAllocSize: 65536
+## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled)
+##
+snapCount: 100000
+## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
+##
+maxClientCnxns: 60
+## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate
+## Defaults to 20 times the tickTime
+##
+maxSessionTimeout: 40000
+## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms)
+## This env var is ignored if Xmx and Xms are configured via `jvmFlags`
+##
+heapSize: 1024
+## @param fourlwCommandsWhitelist A list of comma-separated Four Letter Words commands that can be executed
+##
+fourlwCommandsWhitelist: srvr, mntr, ruok
+## @param minServerId Minimal SERVER_ID value; node IDs are incremented from it
+## Servers increment their ID starting at this minimal value.
+## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively.
+##
+minServerId: 1
+## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses
+##
+listenOnAllIPs: false
+## Ongoing data directory cleanup configuration
+##
+autopurge:
+  ## @param autopurge.snapRetainCount Number of most recent snapshots (and their corresponding transaction logs) to retain
+  ##
+  snapRetainCount: 3
+  ## @param autopurge.purgeInterval The time interval (in hours) between triggers of the purge task
+  ## Set to a positive integer to enable the auto purging
+  ##
+  purgeInterval: 0
+## @param logLevel Log level for the ZooKeeper server. ERROR by default
+## Keep in mind that if you set it to INFO or WARN the readiness probe will produce a lot of logs
+##
+logLevel: ERROR
+## @param jvmFlags Default JVM flags for the ZooKeeper process
+##
+jvmFlags: ""
+## @param dataLogDir Dedicated data log directory
+## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
+## E.g.
+## dataLogDir: /bitnami/zookeeper/dataLog
+##
+dataLogDir: ""
+## @param configuration Configure ZooKeeper with a custom zoo.cfg file
+## e.g:
+## configuration: |-
+##   tickTime=2000
+##   initLimit=10
+##   ...
+## +configuration: "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper +## NOTE: When it's set the `configuration` parameter is ignored +## +existingConfigmap: "" +## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes +## +extraEnvVarsSecret: "" +## @param command Override default container command (useful when using custom images) +## +command: + - /scripts/setup.sh +## @param args Override default container args (useful when using custom images) +## +args: [] + +## @section Statefulset parameters + +## @param replicaCount Number of ZooKeeper nodes +## +replicaCount: 1 +## @param containerPorts.client ZooKeeper client container port +## @param containerPorts.tls ZooKeeper TLS container port +## @param containerPorts.follower ZooKeeper follower container port +## @param containerPorts.election ZooKeeper election container port +## +containerPorts: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 +## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + 
failureThreshold: 15
+  successThreshold: 1
+## @param customLivenessProbe Custom livenessProbe that overrides the default one
+##
+customLivenessProbe: {}
+## @param customReadinessProbe Custom readinessProbe that overrides the default one
+##
+customReadinessProbe: {}
+## @param customStartupProbe Custom startupProbe that overrides the default one
+##
+customStartupProbe: {}
+## @param lifecycleHooks Lifecycle hooks for the ZooKeeper container(s) to automate configuration before or after startup
+##
+lifecycleHooks: {}
+## ZooKeeper resource requests and limits
+## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+## @param resources.limits The resources limits for the ZooKeeper containers
+## @param resources.requests.memory The requested memory for the ZooKeeper containers
+## @param resources.requests.cpu The requested cpu for the ZooKeeper containers
+##
+resources:
+  limits: {}
+  requests:
+    memory: 256Mi
+    cpu: 250m
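+## As an illustrative override only (the figures are placeholders, not sizing
+## recommendations), a larger deployment might set:
+## resources:
+##   limits:
+##     memory: 1Gi
+##   requests:
+##     memory: 512Mi
+##     cpu: 500m
+##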
+## Configure Pods Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enable ZooKeeper pods' Security Context
+## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enable ZooKeeper containers' Security Context
+## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser
+## @param containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot
+## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+  allowPrivilegeEscalation: false
+## @param hostAliases ZooKeeper pods host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param podLabels Extra labels for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+## @param podAnnotations Annotations for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+##
+nodeAffinityPreset:
+  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ##
+  type: ""
+  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## key: "kubernetes.io/e2e-az-name"
+  ##
+  key: ""
+  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## values:
+  ##   - e2e-az1
+  ##   - e2e-az2
+  ##
+  values: []
+## @param affinity Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+##
+affinity: {}
+## @param nodeSelector Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
+topologySpreadConstraints: []
+## @param podManagementPolicy The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel`
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods; the priority class needs to be created beforehand
+## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+priorityClassName: ""
+## @param schedulerName Name of the Kubernetes scheduler to use (other than the default one)
+## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## @param updateStrategy.type ZooKeeper statefulset strategy type
+## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+  type: RollingUpdate
+  rollingUpdate: {}
+## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumes:
+##   - name: zookeeper-keystore
+##     secret:
+##       defaultMode: 288
+##       secretName: zookeeper-keystore
+##   - name: zookeeper-truststore
+##     secret:
+##       defaultMode: 288
+##       secretName: zookeeper-truststore
+##
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumeMounts:
+##   - name: zookeeper-keystore
+##     mountPath: /certs/keystore
+##     readOnly: true
+##   - name: zookeeper-truststore
+##     mountPath: /certs/truststore
+##     readOnly: true
+##
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s)
+## e.g:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+sidecars: []
+## @param initContainers Add additional init containers to the ZooKeeper pod(s)
+## Example:
+## initContainers:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+initContainers: []
+## ZooKeeper Pod Disruption Budget
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+## @param pdb.create Deploy a pdb object for the ZooKeeper pod
+## @param pdb.minAvailable Minimum available ZooKeeper replicas
+## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas
+##
+pdb:
+  create: false
+  minAvailable: ""
+  maxUnavailable: 1
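+## A hedged example (assumes a 3-replica ensemble): keep quorum by allowing at most
+## one pod to be disrupted at a time:
+## pdb:
+##   create: true
+##   maxUnavailable: 1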
+
+## @section Traffic Exposure parameters
+
+service:
+  ## @param service.type Kubernetes Service type
+  ##
+  type: ClusterIP
+  ## @param service.ports.client ZooKeeper client service port
+  ## @param service.ports.tls ZooKeeper TLS service port
+  ## @param service.ports.follower ZooKeeper follower service port
+  ## @param service.ports.election ZooKeeper election service port
+  ##
+  ports:
+    client: 2181
+    tls: 3181
+    follower: 2888
+    election: 3888
+  ## Node ports to expose
+  ## NOTE: choose port between <30000-32767>
+  ## @param service.nodePorts.client Node port for clients
+  ## @param service.nodePorts.tls Node port for TLS
+  ##
+  nodePorts:
+    client: ""
+    tls: ""
+  ## @param service.disableBaseClientPort Remove client port from service definitions.
+  ##
+  disableBaseClientPort: false
+  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
+  ## Values: ClientIP or None
+  ## ref: https://kubernetes.io/docs/user-guide/services/
+  ##
+  sessionAffinity: None
+  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
+  ## sessionAffinityConfig:
+  ##   clientIP:
+  ##     timeoutSeconds: 300
+  ##
+  sessionAffinityConfig: {}
+  ## @param service.clusterIP ZooKeeper service Cluster IP
+  ## e.g.:
+  ## clusterIP: None
+  ##
+  clusterIP: ""
+  ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP
+  ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  loadBalancerIP: ""
+  ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources
+  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ## e.g:
+  ## loadBalancerSourceRanges:
+  ##   - 10.10.10.0/24
+  ##
+  loadBalancerSourceRanges: []
+  ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy
+  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## @param service.annotations Additional custom annotations for ZooKeeper service
+  ##
+  annotations: {}
+  ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value)
+  ##
+  extraPorts: []
+  ## @param service.headless.annotations Annotations for the Headless Service
+  ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods
+  ## @param service.headless.servicenameOverride String to partially override headless service name
+  ##
+  headless:
+    publishNotReadyAddresses: true
+    annotations: {}
+    servicenameOverride: ""
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: false
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## When set to false, only pods with the correct client label will have network access to the port ZooKeeper is
+  ## listening on. When true, ZooKeeper accepts connections from any source (with the correct destination port).
+  ##
+  allowExternal: true
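+  ## A hedged sketch (the label name below assumes the Bitnami convention of
+  ## "<fullname>-client=true" and is chart-specific): with `allowExternal: false`,
+  ## grant a client pod access by labelling it, e.g.:
+  ##   kubectl label pod my-client my-release-zookeeper-client=true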
+
+## @section Other Parameters
+
+## Service account for ZooKeeper to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod
+  ##
+  create: false
+  ## @param serviceAccount.name The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the common.names.fullname template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Allows automounting of the ServiceAccountToken on the created ServiceAccount
+  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+
+## @section Persistence parameters
+
+## Enable persistence using Persistent Volume Claims
+## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir
+  ##
+  enabled: true
+  ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica)
+  ##
+  existingClaim: ""
+  ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ## set, choosing the default provisioner. (gp2 on AWS, standard on
+  ## GKE, AWS & OpenStack)
+  ##
+  storageClass: ""
+  ## @param persistence.accessModes PVC Access modes
+  ##
+  accessModes:
+    - ReadWriteOnce
+  ## @param persistence.size PVC Storage Request for ZooKeeper data volume
+  ##
+  size: 8Gi
+  ## @param persistence.annotations Annotations for the PVC
+  ##
+  annotations: {}
+  ## @param persistence.labels Labels for the PVC
+  ##
+  labels: {}
+  ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC
+  ## If set, the PVC can't have a PV dynamically provisioned for it
+  ## E.g.
+  ## selector:
+  ##   matchLabels:
+  ##     app: my-app
+  ##
+  selector: {}
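+  ## A hedged example ("fast-ssd" is a placeholder class name): request a larger
+  ## volume from a specific storage class:
+  ## persistence:
+  ##   enabled: true
+  ##   storageClass: "fast-ssd"
+  ##   size: 16Gi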
+  ## Persistence for a dedicated data log directory
+  ##
+  dataLogDir:
+    ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory
+    ##
+    size: 8Gi
+    ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory
+    ## If defined, PVC must be created manually before volume will be bound
+    ## The value is evaluated as a template
+    ##
+    existingClaim: ""
+    ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC
+    ## If set, the PVC can't have a PV dynamically provisioned for it
+    ## E.g.
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image repository
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/os-shell
+    tag: 11-debian-11-r51
+    digest: ""
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container's Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.enabled Enable init container Security Context
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 0
+
+## @section Metrics parameters
+##
+
+## ZooKeeper Prometheus Exporter configuration
+##
+metrics:
+  ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint
+  ##
+  enabled: false
+  ## @param metrics.containerPort ZooKeeper Prometheus Exporter container port
+  ##
+  containerPort: 9141
+  ## Service configuration
+  ##
+  service:
+    ## @param metrics.service.type ZooKeeper Prometheus Exporter service type
+    ##
+    type: ClusterIP
+    ## @param metrics.service.port ZooKeeper Prometheus Exporter service port
+    ##
+    port: 9141
+    ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "{{ .Values.metrics.service.port }}"
+      prometheus.io/path: "/metrics"
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels parameter for the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
+    ##
+    jobLabel: ""
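+  ## A hedged sketch (assumes the Prometheus Operator CRDs are installed and that
+  ## your Prometheus selects ServiceMonitors via a "release: prometheus" label,
+  ## which is deployment-specific):
+  ## metrics:
+  ##   enabled: true
+  ##   serviceMonitor:
+  ##     enabled: true
+  ##     additionalLabels:
+  ##       release: prometheus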
+  ## Prometheus Operator PrometheusRule configuration
+  ##
+  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.prometheusRule.rules PrometheusRule definitions
+    ## - alert: ZooKeeperSyncedFollowers
+    ##   annotations:
+    ##     message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+    ##   expr: max(synced_followers{service="my-release-metrics"}) < 2
+    ##   for: 5m
+    ##   labels:
+    ##     severity: critical
+    ## - alert: ZooKeeperOutstandingRequests
+    ##   annotations:
+    ##     message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+    ##   expr: outstanding_requests{service="my-release-metrics"} > 10
+    ##   for: 5m
+    ##   labels:
+    ##     severity: critical
+    ##
+    rules: []
+
+## @section TLS/SSL parameters
+##
+
+## Enable SSL/TLS encryption
+##
+tls:
+  client:
+    ## @param tls.client.enabled Enable TLS for client connections
+    ##
+    enabled: false
+    ## @param tls.client.auth SSL Client auth. Can be "none", "want" or "need".
+    ##
+    auth: "none"
+    ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications
+    ## Currently only supports PEM certificates
+    ##
+    autoGenerated: false
+    ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications
+    ##
+    existingSecret: ""
+    ## @param tls.client.existingSecretKeystoreKey The secret key from the tls.client.existingSecret containing the Keystore.
+    ##
+    existingSecretKeystoreKey: ""
+    ## @param tls.client.existingSecretTruststoreKey The secret key from the tls.client.existingSecret containing the Truststore.
+    ##
+    existingSecretTruststoreKey: ""
+    ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections
+    ##
+    keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks
+    ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections
+    ##
+    truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks
+    ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords
+    ##
+    passwordsSecretName: ""
+    ## @param tls.client.passwordsSecretKeystoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Keystore.
+    ##
+    passwordsSecretKeystoreKey: ""
+    ## @param tls.client.passwordsSecretTruststoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Truststore.
+    ##
+    passwordsSecretTruststoreKey: ""
+    ## @param tls.client.keystorePassword Password to access KeyStore if needed
+    ##
+    keystorePassword: ""
+    ## @param tls.client.truststorePassword Password to access TrustStore if needed
+    ##
+    truststorePassword: ""
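+  ## A hedged sketch (secret names are placeholders): serve client traffic over TLS
+  ## from pre-created JKS stores, requiring client certificates:
+  ## tls:
+  ##   client:
+  ##     enabled: true
+  ##     auth: "need"
+  ##     existingSecret: "zookeeper-client-tls"
+  ##     passwordsSecretName: "zookeeper-client-tls-passwords"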
+  quorum:
+    ## @param tls.quorum.enabled Enable TLS for quorum protocol
+    ##
+    enabled: false
+    ## @param tls.quorum.auth SSL Quorum Client auth. Can be "none", "want" or "need".
+    ##
+    auth: "none"
+    ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates.
+    ##
+    autoGenerated: false
+    ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol
+    ##
+    existingSecret: ""
+    ## @param tls.quorum.existingSecretKeystoreKey The secret key from the tls.quorum.existingSecret containing the Keystore.
+    ##
+    existingSecretKeystoreKey: ""
+    ## @param tls.quorum.existingSecretTruststoreKey The secret key from the tls.quorum.existingSecret containing the Truststore.
+    ##
+    existingSecretTruststoreKey: ""
+    ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol
+    ##
+    keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks
+    ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol
+    ##
+    truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks
+    ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords
+    ##
+    passwordsSecretName: ""
+    ## @param tls.quorum.passwordsSecretKeystoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore.
+    ##
+    passwordsSecretKeystoreKey: ""
+    ## @param tls.quorum.passwordsSecretTruststoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore.
+    ##
+    passwordsSecretTruststoreKey: ""
+    ## @param tls.quorum.keystorePassword Password to access KeyStore if needed
+    ##
+    keystorePassword: ""
+    ## @param tls.quorum.truststorePassword Password to access TrustStore if needed
+    ##
+    truststorePassword: ""
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param tls.resources.limits The resources limits for the TLS init container
+  ## @param tls.resources.requests The requested resources for the TLS init container
+  ##
+  resources:
+    limits: {}
+    requests: {}
diff --git a/manifest/helm-charts/infra/kafka/templates/NOTES.txt b/manifest/helm-charts/infra/kafka/templates/NOTES.txt
new file mode 100644
index 000000000..baeb77944
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/NOTES.txt
@@ -0,0 +1,317 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing:
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <pod-name> -- bash
+
+In order to replicate the container startup scripts, execute this command:
+
+  /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh
+
+{{- else }}
+
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $clientPort := int .Values.listeners.client.containerPort }}
+
+{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq (upper .Values.listeners.external.protocol) "PLAINTEXT") }}
+---------------------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "serviceType=LoadBalancer" and not configuring authentication,
+    you have most likely exposed the Kafka service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As an alternative, you can also configure Kafka authentication.
+
+---------------------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+Kafka can be accessed by consumers via port {{ $clientPort }} on the following DNS name from within your cluster:
+
+    {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}
+
+Each Kafka broker can be accessed by producers via port {{ $clientPort }} on the following DNS name(s) from within your cluster:
+
+{{- $brokerList := list }}
+{{- range $i := until (int .Values.controller.replicaCount) }}
+{{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $clientPort) }}
+{{- end }}
+{{- range $i := until (int .Values.broker.replicaCount) }}
+{{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $clientPort) }}
+{{- end }}
+{{ join "\n" $brokerList | nindent 4 }}
+{{- $clientSaslEnabled := regexFind "SASL" (upper .Values.listeners.client.protocol) }}
+{{- $clientSslEnabled := regexFind "SSL" (upper .Values.listeners.client.protocol) }}
+{{- $clientMTlsEnabled := or (and .Values.listeners.client.sslClientAuth (not (eq .Values.listeners.client.sslClientAuth "none"))) (and (empty .Values.listeners.client.sslClientAuth) (not (eq .Values.tls.sslClientAuth "none"))) }}
+{{- if or $clientSaslEnabled $clientSslEnabled }}
+
+The {{ upper .Values.listeners.client.name }} listener for Kafka client connections from within your cluster has been configured with the following security settings:
+    {{- if $clientSaslEnabled }}
+    - SASL authentication
+    {{- end }}
+    {{- if $clientSslEnabled }}
+    - TLS encryption
+    {{- end }}
+    {{- if and $clientSslEnabled $clientMTlsEnabled }}
+    - mTLS authentication
+    {{- end }}
+
+To connect a client to your Kafka, you need to create the 'client.properties' configuration file with the content below:
+
+security.protocol={{ .Values.listeners.client.protocol }}
+{{- if $clientSaslEnabled }}
+{{- if regexFind "SCRAM-SHA-256" (upper .Values.sasl.enabledMechanisms) }}
+sasl.mechanism=SCRAM-SHA-256
+{{- else if regexFind "SCRAM-SHA-512" (upper .Values.sasl.enabledMechanisms) }}
+sasl.mechanism=SCRAM-SHA-512
+{{- else if regexFind "PLAIN" (upper .Values.sasl.enabledMechanisms) }}
+sasl.mechanism=PLAIN
+{{- end }}
+{{- $securityModule := ternary "org.apache.kafka.common.security.scram.ScramLoginModule required" "org.apache.kafka.common.security.plain.PlainLoginModule required" (regexMatch "SCRAM" (upper .Values.sasl.enabledMechanisms)) }}
+sasl.jaas.config={{ $securityModule }} \
+    username="{{ index .Values.sasl.client.users 0 }}" \
+    password="$(kubectl get secret {{ $fullname }}-user-passwords --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)";
+{{- end }}
+{{- if $clientSslEnabled }}
+{{- $clientTlsType := upper .Values.tls.type }}
+ssl.truststore.type={{ $clientTlsType }}
+{{- if eq $clientTlsType "JKS" }}
+ssl.truststore.location=/tmp/kafka.truststore.jks
+# Uncomment this line if your client truststore is password protected
+#ssl.truststore.password=
+{{- else if eq $clientTlsType "PEM" }}
+ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+{{- end }}
+{{- if and $clientMTlsEnabled }}
+ssl.keystore.type={{ $clientTlsType }}
+{{- if eq $clientTlsType "JKS" }}
+ssl.keystore.location=/tmp/client.keystore.jks
+# Uncomment this line if your client keystore is password protected
+#ssl.keystore.password=
+{{- else if eq $clientTlsType "PEM" }}
+ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \
+... \
+-----END ENCRYPTED PRIVATE KEY-----
+{{- end }}
+{{- end }}
+{{- if eq .Values.tls.endpointIdentificationAlgorithm "" }}
+ssl.endpoint.identification.algorithm=
+{{- end }}
+{{- end }}
+{{- end }}
+
+To create a pod that you can use as a Kafka client, run the following commands:
+
+    kubectl run {{ $fullname }}-client --restart='Never' --image {{ template "kafka.image" . }} --namespace {{ $releaseNamespace }} --command -- sleep infinity
+    {{- if or $clientSaslEnabled $clientSslEnabled }}
+    kubectl cp --namespace {{ $releaseNamespace }} /path/to/client.properties {{ $fullname }}-client:/tmp/client.properties
+    {{- end }}
+    {{- if and $clientSslEnabled (eq (upper .Values.tls.type) "JKS") }}
+    kubectl cp --namespace {{ $releaseNamespace }} ./kafka.truststore.jks {{ $fullname }}-client:/tmp/kafka.truststore.jks
+    {{- if $clientMTlsEnabled }}
+    kubectl cp --namespace {{ $releaseNamespace }} ./client.keystore.jks {{ $fullname }}-client:/tmp/client.keystore.jks
+    {{- end }}
+    {{- end }}
+    kubectl exec --tty -i {{ $fullname }}-client --namespace {{ $releaseNamespace }} -- bash
+
+    PRODUCER:
+        kafka-console-producer.sh \
+            {{- if or $clientSaslEnabled $clientSslEnabled }}
+            --producer.config /tmp/client.properties \
+            {{- end }}
+            --broker-list {{ join "," $brokerList }} \
+            --topic test
+
+    CONSUMER:
+        kafka-console-consumer.sh \
+            {{- if or $clientSaslEnabled $clientSslEnabled }}
+            --consumer.config /tmp/client.properties \
+            {{- end }}
+            --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }} \
+            --topic test \
+            --from-beginning
+
+{{- if .Values.externalAccess.enabled }}
+{{- if or (not .Values.kraft.enabled) (not .Values.controller.controllerOnly) .Values.externalAccess.controller.forceExpose }}
+
+{{- if not .Values.kraft.enabled }}
+To connect to your Kafka nodes from outside the cluster, follow these instructions:
+{{- else if and .Values.controller.controllerOnly .Values.externalAccess.controller.forceExpose }}
+To connect to your Kafka controller-only nodes from outside the cluster, follow these instructions:
+{{- else }}
+To connect to your Kafka controller+broker nodes from outside the cluster, follow these instructions:
+{{- end }}
+
+{{- if eq "NodePort" .Values.externalAccess.controller.service.type }}
+    {{- if .Values.externalAccess.controller.service.domain }}
+    Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.controller.service.domain }}
+
+    {{- else }}
+    Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener)
+
+        1. Obtain the pod name:
+
+        kubectl get pods --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka"
+
+        2.
Obtain pod configuration: + + kubectl exec -it KAFKA_POD -- cat /opt/bitnami/kafka/config/server.properties | grep advertised.listeners + + {{- end }} + Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if eq "LoadBalancer" .Values.externalAccess.controller.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + + Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w' + + Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + Kafka Brokers port: {{ .Values.externalAccess.controller.service.ports.external }} + +{{- else if eq "ClusterIP" .Values.externalAccess.controller.service.type }} + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.controller.service.domain }} + + Kafka brokers port: You will have a different port for each Kafka broker starting at {{ .Values.externalAccess.controller.service.ports.external }} + +{{- end }} +{{- end }} + +{{- $brokerReplicaCount := int .Values.broker.replicaCount -}} +{{- if gt $brokerReplicaCount 0 }} +To connect to your Kafka broker nodes from outside the cluster, follow these instructions: + +{{- if eq "NodePort" .Values.externalAccess.broker.service.type }} + {{- if .Values.externalAccess.broker.service.domain }} + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.broker.service.domain }} + + {{- else }} + Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener) + + 1. Obtain the pod name: + + kubectl get pods --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka" + + 2. Obtain pod configuration: + + kubectl exec -it KAFKA_POD -- cat /opt/bitnami/kafka/config/server.properties | grep advertised.listeners + + {{- end }} + Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if eq "LoadBalancer" .Values.externalAccess.broker.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. 
+
+    Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w'
+
+    Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below:
+
+        echo "$(kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')"
+
+    Kafka Brokers port: {{ .Values.externalAccess.broker.service.ports.external }}
+
+{{- else if eq "ClusterIP" .Values.externalAccess.broker.service.type }}
+    Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.broker.service.domain }}
+
+    Kafka brokers port: You will have a different port for each Kafka broker starting at {{ .Values.externalAccess.broker.service.ports.external }}
+
+{{- end }}
+{{- end }}
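+
+    As a quick smoke test from outside the cluster (illustrative only; replace
+    NODE_IP and NODE_PORT with an address and port obtained above, and pass
+    --producer.config client.properties when authentication or TLS is enabled):
+
+        kafka-console-producer.sh --broker-list NODE_IP:NODE_PORT --topic test
+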
+{{- if or $clientSaslEnabled $clientSslEnabled }}
+{{- $externalSaslEnabled := regexFind "SASL" (upper .Values.listeners.external.protocol) }}
+{{- $externalSslEnabled := regexFind "SSL" (upper .Values.listeners.external.protocol) }}
+{{- $externalMTlsEnabled := or (and .Values.listeners.external.sslClientAuth (not (eq .Values.listeners.external.sslClientAuth "none"))) (and (empty .Values.listeners.external.sslClientAuth) (not (eq .Values.tls.sslClientAuth "none"))) }}
+
+The {{ upper .Values.listeners.external.name }} listener for Kafka client connections from outside the cluster has been configured with the following settings:
+    {{- if $externalSaslEnabled }}
+    - SASL authentication
+    {{- end }}
+    {{- if $externalSslEnabled }}
+    - TLS encryption
+    {{- end }}
+    {{- if and $externalSslEnabled $externalMTlsEnabled }}
+    - mTLS authentication
+    {{- end }}
+
+To connect a client to your Kafka, you need to create the 'client.properties' configuration file with the content below:
+
+security.protocol={{ .Values.listeners.external.protocol }}
+{{- if $externalSaslEnabled }}
+{{- if regexFind "SCRAM-SHA-256" (upper .Values.sasl.enabledMechanisms) }}
+sasl.mechanism=SCRAM-SHA-256
+{{- else if regexFind "SCRAM-SHA-512" (upper .Values.sasl.enabledMechanisms) }}
+sasl.mechanism=SCRAM-SHA-512
+{{- else }}
+sasl.mechanism=PLAIN
+{{- end }}
+{{- $securityModule := ternary "org.apache.kafka.common.security.scram.ScramLoginModule required" "org.apache.kafka.common.security.plain.PlainLoginModule required" (regexMatch "SCRAM" (upper .Values.sasl.enabledMechanisms)) }}
+sasl.jaas.config={{ $securityModule }} \
+    username="{{ index .Values.sasl.client.users 0 }}" \
+    password="$(kubectl get secret {{ $fullname }}-user-passwords --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)";
+{{- end }}
+{{- if $externalSslEnabled }}
+{{- $clientTlsType := upper .Values.tls.type }}
+ssl.truststore.type={{ $clientTlsType }}
+{{- if eq $clientTlsType "JKS" }}
+ssl.truststore.location=/tmp/kafka.truststore.jks
+# Uncomment this line if your client truststore is password protected
+#ssl.truststore.password=
+{{- else if eq $clientTlsType "PEM" }}
+ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+{{- end }}
+{{- if and $externalMTlsEnabled }}
+ssl.keystore.type={{ $clientTlsType }}
+{{- if eq $clientTlsType "JKS" }}
+ssl.keystore.location=/tmp/client.keystore.jks
+# Uncomment this line if your client keystore is password protected
+#ssl.keystore.password=
+{{- else if eq $clientTlsType "PEM" }}
+ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \
+... \
+-----END ENCRYPTED PRIVATE KEY-----
+{{- end }}
+{{- end }}
+{{- if eq .Values.tls.endpointIdentificationAlgorithm "" }}
+ssl.endpoint.identification.algorithm=
+{{- end }}
+{{- end }}
+
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- include "kafka.checkRollingTags" . }}
+{{- include "kafka.validateValues" . }}
diff --git a/manifest/helm-charts/infra/kafka/templates/_helpers.tpl b/manifest/helm-charts/infra/kafka/templates/_helpers.tpl
new file mode 100644
index 000000000..a5a83c675
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/_helpers.tpl
@@ -0,0 +1,1162 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kafka.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified zookeeper name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "kafka.zookeeper.fullname" -}}
+{{- if .Values.zookeeper.fullnameOverride -}}
+{{- .Values.zookeeper.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "kafka.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .)
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Kafka image name +*/}} +{{- define "kafka.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container auto-discovery image) +*/}} +{{- define "kafka.externalAccess.autoDiscovery.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "kafka.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Kafka exporter image name +*/}} +{{- define "kafka.metrics.kafka.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.kafka.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper JMX exporter image name +*/}} +{{- define "kafka.metrics.jmx.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.jmx.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "kafka.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.externalAccess.autoDiscovery.image .Values.volumePermissions.image .Values.metrics.kafka.image .Values.metrics.jmx.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Create a default fully qualified Kafka exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "kafka.metrics.kafka.fullname" -}} + {{- printf "%s-exporter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* + Create the name of the service account to use for Kafka exporter pods + */}} +{{- define "kafka.metrics.kafka.serviceAccountName" -}} +{{- if .Values.metrics.kafka.serviceAccount.create -}} + {{ default (include "kafka.metrics.kafka.fullname" .) .Values.metrics.kafka.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.metrics.kafka.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for client connections should be configured +*/}} +{{- define "kafka.sslEnabled" -}} +{{- $res := "" -}} +{{- $listeners := list .Values.listeners.client .Values.listeners.interbroker -}} +{{- range $i := .Values.listeners.extraListeners -}} +{{- $listeners = append $listeners $i -}} +{{- end -}} +{{- if and .Values.externalAccess.enabled -}} +{{- $listeners = append $listeners .Values.listeners.external -}} +{{- end -}} +{{- if and .Values.kraft.enabled -}} +{{- $listeners = append $listeners .Values.listeners.controller -}} +{{- end -}} +{{- range $listener := $listeners -}} +{{- if regexFind "SSL" (upper $listener.protocol) -}} +{{- $res = "true" -}} +{{- end -}} +{{- end -}} +{{- if $res -}} +{{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if SASL connections should be configured +*/}} +{{- define "kafka.saslEnabled" -}} +{{- $res := "" -}} +{{- if (include "kafka.client.saslEnabled" .) 
-}}
+{{- $res = "true" -}}
+{{- else -}}
+{{- $listeners := list .Values.listeners.interbroker -}}
+{{- if and .Values.kraft.enabled -}}
+{{- $listeners = append $listeners .Values.listeners.controller -}}
+{{- end -}}
+{{- range $listener := $listeners -}}
+{{- if regexFind "SASL" (upper $listener.protocol) -}}
+{{- $res = "true" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- if $res -}}
+{{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if SASL should be configured for the client, external or extra listeners
+*/}}
+{{- define "kafka.client.saslEnabled" -}}
+{{- $res := "" -}}
+{{- $listeners := list .Values.listeners.client -}}
+{{- range $i := .Values.listeners.extraListeners -}}
+{{- $listeners = append $listeners $i -}}
+{{- end -}}
+{{- if and .Values.externalAccess.enabled -}}
+{{- $listeners = append $listeners .Values.listeners.external -}}
+{{- end -}}
+{{- range $listener := $listeners -}}
+{{- if regexFind "SASL" (upper $listener.protocol) -}}
+{{- $res = "true" -}}
+{{- end -}}
+{{- end -}}
+{{- if $res -}}
+{{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka SASL credentials secret
+*/}}
+{{- define "kafka.saslSecretName" -}}
+{{- if .Values.sasl.existingSecret -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.sasl.existingSecret "context" $) -}}
+{{- else -}}
+    {{- printf "%s-user-passwords" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a SASL credentials secret object should be created
+*/}}
+{{- define "kafka.createSaslSecret" -}}
+{{- $secretName := .Values.sasl.existingSecret -}}
+{{- if and (or (include "kafka.saslEnabled" .) (or .Values.zookeeper.auth.client.enabled .Values.sasl.zookeeper.user)) (empty $secretName) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka TLS credentials secret name
+*/}}
+{{- define "kafka.tlsSecretName" -}}
+{{- if .Values.tls.existingSecret -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.tls.existingSecret "context" $) -}}
+{{- else -}}
+    {{- printf "%s-tls" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "kafka.createTlsSecret" -}}
+{{- if and (include "kafka.sslEnabled" .) (empty .Values.tls.existingSecret) .Values.tls.autoGenerated -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka TLS passwords secret name
+*/}}
+{{- define "kafka.tlsPasswordsSecretName" -}}
+{{- if .Values.tls.passwordsSecret -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.tls.passwordsSecret "context" $) -}}
+{{- else -}}
+    {{- printf "%s-tls-passwords" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS passwords secret object should be created
+*/}}
+{{- define "kafka.createTlsPasswordsSecret" -}}
+{{- $secretName := .Values.tls.passwordsSecret -}}
+{{- if and (include "kafka.sslEnabled" .) (or (empty $secretName) .Values.tls.autoGenerated ) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the ZooKeeper TLS passwords secret name
+*/}}
+{{- define "kafka.zookeeper.tlsPasswordsSecretName" -}}
+{{- if .Values.tls.zookeeper.passwordsSecret -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.tls.zookeeper.passwordsSecret "context" $) -}}
+{{- else -}}
+    {{- printf "%s-zookeeper-tls-passwords" (include "common.names.fullname" .)
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "kafka.zookeeper.createTlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.zookeeper.passwordsSecret -}} +{{- if and .Values.tls.zookeeper.enabled (or (empty $secretName) .Values.tls.zookeeper.keystorePassword .Values.tls.zookeeper.truststorePassword ) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the secret name for the Kafka Provisioning client +*/}} +{{- define "kafka.client.passwordsSecretName" -}} +{{- if .Values.provisioning.auth.tls.passwordsSecret -}} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.auth.tls.passwordsSecret "context" $) -}} +{{- else -}} + {{- printf "%s-client-secret" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the Kafka Provisioning client +*/}} +{{- define "kafka.provisioning.serviceAccountName" -}} +{{- if .Values.provisioning.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.provisioning.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.provisioning.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka controller-eligible configuration configmap +*/}} +{{- define "kafka.controller.configmapName" -}} +{{- if .Values.controller.existingConfigmap -}} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.existingConfigmap "context" $) -}} +{{- else if .Values.existingConfigmap -}} + {{- include "common.tplvalues.render" (dict "value" .Values.existingConfigmap "context" $) -}} +{{- else -}} + {{- printf "%s-controller-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka controller-eligible secret configuration +*/}} +{{- define "kafka.controller.secretConfigName" -}} +{{- if .Values.controller.existingSecretConfig -}} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.existingSecretConfig "context" $) -}} +{{- else if .Values.existingSecretConfig -}} + {{- include "common.tplvalues.render" (dict "value" .Values.existingSecretConfig "context" $) -}} +{{- else -}} + {{- printf "%s-controller-secret-configuration" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka controller-eligible secret configuration values +*/}} +{{- define "kafka.controller.secretConfig" -}} +{{- if .Values.secretConfig }} +{{- include "common.tplvalues.render" ( dict "value" .Values.secretConfig "context" $ ) }} +{{- end }} +{{- if .Values.controller.secretConfig }} +{{- include "common.tplvalues.render" ( dict "value" .Values.controller.secretConfig "context" $ ) }} +{{- end }} +{{- end -}} + +{{/* +Return true if a configmap object should be created for controller-eligible pods +*/}} +{{- define "kafka.controller.createConfigmap" -}} +{{- if and (not .Values.controller.existingConfigmap) (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object with config should be created for controller-eligible pods +*/}} +{{- define "kafka.controller.createSecretConfig" -}} +{{- if and (or .Values.controller.secretConfig .Values.secretConfig) (and (not .Values.controller.existingSecretConfig) (not .Values.existingSecretConfig)) }} + {{- true -}} +{{- end -}} +{{- end -}} +{{/* +Return true if a secret object with config exists for controller-eligible pods +*/}} +{{- define "kafka.controller.secretConfigExists" -}} +{{- if or .Values.controller.secretConfig .Values.secretConfig .Values.controller.existingSecretConfig .Values.existingSecretConfig }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka broker configuration configmap +*/}} +{{- define "kafka.broker.configmapName" -}} +{{- if .Values.broker.existingConfigmap -}} + {{- printf "%s" (tpl .Values.broker.existingConfigmap $) -}} +{{- else if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-broker-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka broker secret configuration +*/}} +{{- define "kafka.broker.secretConfigName" -}} +{{- if .Values.broker.existingSecretConfig -}} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.existingSecretConfig "context" $) -}} +{{- else if .Values.existingSecretConfig -}} + {{- include "common.tplvalues.render" (dict "value" .Values.existingSecretConfig "context" $) -}} +{{- else -}} + {{- printf "%s-broker-secret-configuration" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka broker secret configuration values +*/}} +{{- define "kafka.broker.secretConfig" -}} +{{- if .Values.secretConfig }} +{{- include "common.tplvalues.render" ( dict "value" .Values.secretConfig "context" $ ) }} +{{- end }} +{{- if .Values.broker.secretConfig }} +{{- include "common.tplvalues.render" ( dict "value" .Values.broker.secretConfig "context" $ ) }} +{{- end }} +{{- end -}} + +{{/* +Return true if a configmap object should be created for broker pods +*/}} +{{- define "kafka.broker.createConfigmap" -}} +{{- if and (not .Values.broker.existingConfigmap) (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object with config should be created for broker pods +*/}} +{{- define "kafka.broker.createSecretConfig" -}} +{{- if and (or .Values.broker.secretConfig .Values.secretConfig) (and (not .Values.broker.existingSecretConfig) (not .Values.existingSecretConfig)) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object with config exists for broker pods +*/}} +{{- define "kafka.broker.secretConfigExists" -}} +{{- if or .Values.broker.secretConfig .Values.secretConfig .Values.broker.existingSecretConfig .Values.existingSecretConfig }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka log4j ConfigMap name. +*/}} +{{- define "kafka.log4j.configMapName" -}} +{{- if .Values.existingLog4jConfigMap -}} + {{- include "common.tplvalues.render" (dict "value" .Values.existingLog4jConfigMap "context" $) -}} +{{- else -}} + {{- printf "%s-log4j-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the SASL mechanism to use for the Kafka exporter to access Kafka +The exporter uses a different nomenclature so we need to do this hack +*/}} +{{- define "kafka.metrics.kafka.saslMechanism" -}} +{{- $saslMechanisms := .Values.sasl.enabledMechanisms }} +{{- if contains "SCRAM-SHA-512" (upper $saslMechanisms) }} + {{- print "scram-sha512" -}} +{{- else if contains "SCRAM-SHA-256" (upper $saslMechanisms) }} + {{- print "scram-sha256" -}} +{{- else if contains "PLAIN" (upper $saslMechanisms) }} + {{- print "plain" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka configuration configmap +*/}} +{{- define "kafka.metrics.jmx.configmapName" -}} +{{- if .Values.metrics.jmx.existingConfigmap -}} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.jmx.existingConfigmap "context" $) -}} +{{- else -}} + {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) 
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created
+*/}}
+{{- define "kafka.metrics.jmx.createConfigmap" -}}
+{{- if and .Values.metrics.jmx.enabled .Values.metrics.jmx.config (not .Values.metrics.jmx.existingConfigmap) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the Kafka listeners settings based on the listeners.* object
+*/}}
+{{- define "kafka.listeners" -}}
+{{- if .context.Values.listeners.overrideListeners -}}
+    {{- printf "%s" .context.Values.listeners.overrideListeners -}}
+{{- else -}}
+    {{- $listeners := list .context.Values.listeners.client .context.Values.listeners.interbroker -}}
+    {{- if and .context.Values.externalAccess.enabled -}}
+    {{- $listeners = append $listeners .context.Values.listeners.external -}}
+    {{- end -}}
+    {{- if and .context.Values.kraft.enabled .isController -}}
+    {{- if and .context.Values.controller.controllerOnly -}}
+    {{- $listeners = list .context.Values.listeners.controller -}}
+    {{- else -}}
+    {{- $listeners = append $listeners .context.Values.listeners.controller -}}
+    {{- end -}}
+    {{- end -}}
+    {{- $res := list -}}
+    {{- range $listener := $listeners -}}
+    {{- $res = append $res (printf "%s://:%d" (upper $listener.name) (int $listener.containerPort)) -}}
+    {{- end -}}
+    {{- printf "%s" (join "," $res) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the list of advertised listeners, although the advertised address will be replaced during each node init time
+*/}}
+{{- define "kafka.advertisedListeners" -}}
+{{- if .Values.listeners.advertisedListeners -}}
+    {{- printf "%s" .Values.listeners.advertisedListeners -}}
+{{- else -}}
+    {{- $listeners := list .Values.listeners.client .Values.listeners.interbroker -}}
+    {{- range $i := .Values.listeners.extraListeners -}}
+    {{- $listeners = append $listeners $i -}}
+    {{- end -}}
+    {{- $res := list -}}
+    {{- range $listener := $listeners -}}
+    {{- $res = append $res (printf "%s://advertised-address-placeholder:%d" (upper $listener.name) (int $listener.containerPort)) -}}
+    {{- end -}}
+    {{- printf "%s" (join "," $res) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the value of listener.security.protocol.map based on the values of 'listeners.*.protocol'
+*/}}
+{{- define "kafka.securityProtocolMap" -}}
+{{- if .Values.listeners.securityProtocolMap -}}
+    {{- printf "%s" .Values.listeners.securityProtocolMap -}}
+{{- else -}}
+    {{- $listeners := list .Values.listeners.client .Values.listeners.interbroker -}}
+    {{- range $i := .Values.listeners.extraListeners -}}
+    {{- $listeners = append $listeners $i -}}
+    {{- end -}}
+    {{- if .Values.kraft.enabled -}}
+    {{- $listeners = append $listeners .Values.listeners.controller -}}
+    {{- end -}}
+    {{- if and .Values.externalAccess.enabled -}}
+    {{- $listeners = append $listeners .Values.listeners.external -}}
+    {{- end -}}
+    {{- $res := list -}}
+    {{- range $listener := $listeners -}}
+    {{- $res = append $res (printf "%s:%s" (upper $listener.name) (upper $listener.protocol)) -}}
+    {{- end -}}
+    {{ printf "%s" (join "," $res)}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the containerPorts for listeners.extraListeners
+*/}}
+{{- define "kafka.extraListeners.containerPorts" -}}
+{{- range $listener := .Values.listeners.extraListeners -}}
+- name: {{ lower $listener.name}}
+  containerPort: {{ $listener.containerPort }}
+{{- end -}}
+{{- end -}}
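+
+{{/*
+For illustration only (assuming the chart's default listener names and ports): on a
+broker node "kafka.listeners" renders roughly "CLIENT://:9092,INTERNAL://:9094", with
+EXTERNAL and/or CONTROLLER entries appended when external access or KRaft is enabled.
+*/}}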
"kafka.zookeeper.fullname" .) (ternary "3181" "2181" .Values.tls.zookeeper.enabled) (tpl .Values.zookeeperChrootPath .) -}} +{{- else -}} +{{- printf "%s%s" (join "," .Values.externalZookeeper.servers) (tpl .Values.zookeeperChrootPath .) -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the controller quorum voters based on the number of controller-eligible nodes +*/}} +{{- define "kafka.kraft.controllerQuorumVoters" -}} +{{- if .Values.kraft.controllerQuorumVoters -}} + {{- include "common.tplvalues.render" (dict "value" .Values.kraft.controllerQuorumVoters "context" $) -}} +{{- else -}} + {{- $controllerVoters := list -}} + {{- $fullname := include "common.names.fullname" . -}} + {{- $releaseNamespace := include "common.names.namespace" . -}} + {{- range $i := until (int .Values.controller.replicaCount) -}} + {{- $nodeId := add (int $i) (int $.Values.controller.minId) -}} + {{- $nodeAddress := printf "%s-controller-%d.%s-controller-headless.%s.svc.%s:%d" $fullname (int $i) $fullname $releaseNamespace $.Values.clusterDomain (int $.Values.listeners.controller.containerPort) -}} + {{- $controllerVoters = append $controllerVoters (printf "%d@%s" $nodeId $nodeAddress ) -}} + {{- end -}} + {{- join "," $controllerVoters -}} +{{- end -}} +{{- end -}} + +{{/* +Section of the server.properties configmap shared by both controller-eligible and broker nodes +*/}} +{{- define "kafka.commonConfig" -}} +log.dir={{ printf "%s/data" .Values.controller.persistence.mountPath }} +{{- if or (include "kafka.saslEnabled" .) }} +sasl.enabled.mechanisms={{ upper .Values.sasl.enabledMechanisms }} +{{- end }} +# Interbroker configuration +inter.broker.listener.name={{ .Values.listeners.interbroker.name }} +{{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} +sasl.mechanism.inter.broker.protocol={{ upper .Values.sasl.interBrokerMechanism }} +{{- end }} +{{- if (include "kafka.sslEnabled" .) }} +# TLS configuration +ssl.keystore.type=JKS +ssl.truststore.type=JKS +ssl.keystore.location=/opt/bitnami/kafka/config/certs/kafka.keystore.jks +ssl.truststore.location=/opt/bitnami/kafka/config/certs/kafka.truststore.jks +#ssl.keystore.password= +#ssl.truststore.password= +#ssl.key.password= +ssl.client.auth={{ .Values.tls.sslClientAuth }} +ssl.endpoint.identification.algorithm={{ .Values.tls.endpointIdentificationAlgorithm }} +{{- end }} +{{- if (include "kafka.saslEnabled" .) 
+{{- if (include "kafka.saslEnabled" .) }}
+# Listeners SASL JAAS configuration
+{{- $listeners := list .Values.listeners.client .Values.listeners.interbroker }}
+{{- range $i := .Values.listeners.extraListeners }}
+{{- $listeners = append $listeners $i }}
+{{- end }}
+{{- if .Values.externalAccess.enabled }}
+{{- $listeners = append $listeners .Values.listeners.external }}
+{{- end }}
+{{- range $listener := $listeners }}
+{{- if and $listener.sslClientAuth (regexFind "SSL" (upper $listener.protocol)) }}
+listener.name.{{lower $listener.name}}.ssl.client.auth={{ $listener.sslClientAuth }}
+{{- end }}
+{{- if regexFind "SASL" (upper $listener.protocol) }}
+{{- range $mechanism := ( splitList "," $.Values.sasl.enabledMechanisms )}}
+    {{- $securityModule := ternary "org.apache.kafka.common.security.plain.PlainLoginModule required" "org.apache.kafka.common.security.scram.ScramLoginModule required" (eq "PLAIN" (upper $mechanism)) }}
+    {{- $saslJaasConfig := list $securityModule }}
+    {{- if eq $listener.name $.Values.listeners.interbroker.name }}
+        {{- $saslJaasConfig = append $saslJaasConfig (printf "username=\"%s\"" $.Values.sasl.interbroker.user) }}
+        {{- $saslJaasConfig = append $saslJaasConfig (print "password=\"interbroker-password-placeholder\"") }}
+    {{- end }}
+    {{- if eq (upper $mechanism) "PLAIN" }}
+        {{- if eq $listener.name $.Values.listeners.interbroker.name }}
+            {{- $saslJaasConfig = append $saslJaasConfig (printf "user_%s=\"interbroker-password-placeholder\"" $.Values.sasl.interbroker.user) }}
+        {{- end }}
+        {{- range $i, $user := $.Values.sasl.client.users }}
+            {{- $saslJaasConfig = append $saslJaasConfig (printf "user_%s=\"password-placeholder-%d\"" $user (int $i)) }}
+        {{- end }}
+    {{- end }}
+listener.name.{{lower $listener.name}}.{{lower $mechanism}}.sasl.jaas.config={{ join " " $saslJaasConfig }};
+{{- end }}
+{{- end }}
+{{- end }}
+# End of SASL JAAS configuration
+{{- end }}
+{{- end -}}
+
+{{/*
+Zookeeper connection section of the server.properties
+*/}}
+{{- define "kafka.zookeeperConfig" -}}
+zookeeper.connect={{ include "kafka.zookeeperConnect" . }}
+#broker.id=
+{{- if .Values.sasl.zookeeper.user }}
+sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
+    username="{{ .Values.sasl.zookeeper.user }}" \
+    password="zookeeper-password-placeholder";
+{{- end }}
+{{- if and .Values.tls.zookeeper.enabled .Values.tls.zookeeper.existingSecret }}
+zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
+zookeeper.ssl.client.enable=true
+zookeeper.ssl.keystore.location=/opt/bitnami/kafka/config/certs/zookeeper.keystore.jks
+zookeeper.ssl.truststore.location=/opt/bitnami/kafka/config/certs/zookeeper.truststore.jks
+zookeeper.ssl.hostnameVerification={{ .Values.tls.zookeeper.verifyHostname }}
+#zookeeper.ssl.keystore.password=
+#zookeeper.ssl.truststore.password=
+{{- end }}
+{{- end -}}
+
+{{/*
+KRaft section of the server.properties
+*/}}
+{{- define "kafka.kraftConfig" -}}
+#node.id=
+controller.listener.names={{ .Values.listeners.controller.name }}
+controller.quorum.voters={{ include "kafka.kraft.controllerQuorumVoters" . 
}} +{{- $listener := $.Values.listeners.controller }} +{{- if and $listener.sslClientAuth (regexFind "SSL" (upper $listener.protocol)) }} +# Kraft Controller listener SSL settings +listener.name.{{lower $listener.name}}.ssl.client.auth={{ $listener.sslClientAuth }} +{{- end }} +{{- if regexFind "SASL" (upper $listener.protocol) }} + {{- $mechanism := $.Values.sasl.controllerMechanism }} + {{- $securityModule := ternary "org.apache.kafka.common.security.plain.PlainLoginModule required" "org.apache.kafka.common.security.scram.ScramLoginModule required" (eq "PLAIN" (upper $mechanism)) }} + {{- $saslJaasConfig := list $securityModule }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "username=\"%s\"" $.Values.sasl.controller.user) }} + {{- $saslJaasConfig = append $saslJaasConfig (print "password=\"controller-password-placeholder\"") }} + {{- if eq (upper $mechanism) "PLAIN" }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "user_%s=\"controller-password-placeholder\"" $.Values.sasl.controller.user) }} + {{- end }} +# Kraft Controller listener SASL settings +sasl.mechanism.controller.protocol={{ upper $mechanism }} +listener.name.{{lower $listener.name}}.sasl.enabled.mechanisms={{ upper $mechanism }} +listener.name.{{lower $listener.name}}.{{lower $mechanism }}.sasl.jaas.config={{ join " " $saslJaasConfig }}; +{{- end }} +{{- end -}} + +{{/* +Init container definition for Kafka initialization +*/}} +{{- define "kafka.prepareKafkaInitContainer" -}} +{{- $role := .role -}} +{{- $roleSettings := index .context.Values .role -}} +- name: kafka-init + image: {{ include "kafka.image" .context }} + imagePullPolicy: {{ .context.Values.image.pullPolicy }} + {{- if $roleSettings.containerSecurityContext.enabled }} + securityContext: {{- omit $roleSettings.containerSecurityContext "enabled" | toYaml | nindent 4 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + /scripts/kafka-init.sh + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .context.Values.image.debug .context.Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_VOLUME_DIR + value: {{ $roleSettings.persistence.mountPath | quote }} + - name: KAFKA_MIN_ID + value: {{ $roleSettings.minId | quote }} + {{- if or (and (eq .role "broker") .context.Values.externalAccess.enabled) (and (eq .role "controller") .context.Values.externalAccess.enabled (or .context.Values.externalAccess.controller.forceExpose (not .context.Values.controller.controllerOnly))) }} + {{- $externalAccess := index .context.Values.externalAccess .role }} + - name: EXTERNAL_ACCESS_ENABLED + value: "true" + {{- if eq $externalAccess.service.type "LoadBalancer" }} + {{- if not .context.Values.externalAccess.autoDiscovery.enabled }} + - name: EXTERNAL_ACCESS_HOSTS_LIST + value: {{ join "," (default $externalAccess.service.loadBalancerIPs $externalAccess.service.loadBalancerNames) | quote }} + {{- end }} + - name: EXTERNAL_ACCESS_PORT + value: {{ $externalAccess.service.ports.external | quote }} + {{- else if eq $externalAccess.service.type "NodePort" }} + {{- if $externalAccess.service.domain }} + - name: EXTERNAL_ACCESS_HOST + value: {{ $externalAccess.service.domain | quote }} + {{- else if and $externalAccess.service.usePodIPs .context.Values.externalAccess.autoDiscovery.enabled }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: EXTERNAL_ACCESS_HOST + value: "$(MY_POD_IP)" + {{- else if or $externalAccess.service.useHostIPs 
.context.Values.externalAccess.autoDiscovery.enabled }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: EXTERNAL_ACCESS_HOST + value: "$(HOST_IP)" + {{- else if and $externalAccess.service.externalIPs (not .context.Values.externalAccess.autoDiscovery.enabled) }} + - name: EXTERNAL_ACCESS_HOSTS_LIST + value: {{ join "," $externalAccess.service.externalIPs }} + {{- else }} + - name: EXTERNAL_ACCESS_HOST_USE_PUBLIC_IP + value: "true" + {{- end }} + {{- if not .context.Values.externalAccess.autoDiscovery.enabled }} + {{- if and $externalAccess.service.externalIPs (empty $externalAccess.service.nodePorts)}} + - name: EXTERNAL_ACCESS_PORT + value: {{ $externalAccess.service.ports.external | quote }} + {{- else }} + - name: EXTERNAL_ACCESS_PORTS_LIST + value: {{ join "," $externalAccess.service.nodePorts | quote }} + {{- end }} + {{- end }} + {{- else if eq $externalAccess.service.type "ClusterIP" }} + - name: EXTERNAL_ACCESS_HOST + value: {{ $externalAccess.service.domain | quote }} + - name: EXTERNAL_ACCESS_PORT + value: {{ $externalAccess.service.ports.external | quote}} + - name: EXTERNAL_ACCESS_PORT_AUTOINCREMENT + value: "true" + {{- end }} + {{- end }} + {{- if and (include "kafka.client.saslEnabled" .context ) .context.Values.sasl.client.users }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .context.Values.sasl.client.users | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" .context }} + key: client-passwords + {{- end }} + {{- if regexFind "SASL" (upper .context.Values.listeners.interbroker.protocol) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .context.Values.sasl.interbroker.user | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" .context }} + key: inter-broker-password + {{- end }} + {{- if and .context.Values.kraft.enabled (regexFind "SASL" (upper .context.Values.listeners.controller.protocol)) }} + - name: KAFKA_CONTROLLER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" .context }} + key: controller-password + {{- end }} + {{- if (include "kafka.sslEnabled" .context ) }} + - name: KAFKA_TLS_TYPE + value: {{ ternary "PEM" "JKS" (or .context.Values.tls.autoGenerated (eq (upper .context.Values.tls.type) "PEM")) }} + - name: KAFKA_TLS_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.tlsPasswordsSecretName" .context }} + key: {{ .context.Values.tls.passwordsSecretKeystoreKey | quote }} + - name: KAFKA_TLS_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.tlsPasswordsSecretName" .context }} + key: {{ .context.Values.tls.passwordsSecretTruststoreKey | quote }} + {{- if and (not .context.Values.tls.autoGenerated) (or .context.Values.tls.keyPassword (and .context.Values.tls.passwordsSecret .context.Values.tls.passwordsSecretPemPasswordKey)) }} + - name: KAFKA_TLS_PEM_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.tlsPasswordsSecretName" .context }} + key: {{ default "key-password" .context.Values.tls.passwordsSecretPemPasswordKey | quote }} + {{- end }} + {{- end }} + {{- if or .context.Values.zookeeper.enabled .context.Values.externalZookeeper.servers }} + {{- if .context.Values.sasl.zookeeper.user }} + - name: KAFKA_ZOOKEEPER_USER + value: {{ .context.Values.sasl.zookeeper.user | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" .context }} + key: 
zookeeper-password + {{- end }} + {{- if .context.Values.tls.zookeeper.enabled }} + {{- if and .context.Values.tls.zookeeper.passwordsSecretKeystoreKey (or .context.Values.tls.zookeeper.passwordsSecret .context.Values.tls.zookeeper.keystorePassword) }} + - name: KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.zookeeper.tlsPasswordsSecretName" .context }} + key: {{ .context.Values.tls.zookeeper.passwordsSecretKeystoreKey | quote }} + {{- end }} + {{- if and .context.Values.tls.zookeeper.passwordsSecretTruststoreKey (or .context.Values.tls.zookeeper.passwordsSecret .context.Values.tls.zookeeper.truststorePassword) }} + - name: KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.zookeeper.tlsPasswordsSecretName" .context }} + key: {{ .context.Values.tls.zookeeper.passwordsSecretTruststoreKey | quote }} + {{- end }} + {{- end }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + - name: kafka-config + mountPath: /config + - name: kafka-configmaps + mountPath: /configmaps + - name: kafka-secret-config + mountPath: /secret-config + - name: scripts + mountPath: /scripts + - name: tmp + mountPath: /tmp + {{- if and .context.Values.externalAccess.enabled .context.Values.externalAccess.autoDiscovery.enabled }} + - name: kafka-autodiscovery-shared + mountPath: /shared + {{- end }} + {{- if or (include "kafka.sslEnabled" .context) .context.Values.tls.zookeeper.enabled }} + - name: kafka-shared-certs + mountPath: /certs + {{- if and (include "kafka.sslEnabled" .context) (or .context.Values.tls.existingSecret .context.Values.tls.autoGenerated) }} + - name: kafka-certs + mountPath: /mounted-certs + readOnly: true + {{- end }} + {{- if and .context.Values.tls.zookeeper.enabled .context.Values.tls.zookeeper.existingSecret }} + - name: kafka-zookeeper-cert + mountPath: /zookeeper-certs + readOnly: true + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Init container definition for waiting for Kubernetes autodiscovery +*/}} +{{- define "kafka.autoDiscoveryInitContainer" -}} +{{- $externalAccessService := index .context.Values.externalAccess .role }} +- name: auto-discovery + image: {{ include "kafka.externalAccess.autoDiscovery.image" .context }} + imagePullPolicy: {{ .context.Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: AUTODISCOVERY_SERVICE_TYPE + value: {{ $externalAccessService.service.type | quote }} + {{- if .context.Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .context.Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + - name: kafka-autodiscovery-shared + mountPath: /shared +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "kafka.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.kafka.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. 
+*/}} +{{- define "kafka.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "kafka.validateValues.listener.protocols" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.controller.nodePortListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.broker.nodePortListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.controller.externalIPListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.broker.externalIPListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.domainSpecified" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceType" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryRBAC" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerIPs" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerNames" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerAnnotations" "context" . )) -}} +{{- $messages := append $messages (include "kafka.validateValues.saslMechanisms" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsSecret" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.provisioning.tlsPasswords" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.kraftMode" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.kraftMissingControllers" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.zookeeperMissingBrokers" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.zookeeperNoControllers" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.modeEmpty" .) 
-}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - Authentication protocols for Kafka */}}
+{{- define "kafka.validateValues.listener.protocols" -}}
+{{- $authProtocols := list "PLAINTEXT" "SASL_PLAINTEXT" "SASL_SSL" "SSL" -}}
+{{- if not .Values.listeners.securityProtocolMap -}}
+{{- $listeners := list .Values.listeners.client .Values.listeners.interbroker -}}
+{{- if .Values.kraft.enabled -}}
+{{- $listeners = append $listeners .Values.listeners.controller -}}
+{{- end -}}
+{{- if .Values.externalAccess.enabled -}}
+{{- $listeners = append $listeners .Values.listeners.external -}}
+{{- end -}}
+{{- $error := false -}}
+{{- range $listener := $listeners -}}
+{{- if not (has (upper $listener.protocol) $authProtocols) -}}
+{{- $error = true -}}
+{{- end -}}
+{{- end -}}
+{{- if $error -}}
+kafka: listeners.*.protocol
+    Available authentication protocols are "PLAINTEXT" "SASL_PLAINTEXT" "SSL" "SASL_SSL"
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - number of controller-eligible replicas must be the same as NodePort list in controller-eligible external service */}}
+{{- define "kafka.validateValues.controller.nodePortListLength" -}}
+{{- $replicaCount := int .Values.controller.replicaCount -}}
+{{- $nodePortListLength := len .Values.externalAccess.controller.service.nodePorts -}}
+{{- $nodePortListIsEmpty := empty .Values.externalAccess.controller.service.nodePorts -}}
+{{- $nodePortListLengthEqualsReplicaCount := eq $nodePortListLength $replicaCount -}}
+{{- $externalIPListIsEmpty := empty .Values.externalAccess.controller.service.externalIPs -}}
+{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (eq .Values.externalAccess.controller.service.type "NodePort") (or (and (not $nodePortListIsEmpty) (not $nodePortListLengthEqualsReplicaCount)) (and $nodePortListIsEmpty $externalIPListIsEmpty)) -}}
+kafka: .Values.externalAccess.controller.service.nodePorts
+    Number of controller-eligible replicas and externalAccess.controller.service.nodePorts array length must be the same. Currently: replicaCount = {{ $replicaCount }} and length nodePorts = {{ $nodePortListLength }} (externalIPs empty: {{ $externalIPListIsEmpty }})
+{{- end -}}
+{{- end -}}
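+
+{{/*
+Illustrative values.yaml sketch (hypothetical port numbers): the NodePort checks here
+and below pass when the port list length matches the replica count, e.g.:
+  broker:
+    replicaCount: 3
+  externalAccess:
+    enabled: true
+    broker:
+      service:
+        type: NodePort
+        nodePorts: [30001, 30002, 30003]
+*/}}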
+
+{{/* Validate values of Kafka - number of broker replicas must be the same as NodePort list in broker external service */}}
+{{- define "kafka.validateValues.broker.nodePortListLength" -}}
+{{- $replicaCount := int .Values.broker.replicaCount -}}
+{{- $nodePortListLength := len .Values.externalAccess.broker.service.nodePorts -}}
+{{- $nodePortListIsEmpty := empty .Values.externalAccess.broker.service.nodePorts -}}
+{{- $nodePortListLengthEqualsReplicaCount := eq $nodePortListLength $replicaCount -}}
+{{- $externalIPListIsEmpty := empty .Values.externalAccess.broker.service.externalIPs -}}
+{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (eq .Values.externalAccess.broker.service.type "NodePort") (or (and (not $nodePortListIsEmpty) (not $nodePortListLengthEqualsReplicaCount)) (and $nodePortListIsEmpty $externalIPListIsEmpty)) -}}
+kafka: .Values.externalAccess.broker.service.nodePorts
+    Number of broker replicas and externalAccess.broker.service.nodePorts array length must be the same. Currently: replicaCount = {{ $replicaCount }} and length nodePorts = {{ $nodePortListLength }} (externalIPs empty: {{ $externalIPListIsEmpty }})
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - number of controller-eligible replicas must be the same as the externalIPs list */}}
+{{- define "kafka.validateValues.controller.externalIPListLength" -}}
+{{- $replicaCount := int .Values.controller.replicaCount -}}
+{{- $externalIPListLength := len .Values.externalAccess.controller.service.externalIPs -}}
+{{- $externalIPListIsEmpty := empty .Values.externalAccess.controller.service.externalIPs -}}
+{{- $externalIPListEqualsReplicaCount := eq $externalIPListLength $replicaCount -}}
+{{- $nodePortListIsEmpty := empty .Values.externalAccess.controller.service.nodePorts -}}
+{{- if and .Values.externalAccess.enabled (or .Values.externalAccess.controller.forceExpose (not .Values.controller.controllerOnly)) (not .Values.externalAccess.autoDiscovery.enabled) (eq .Values.externalAccess.controller.service.type "NodePort") (or (and (not $externalIPListIsEmpty) (not $externalIPListEqualsReplicaCount)) (and $externalIPListIsEmpty $nodePortListIsEmpty)) -}}
+kafka: .Values.externalAccess.controller.service.externalIPs
+    Number of controller-eligible replicas and externalAccess.controller.service.externalIPs array length must be the same. Currently: replicaCount = {{ $replicaCount }} and length externalIPs = {{ $externalIPListLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - number of broker replicas must be the same as the externalIPs list */}}
+{{- define "kafka.validateValues.broker.externalIPListLength" -}}
+{{- $replicaCount := int .Values.broker.replicaCount -}}
+{{- $externalIPListLength := len .Values.externalAccess.broker.service.externalIPs -}}
+{{- $externalIPListIsEmpty := empty .Values.externalAccess.broker.service.externalIPs -}}
+{{- $externalIPListEqualsReplicaCount := eq $externalIPListLength $replicaCount -}}
+{{- $nodePortListIsEmpty := empty .Values.externalAccess.broker.service.nodePorts -}}
+{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (eq .Values.externalAccess.broker.service.type "NodePort") (or (and (not $externalIPListIsEmpty) (not $externalIPListEqualsReplicaCount)) (and $externalIPListIsEmpty $nodePortListIsEmpty)) -}}
+kafka: .Values.externalAccess.broker.service.externalIPs
+    Number of broker replicas and externalAccess.broker.service.externalIPs array length must be the same. Currently: replicaCount = {{ $replicaCount }} and length externalIPs = {{ $externalIPListLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - domain must be defined if external service type ClusterIP */}}
+{{- define "kafka.validateValues.domainSpecified" -}}
+{{- if and (eq .Values.externalAccess.controller.service.type "ClusterIP") (empty .Values.externalAccess.controller.service.domain) -}}
+kafka: .Values.externalAccess.controller.service.domain
+    Domain must be specified if service type ClusterIP is set for the external service
+{{- end -}}
+{{- if and (eq .Values.externalAccess.broker.service.type "ClusterIP") (empty .Values.externalAccess.broker.service.domain) -}}
+kafka: .Values.externalAccess.broker.service.domain
+    Domain must be specified if service type ClusterIP is set for the external service
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - service type for external access */}}
+{{- define "kafka.validateValues.externalAccessServiceType" -}}
+{{- if and (not (eq .Values.externalAccess.controller.service.type "NodePort")) (not (eq .Values.externalAccess.controller.service.type "LoadBalancer")) (not (eq .Values.externalAccess.controller.service.type "ClusterIP")) -}}
+kafka: externalAccess.controller.service.type
+    Available service types for external access are NodePort, LoadBalancer and ClusterIP.
+{{- end -}}
+{{- if and (not (eq .Values.externalAccess.broker.service.type "NodePort")) (not (eq .Values.externalAccess.broker.service.type "LoadBalancer")) (not (eq .Values.externalAccess.broker.service.type "ClusterIP")) -}}
+kafka: externalAccess.broker.service.type
+    Available service types for external access are NodePort, LoadBalancer and ClusterIP.
+{{- end -}}
+{{- end -}}
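+
+{{/*
+Usage sketch (release name and chart path are hypothetical): auto-discovery requires the
+chart's RBAC objects, so the three toggles checked below are usually set together, e.g.:
+  helm install my-kafka ./kafka \
+    --set externalAccess.enabled=true \
+    --set externalAccess.autoDiscovery.enabled=true \
+    --set rbac.create=true
+*/}}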
+
+{{/* Validate values of Kafka - RBAC should be enabled when autoDiscovery is enabled */}}
+{{- define "kafka.validateValues.externalAccessAutoDiscoveryRBAC" -}}
+{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create ) }}
+kafka: rbac.create
+    By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true"
+    an initContainer will be used to auto-detect the external IPs/ports by querying the
+    K8s API. Please note this initContainer requires specific RBAC resources. You can create them
+    by specifying "--set rbac.create=true".
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - loadBalancerIPs or loadBalancerNames should be set when autoDiscovery is disabled */}}
+{{- define "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" -}}
+{{- $loadBalancerNameListLength := len .Values.externalAccess.controller.service.loadBalancerNames -}}
+{{- $loadBalancerIPListLength := len .Values.externalAccess.controller.service.loadBalancerIPs -}}
+{{- if and .Values.externalAccess.enabled (or .Values.externalAccess.controller.forceExpose (not .Values.controller.controllerOnly)) (eq .Values.externalAccess.controller.service.type "LoadBalancer") (not .Values.externalAccess.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }}
+kafka: externalAccess.controller.service.loadBalancerNames or externalAccess.controller.service.loadBalancerIPs
+    By specifying "externalAccess.enabled=true", "externalAccess.autoDiscovery.enabled=false" and
+    "externalAccess.controller.service.type=LoadBalancer" at least one of externalAccess.controller.service.loadBalancerNames
+    or externalAccess.controller.service.loadBalancerIPs must be set, and the length of those arrays must be equal
+    to the number of replicas.
+{{- end -}}
+{{- $loadBalancerNameListLength = len .Values.externalAccess.broker.service.loadBalancerNames -}}
+{{- $loadBalancerIPListLength = len .Values.externalAccess.broker.service.loadBalancerIPs -}}
+{{- $replicaCount := int .Values.broker.replicaCount }}
+{{- if and .Values.externalAccess.enabled (gt $replicaCount 0) (eq .Values.externalAccess.broker.service.type "LoadBalancer") (not .Values.externalAccess.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }}
+kafka: externalAccess.broker.service.loadBalancerNames or externalAccess.broker.service.loadBalancerIPs
+    By specifying "externalAccess.enabled=true", "externalAccess.autoDiscovery.enabled=false" and
+    "externalAccess.broker.service.type=LoadBalancer" at least one of externalAccess.broker.service.loadBalancerNames
+    or externalAccess.broker.service.loadBalancerIPs must be set, and the length of those arrays must be equal
+    to the number of replicas.
+{{- end -}}
+{{- end -}}
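+
+{{/*
+Illustrative values.yaml sketch (IPs taken from the 203.0.113.0/24 documentation range):
+a LoadBalancer setup that satisfies the length checks below with three controller replicas:
+  controller:
+    replicaCount: 3
+  externalAccess:
+    enabled: true
+    controller:
+      service:
+        type: LoadBalancer
+        loadBalancerIPs: ["203.0.113.10", "203.0.113.11", "203.0.113.12"]
+*/}}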
+
+{{/* Validate values of Kafka - number of replicas must be the same as the loadBalancerIPs/loadBalancerNames/loadBalancerAnnotations list */}}
+{{- define "kafka.validateValues.externalAccessServiceList" -}}
+{{- $replicaCount := int .context.Values.controller.replicaCount }}
+{{- $listLength := len (get .context.Values.externalAccess.controller.service .element) -}}
+{{- if and .context.Values.externalAccess.enabled (or .context.Values.externalAccess.controller.forceExpose (not .context.Values.controller.controllerOnly)) (not .context.Values.externalAccess.autoDiscovery.enabled) (eq .context.Values.externalAccess.controller.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }}
+kafka: externalAccess.service.{{ .element }}
+    Number of replicas and {{ .element }} array length must be the same. Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }}
+{{- end -}}
+{{- $replicaCount = int .context.Values.broker.replicaCount }}
+{{- $listLength = len (get .context.Values.externalAccess.broker.service .element) -}}
+{{- if and .context.Values.externalAccess.enabled (gt $replicaCount 0) (not .context.Values.externalAccess.autoDiscovery.enabled) (eq .context.Values.externalAccess.broker.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }}
+kafka: externalAccess.service.{{ .element }}
+    Number of replicas and {{ .element }} array length must be the same. Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - SASL mechanisms must be provided when using SASL */}}
+{{- define "kafka.validateValues.saslMechanisms" -}}
+{{- if and (include "kafka.saslEnabled" .) (not .Values.sasl.enabledMechanisms) }}
+kafka: sasl.enabledMechanisms
+    The SASL mechanisms are required when listeners use the SASL security protocol.
+{{- end }}
+{{- if not (contains .Values.sasl.interBrokerMechanism .Values.sasl.enabledMechanisms) }}
+kafka: sasl.enabledMechanisms
+    sasl.interBrokerMechanism must be provided and must be one of the mechanisms listed in sasl.enabledMechanisms
+{{- end -}}
+{{- if and .Values.kraft.enabled (not (contains .Values.sasl.controllerMechanism .Values.sasl.enabledMechanisms)) }}
+kafka: sasl.enabledMechanisms
+    sasl.controllerMechanism must be provided and must be one of the mechanisms listed in sasl.enabledMechanisms
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - Secrets containing TLS certs must be provided when TLS authentication is enabled */}}
+{{- define "kafka.validateValues.tlsSecret" -}}
+{{- if and (include "kafka.sslEnabled" .) (eq (upper .Values.tls.type) "JKS") (empty .Values.tls.existingSecret) (not .Values.tls.autoGenerated) }}
+kafka: tls.existingSecret
+    A secret containing the Kafka JKS keystores and truststore is required
+    when TLS encryption is enabled and the TLS format is "JKS"
+{{- else if and (include "kafka.sslEnabled" .) (eq (upper .Values.tls.type) "PEM") (empty .Values.tls.existingSecret) (not .Values.tls.autoGenerated) }}
+kafka: tls.existingSecret
+    A secret containing the Kafka TLS certificates and keys is required
+    when TLS encryption is enabled and the TLS format is "PEM"
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka provisioning - keyPasswordSecretKey, keystorePasswordSecretKey or truststorePasswordSecretKey must not be used without passwordsSecret */}}
+{{- define "kafka.validateValues.provisioning.tlsPasswords" -}}
+{{- if and (regexFind "SSL" (upper .Values.listeners.client.protocol)) .Values.provisioning.enabled (not .Values.provisioning.auth.tls.passwordsSecret) }}
+{{- if or .Values.provisioning.auth.tls.keyPasswordSecretKey .Values.provisioning.auth.tls.keystorePasswordSecretKey .Values.provisioning.auth.tls.truststorePasswordSecretKey }}
+kafka: tls.keyPasswordSecretKey,tls.keystorePasswordSecretKey,tls.truststorePasswordSecretKey
+    tls.keyPasswordSecretKey, tls.keystorePasswordSecretKey and tls.truststorePasswordSecretKey
+    must not be used unless passwordsSecret is set.
+{{- end -}}
+{{- end -}}
+{{- end -}}
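+
+{{/*
+Illustrative values.yaml sketch: a SASL configuration that passes the mechanism checks
+above, since both the inter-broker and controller mechanisms appear in enabledMechanisms
+(the mechanism names shown are assumptions; adjust them to your deployment):
+  sasl:
+    enabledMechanisms: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
+    interBrokerMechanism: PLAIN
+    controllerMechanism: PLAIN
+*/}}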
+
+{{/* Validate values of Kafka KRaft mode. It cannot be used with Zookeeper unless migration is enabled */}}
+{{- define "kafka.validateValues.kraftMode" -}}
+{{- if and .Values.kraft.enabled (or .Values.zookeeper.enabled .Values.externalZookeeper.servers) (and (not .Values.controller.zookeeperMigrationMode ) (not .Values.broker.zookeeperMigrationMode )) }}
+kafka: Simultaneous KRaft and Zookeeper modes
+    Both Zookeeper and KRaft modes have been configured simultaneously, but migration mode has not been enabled.
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka KRaft mode. At least 1 controller is configured or controller.quorum.voters is set */}}
+{{- define "kafka.validateValues.kraftMissingControllers" -}}
+{{- if and .Values.kraft.enabled (le (int .Values.controller.replicaCount) 0) (not .Values.kraft.controllerQuorumVoters) }}
+kafka: KRaft mode - Missing controller-eligible nodes
+    KRaft mode has been enabled, but no controller-eligible nodes have been configured
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka Zookeeper mode. At least 1 broker is configured */}}
+{{- define "kafka.validateValues.zookeeperMissingBrokers" -}}
+{{- if and (or .Values.zookeeper.enabled .Values.externalZookeeper.servers) (le (int .Values.broker.replicaCount) 0)}}
+kafka: Zookeeper mode - No Kafka brokers configured
+    Zookeeper mode has been enabled, but no Kafka broker nodes have been configured
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka Zookeeper mode. Controller nodes are not supported in Zookeeper mode unless migration is enabled */}}
+{{- define "kafka.validateValues.zookeeperNoControllers" -}}
+{{- if and (or .Values.zookeeper.enabled .Values.externalZookeeper.servers) (gt (int .Values.controller.replicaCount) 0) (and (not .Values.controller.zookeeperMigrationMode ) (not .Values.broker.zookeeperMigrationMode )) }}
+kafka: Zookeeper mode - Controller nodes not supported
+    Controller replicas have been enabled in Zookeeper mode; set controller.replicaCount to zero or enable migration mode to migrate to KRaft mode
+{{- end -}}
+{{- end -}}
+
+{{/* Validate that either KRaft or Zookeeper mode is enabled */}}
+{{- define "kafka.validateValues.modeEmpty" -}}
+{{- if and (not .Values.kraft.enabled) (not (or .Values.zookeeper.enabled .Values.externalZookeeper.servers)) }}
+kafka: Missing KRaft or Zookeeper mode settings
+    The Kafka chart has been deployed but neither KRaft nor Zookeeper mode has been enabled.
+    Please configure 'kraft.enabled', 'zookeeper.enabled' or 'externalZookeeper.servers' before proceeding.
+{{- end -}}
+{{- end -}}
diff --git a/manifest/helm-charts/infra/kafka/templates/broker/config-secrets.yaml b/manifest/helm-charts/infra/kafka/templates/broker/config-secrets.yaml
new file mode 100644
index 000000000..2965e7e17
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/broker/config-secrets.yaml
@@ -0,0 +1,25 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- $replicaCount := int .Values.broker.replicaCount }}
+{{- if and (include "kafka.broker.createSecretConfig" .) (gt $replicaCount 0) }}
+{{- $secretName := printf "%s-broker-secret-configuration" (include "common.names.fullname" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + server-secret.properties: {{ include "kafka.broker.secretConfig" . | b64enc }} +{{- end }} + diff --git a/manifest/helm-charts/infra/kafka/templates/broker/configmap.yaml b/manifest/helm-charts/infra/kafka/templates/broker/configmap.yaml new file mode 100644 index 000000000..12a231c9f --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/broker/configmap.yaml @@ -0,0 +1,47 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if and (include "kafka.broker.createConfigmap" .) (gt $replicaCount 0) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-broker-configuration" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- if or .Values.config .Values.broker.config }} + server.properties: {{- include "common.tplvalues.render" ( dict "value" (coalesce .Values.broker.config .Values.config) "context" $ ) | nindent 4 }} + {{- else }} + server.properties: |- + # Listeners configuration + listeners={{ include "kafka.listeners" ( dict "isController" false "context" $ ) }} + listener.security.protocol.map={{ include "kafka.securityProtocolMap" . }} + advertised.listeners={{ include "kafka.advertisedListeners" . }} + {{- if .Values.kraft.enabled }} + {{- if not .Values.broker.zookeeperMigrationMode }} + # KRaft node role + process.roles=broker + {{- end -}} + {{- include "kafka.kraftConfig" . | nindent 4 }} + {{- end }} + {{- if or .Values.zookeeper.enabled .Values.externalZookeeper.servers }} + # Zookeeper configuration + {{- include "kafka.zookeeperConfig" . | nindent 4 }} + {{- if .Values.broker.zookeeperMigrationMode }} + zookeeper.metadata.migration.enable=true + inter.broker.protocol.version={{ default (regexFind "^[0-9].[0-9]+" .Chart.AppVersion) .Values.interBrokerProtocolVersion }} + {{- end }} + {{- end }} + {{- include "kafka.commonConfig" . | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraConfig "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.broker.extraConfig "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/broker/pdb.yaml b/manifest/helm-charts/infra/kafka/templates/broker/pdb.yaml new file mode 100644 index 000000000..585ca4f19 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/broker/pdb.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if and .Values.broker.pdb.create (gt $replicaCount 0) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ printf "%s-broker" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.broker.pdb.minAvailable }} + minAvailable: {{ .Values.broker.pdb.minAvailable }} + {{- end }} + {{- if .Values.broker.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.broker.pdb.maxUnavailable }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/broker/statefulset.yaml b/manifest/helm-charts/infra/kafka/templates/broker/statefulset.yaml new file mode 100644 index 000000000..399600379 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/broker/statefulset.yaml @@ -0,0 +1,455 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if gt $replicaCount 0 }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-broker" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: {{ .Values.broker.podManagementPolicy }} + replicas: {{ .Values.broker.replicaCount }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + serviceName: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.broker.updateStrategy "context" $ ) | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + annotations: + {{- if (include "kafka.broker.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/broker/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createSaslSecret" .) }} + checksum/passwords-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createTlsSecret" .) 
}}
+        checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if (include "kafka.metrics.jmx.createConfigmap" .) }}
+        checksum/jmx-configuration: {{ include (print $.Template.BasePath "/metrics/jmx-configmap.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if .Values.broker.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.broker.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- include "kafka.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.broker.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.broker.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      hostNetwork: {{ .Values.broker.hostNetwork }}
+      hostIPC: {{ .Values.broker.hostIPC }}
+      {{- if .Values.broker.schedulerName }}
+      schedulerName: {{ .Values.broker.schedulerName | quote }}
+      {{- end }}
+      {{- if .Values.broker.affinity }}
+      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.broker.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.broker.podAffinityPreset "component" "broker" "customLabels" $podLabels "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.broker.podAntiAffinityPreset "component" "broker" "customLabels" $podLabels "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.broker.nodeAffinityPreset.type "key" .Values.broker.nodeAffinityPreset.key "values" .Values.broker.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.broker.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.broker.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.broker.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.broker.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.broker.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.broker.topologySpreadConstraints "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.broker.terminationGracePeriodSeconds }}
+      terminationGracePeriodSeconds: {{ .Values.broker.terminationGracePeriodSeconds }}
+      {{- end }}
+      {{- if .Values.broker.priorityClassName }}
+      priorityClassName: {{ .Values.broker.priorityClassName }}
+      {{- end }}
+      {{- if .Values.broker.runtimeClassName }}
+      runtimeClassName: {{ .Values.broker.runtimeClassName }}
+      {{- end }}
+      {{- if .Values.broker.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.broker.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "kafka.serviceAccountName" . }}
+      enableServiceLinks: {{ .Values.broker.enableServiceLinks }}
+      initContainers:
+        {{- if and .Values.volumePermissions.enabled .Values.broker.persistence.enabled }}
+        - name: volume-permissions
+          image: {{ include "kafka.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.broker.persistence.mountPath }}" "{{ .Values.broker.logPersistence.mountPath }}" + chown -R {{ .Values.broker.containerSecurityContext.runAsUser }}:{{ .Values.broker.podSecurityContext.fsGroup }} "{{ .Values.broker.persistence.mountPath }}" "{{ .Values.broker.logPersistence.mountPath }}" + find "{{ .Values.broker.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.broker.containerSecurityContext.runAsUser }}:{{ .Values.broker.podSecurityContext.fsGroup }} + find "{{ .Values.broker.logPersistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.broker.containerSecurityContext.runAsUser }}:{{ .Values.broker.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.broker.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.broker.logPersistence.mountPath }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + {{- include "kafka.autoDiscoveryInitContainer" ( dict "role" "broker" "context" $) | nindent 8 }} + {{- end }} + {{- include "kafka.prepareKafkaInitContainer" ( dict "role" "broker" "context" $) | nindent 8 }} + {{- if .Values.broker.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.broker.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.broker.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.broker.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.broker.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.broker.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.broker.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.broker.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: KAFKA_HEAP_OPTS + value: {{ coalesce .Values.broker.heapOpts .Values.heapOpts | quote }} + {{- if .Values.kraft.enabled }} + - name: KAFKA_KRAFT_CLUSTER_ID + valueFrom: + secretKeyRef: + name: {{ printf "%s-kraft-cluster-id" (include "common.names.fullname" .) }} + key: kraft-cluster-id + {{- if .Values.broker.zookeeperMigrationMode }} + - name: KAFKA_SKIP_KRAFT_STORAGE_INIT + value: "true" + {{- end }} + {{- end }} + {{- if and (include "kafka.saslEnabled" .) (or (regexFind "SCRAM" (upper .Values.sasl.enabledMechanisms)) (regexFind "SCRAM" (upper .Values.sasl.controllerMechanism)) (regexFind "SCRAM" (upper .Values.sasl.interBrokerMechanism))) }} + {{- if or .Values.zookeeper.enabled .Values.externalZookeeper.servers }} + - name: KAFKA_ZOOKEEPER_BOOTSTRAP_SCRAM_USERS + value: "true" + {{- else }} + - name: KAFKA_KRAFT_BOOTSTRAP_SCRAM_USERS + value: "true" + {{- end }} + {{- if and (include "kafka.client.saslEnabled" . ) .Values.sasl.client.users }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: client-passwords + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.sasl.interbroker.user | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: inter-broker-password + {{- end }} + {{- if and .Values.kraft.enabled (regexFind "SASL" (upper .Values.listeners.controller.protocol)) }} + - name: KAFKA_CONTROLLER_USER + value: {{ .Values.sasl.controller.user | quote }} + - name: KAFKA_CONTROLLER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . 
}} + key: controller-password + {{- end }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: JMX_PORT + value: {{ .Values.metrics.jmx.kafkaJmxPort | quote }} + {{- end }} + {{- if .Values.broker.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.broker.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.broker.extraEnvVarsCM .Values.extraEnvVarsCM .Values.broker.extraEnvVarsSecret .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.broker.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.broker.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.broker.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.broker.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.listeners.client.containerPort }} + - name: interbroker + containerPort: {{ .Values.listeners.interbroker.containerPort }} + {{- if .Values.externalAccess.enabled }} + - name: external + containerPort: {{ .Values.listeners.external.containerPort }} + {{- end }} + {{- if .Values.listeners.extraListeners }} + {{- include "kafka.extraListeners.containerPorts" . | nindent 12 }} + {{- end }} + {{- if .Values.broker.extraContainerPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.extraContainerPorts "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.broker.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.broker.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.broker.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.broker.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "client" + {{- end }} + {{- if .Values.broker.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.broker.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.broker.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.broker.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "client" + {{- end }} + {{- if .Values.broker.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.broker.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.broker.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.broker.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "client" + {{- end }} + {{- end }} + {{- if .Values.broker.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.broker.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.broker.resources }} + resources: {{- toYaml 
.Values.broker.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.broker.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.broker.logPersistence.mountPath }} + - name: kafka-config + mountPath: /opt/bitnami/kafka/config/server.properties + subPath: server.properties + - name: tmp + mountPath: /tmp + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: /opt/bitnami/kafka/config/log4j.properties + subPath: log4j.properties + {{- end }} + {{- if or .Values.tls.zookeeper.enabled (include "kafka.sslEnabled" .) }} + - name: kafka-shared-certs + mountPath: /opt/bitnami/kafka/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.broker.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ include "kafka.metrics.jmx.image" . }} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.jmx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - java + args: + - -XX:MaxRAMPercentage=100 + - -XshowSettings:vm + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.broker.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: kafka-configmaps + configMap: + name: {{ include "kafka.broker.configmapName" . }} + - name: kafka-secret-config + {{- if (include "kafka.broker.secretConfigExists" .) }} + secret: + secretName: {{ include "kafka.broker.secretConfigName" . }} + {{- else }} + emptyDir: {} + {{- end }} + - name: kafka-config + emptyDir: {} + - name: tmp + emptyDir: {} + - name: scripts + configMap: + name: {{ include "common.names.fullname" . }}-scripts + defaultMode: 0755 + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: kafka-autodiscovery-shared + emptyDir: {} + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if or .Values.tls.zookeeper.enabled (include "kafka.sslEnabled" .) 
}} + - name: kafka-shared-certs + emptyDir: {} + {{- if and (include "kafka.sslEnabled" .) (or .Values.tls.existingSecret .Values.tls.autoGenerated) }} + - name: kafka-certs + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "kafka.tlsSecretName" . }} + {{- if .Values.tls.jksTruststoreSecret }} + - secret: + name: {{ .Values.tls.jksTruststoreSecret }} + {{- end }} + {{- end }} + {{- if and .Values.tls.zookeeper.enabled .Values.tls.zookeeper.existingSecret }} + - name: kafka-zookeeper-cert + secret: + secretName: {{ .Values.tls.zookeeper.existingSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.broker.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.broker.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.broker.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.broker.persistence.existingClaim .) }} + {{- end }} + {{- if not .Values.broker.logPersistence.enabled }} + - name: logs + emptyDir: {} + {{- else if .Values.broker.logPersistence.existingClaim }} + - name: logs + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.broker.logPersistence.existingClaim .) }} + {{- end }} + {{- if or (and .Values.broker.persistence.enabled (not .Values.broker.persistence.existingClaim)) (and .Values.broker.logPersistence.enabled (not .Values.broker.logPersistence.existingClaim)) }} + volumeClaimTemplates: + {{- if and .Values.broker.persistence.enabled (not .Values.broker.persistence.existingClaim) }} + - metadata: + name: data + {{- if .Values.broker.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.broker.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.broker.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.broker.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.broker.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.broker.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.broker.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.broker.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.broker.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- if and .Values.broker.logPersistence.enabled (not .Values.broker.logPersistence.existingClaim) }} + - metadata: + name: logs + {{- if .Values.broker.logPersistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.broker.logPersistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.broker.logPersistence.accessModes }} + - {{ . 
| quote }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.broker.logPersistence.size | quote }}
+        {{- include "common.storage.class" (dict "persistence" .Values.broker.logPersistence "global" .Values.global) | nindent 8 }}
+        {{- if .Values.broker.logPersistence.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" .Values.broker.logPersistence.selector "context" $) | nindent 10 }}
+        {{- end -}}
+    {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/broker/svc-external-access.yaml b/manifest/helm-charts/infra/kafka/templates/broker/svc-external-access.yaml
new file mode 100644
index 000000000..53ae586fe
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/broker/svc-external-access.yaml
@@ -0,0 +1,63 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.externalAccess.enabled }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $replicaCount := .Values.broker.replicaCount | int }}
+{{- range $i := until $replicaCount }}
+{{- $targetPod := printf "%s-broker-%d" $fullname $i }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }}
+  namespace: {{ include "common.names.namespace" $ | quote }}
+  {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.broker.service.labels $.Values.commonLabels ) "context" $ ) }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: kafka
+    pod: {{ $targetPod }}
+  {{- if or $.Values.externalAccess.broker.service.annotations $.Values.commonAnnotations $.Values.externalAccess.broker.service.loadBalancerAnnotations }}
+  annotations:
+    {{- if and (not (empty $.Values.externalAccess.broker.service.loadBalancerAnnotations)) (eq (len $.Values.externalAccess.broker.service.loadBalancerAnnotations) $replicaCount) }}
+    {{ include "common.tplvalues.render" ( dict "value" (index $.Values.externalAccess.broker.service.loadBalancerAnnotations $i) "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if or $.Values.externalAccess.broker.service.annotations $.Values.commonAnnotations }}
+    {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.broker.service.annotations $.Values.commonAnnotations ) "context" $ ) }}
+    {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ $.Values.externalAccess.broker.service.type }}
+  {{- if eq $.Values.externalAccess.broker.service.type "LoadBalancer" }}
+  {{- if and (not (empty $.Values.externalAccess.broker.service.loadBalancerIPs)) (eq (len $.Values.externalAccess.broker.service.loadBalancerIPs) $replicaCount) }}
+  loadBalancerIP: {{ index $.Values.externalAccess.broker.service.loadBalancerIPs $i }}
+  {{- end }}
+  {{- if $.Values.externalAccess.broker.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges: {{- toYaml $.Values.externalAccess.broker.service.loadBalancerSourceRanges | nindent 4 }}
+  {{- end }}
+  {{- end }}
+  publishNotReadyAddresses: {{ $.Values.externalAccess.broker.service.publishNotReadyAddresses }}
+  ports:
+    - name: tcp-kafka
+      port: {{ $.Values.externalAccess.broker.service.ports.external }}
+      {{- if le (add $i 1) (len $.Values.externalAccess.broker.service.nodePorts) }}
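+      {{- /* One external Service is rendered per broker pod. A fixed nodePort is only set
+      when externalAccess.broker.service.nodePorts provides one entry per replica; otherwise
+      the field is left null so Kubernetes allocates a port dynamically. */}}
+      nodePort: {{ index $.Values.externalAccess.broker.service.nodePorts $i }}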
+      {{- else }}
+      nodePort: null
+      {{- end }}
+      targetPort: external
+    {{- if $.Values.externalAccess.broker.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" $.Values.externalAccess.broker.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  {{- if and (eq $.Values.externalAccess.broker.service.type "NodePort") (le (add $i 1) (len $.Values.externalAccess.broker.service.externalIPs)) }}
+  externalIPs: [{{ index $.Values.externalAccess.broker.service.externalIPs $i | quote }}]
+  {{- end }}
+  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.broker.podLabels $.Values.commonLabels ) "context" $ ) }}
+  selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/part-of: kafka
+    app.kubernetes.io/component: broker
+    statefulset.kubernetes.io/pod-name: {{ $targetPod }}
+---
+{{- end }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/broker/svc-headless.yaml b/manifest/helm-charts/infra/kafka/templates/broker/svc-headless.yaml
new file mode 100644
index 000000000..7c373e3b8
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/broker/svc-headless.yaml
@@ -0,0 +1,38 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- $replicaCount := int .Values.broker.replicaCount }}
+{{- if gt $replicaCount 0 }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.broker.labels .Values.commonLabels ) "context" . ) }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: broker
+    app.kubernetes.io/part-of: kafka
+  {{- if or .Values.service.headless.broker.annotations .Values.commonAnnotations }}
+  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.broker.annotations .Values.commonAnnotations ) "context" . ) }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  ports:
+    - name: tcp-interbroker
+      port: {{ .Values.service.ports.interbroker }}
+      protocol: TCP
+      targetPort: interbroker
+    - name: tcp-client
+      port: {{ .Values.service.ports.client }}
+      protocol: TCP
+      targetPort: client
+  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.podLabels .Values.commonLabels ) "context" . ) }}
+  selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: broker
+    app.kubernetes.io/part-of: kafka
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/controller-eligible/config-secrets.yaml b/manifest/helm-charts/infra/kafka/templates/controller-eligible/config-secrets.yaml
new file mode 100644
index 000000000..8eaa1ac56
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/controller-eligible/config-secrets.yaml
@@ -0,0 +1,25 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- $replicaCount := int .Values.controller.replicaCount }}
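+{{- /* Sensitive server.properties overrides for controller-eligible nodes are shipped in a
+Secret rather than the plain ConfigMap; they are rendered by the kafka.controller.secretConfig
+helper and merged into the final configuration at start-up. */}}
+{{- if and (include "kafka.controller.createSecretConfig" .) (gt $replicaCount 0) }}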
+{{- $secretName := printf "%s-controller-secret-configuration" (include "common.names.fullname" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  server-secret.properties: {{ include "kafka.controller.secretConfig" . | b64enc }}
+{{- end }}
+
diff --git a/manifest/helm-charts/infra/kafka/templates/controller-eligible/configmap.yaml b/manifest/helm-charts/infra/kafka/templates/controller-eligible/configmap.yaml
new file mode 100644
index 000000000..ed77b6533
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/controller-eligible/configmap.yaml
@@ -0,0 +1,46 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- $replicaCount := int .Values.controller.replicaCount }}
+{{- if and .Values.kraft.enabled (include "kafka.controller.createConfigmap" .) (gt $replicaCount 0) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-controller-configuration" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: controller-eligible
+    app.kubernetes.io/part-of: kafka
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  {{- if or .Values.config .Values.controller.config }}
+  server.properties: {{- include "common.tplvalues.render" ( dict "value" (coalesce .Values.controller.config .Values.config) "context" $ ) | nindent 4 }}
+  {{- else }}
+  server.properties: |-
+    # Listeners configuration
+    listeners={{ include "kafka.listeners" ( dict "isController" true "context" $ ) }}
+    {{- if not .Values.controller.controllerOnly }}
+    advertised.listeners={{ include "kafka.advertisedListeners" . }}
+    {{- end }}
+    listener.security.protocol.map={{ include "kafka.securityProtocolMap" . }}
+    {{- if .Values.kraft.enabled }}
+    # KRaft process roles
+    process.roles={{ ternary "controller" "controller,broker" .Values.controller.controllerOnly }}
+    {{- include "kafka.kraftConfig" . | nindent 4 }}
+    {{- end }}
+    {{- if or .Values.zookeeper.enabled .Values.externalZookeeper.servers }}
+    # ZooKeeper migration: keep the inter-broker protocol pinned while metadata moves to KRaft
+    zookeeper.metadata.migration.enable=true
+    inter.broker.protocol.version={{ default (regexFind "^[0-9].[0-9]+" .Chart.AppVersion) .Values.interBrokerProtocolVersion }}
+    {{- include "kafka.zookeeperConfig" . | nindent 4 }}
+    {{- end }}
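+    # Role-independent defaults are rendered by the kafka.commonConfig helper (_helpers.tpl);
+    # the extraConfig and controller.extraConfig values are appended after it.
+    {{- include "kafka.commonConfig" .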
| nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraConfig "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.controller.extraConfig "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/controller-eligible/pdb.yaml b/manifest/helm-charts/infra/kafka/templates/controller-eligible/pdb.yaml new file mode 100644 index 000000000..4e4cd06e9 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/controller-eligible/pdb.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- if and .Values.controller.pdb.create .Values.kraft.enabled (gt $replicaCount 0) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ printf "%s-controller" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.controller.pdb.minAvailable }} + minAvailable: {{ .Values.controller.pdb.minAvailable }} + {{- end }} + {{- if .Values.controller.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.controller.pdb.maxUnavailable }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/controller-eligible/statefulset.yaml b/manifest/helm-charts/infra/kafka/templates/controller-eligible/statefulset.yaml new file mode 100644 index 000000000..d661a455b --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/controller-eligible/statefulset.yaml @@ -0,0 +1,448 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- if and .Values.kraft.enabled (gt $replicaCount 0) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-controller" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: {{ .Values.controller.podManagementPolicy }} + replicas: {{ .Values.controller.replicaCount }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + serviceName: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.controller.updateStrategy "context" $ ) | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + annotations: + {{- if (include "kafka.controller.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/controller-eligible/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createSaslSecret" .) }} + checksum/passwords-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createTlsSecret" .) }} + checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.metrics.jmx.createConfigmap" .) }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/metrics/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.controller.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.controller.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.controller.hostAliases "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + hostIPC: {{ .Values.controller.hostIPC }} + {{- if .Values.controller.schedulerName }} + schedulerName: {{ .Values.controller.schedulerName | quote }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.controller.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.controller.podAffinityPreset "component" "controller-eligible" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.controller.podAntiAffinityPreset "component" "controller-eligible" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.controller.nodeAffinityPreset.type "key" .Values.controller.nodeAffinityPreset.key "values" .Values.controller.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.controller.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.controller.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.controller.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.controller.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ 
.Values.controller.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if .Values.controller.runtimeClassName }} + runtimeClassName: {{ .Values.controller.runtimeClassName }} + {{- end }} + {{- if .Values.controller.podSecurityContext.enabled }} + securityContext: {{- omit .Values.controller.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kafka.serviceAccountName" . }} + enableServiceLinks: {{ .Values.controller.enableServiceLinks }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.controller.persistence.enabled }} + - name: volume-permissions + image: {{ include "kafka.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.controller.persistence.mountPath }}" "{{ .Values.controller.logPersistence.mountPath }}" + chown -R {{ .Values.controller.containerSecurityContext.runAsUser }}:{{ .Values.controller.podSecurityContext.fsGroup }} "{{ .Values.controller.persistence.mountPath }}" "{{ .Values.controller.logPersistence.mountPath }}" + find "{{ .Values.controller.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.controller.containerSecurityContext.runAsUser }}:{{ .Values.controller.podSecurityContext.fsGroup }} + find "{{ .Values.controller.logPersistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.controller.containerSecurityContext.runAsUser }}:{{ .Values.controller.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.controller.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.controller.logPersistence.mountPath }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (or .Values.externalAccess.controller.forceExpose (not .Values.controller.controllerOnly))}} + {{- include "kafka.autoDiscoveryInitContainer" ( dict "role" "controller" "context" $) | nindent 8 }} + {{- end }} + {{- include "kafka.prepareKafkaInitContainer" ( dict "role" "controller" "context" $) | nindent 8 }} + {{- if .Values.controller.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.controller.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.controller.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.controller.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.controller.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.controller.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.controller.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.controller.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: KAFKA_HEAP_OPTS + value: {{ coalesce .Values.controller.heapOpts .Values.heapOpts | quote }} + - name: KAFKA_KRAFT_CLUSTER_ID + valueFrom: + secretKeyRef: + name: {{ printf "%s-kraft-cluster-id" (include "common.names.fullname" .) }} + key: kraft-cluster-id + {{- if and (include "kafka.saslEnabled" .) (or (regexFind "SCRAM" (upper .Values.sasl.enabledMechanisms)) (regexFind "SCRAM" (upper .Values.sasl.controllerMechanism)) (regexFind "SCRAM" (upper .Values.sasl.interBrokerMechanism))) }} + - name: KAFKA_KRAFT_BOOTSTRAP_SCRAM_USERS + value: "true" + {{- if and (include "kafka.client.saslEnabled" . ) .Values.sasl.client.users }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: client-passwords + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.sasl.interbroker.user | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: inter-broker-password + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.controller.protocol) }} + - name: KAFKA_CONTROLLER_USER + value: {{ .Values.sasl.controller.user | quote }} + - name: KAFKA_CONTROLLER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . 
}} + key: controller-password + {{- end }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: JMX_PORT + value: {{ .Values.metrics.jmx.kafkaJmxPort | quote }} + {{- end }} + {{- if .Values.controller.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.controller.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.controller.extraEnvVarsCM .Values.extraEnvVarsCM .Values.controller.extraEnvVarsSecret .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.controller.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.controller.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.controller.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.controller.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + - name: controller + containerPort: {{ .Values.listeners.controller.containerPort }} + {{- if not .Values.controller.controllerOnly }} + - name: client + containerPort: {{ .Values.listeners.client.containerPort }} + - name: interbroker + containerPort: {{ .Values.listeners.interbroker.containerPort }} + {{- if .Values.externalAccess.enabled }} + - name: external + containerPort: {{ .Values.listeners.external.containerPort }} + {{- end }} + {{- if .Values.listeners.extraListeners }} + {{- include "kafka.extraListeners.containerPorts" . 
| nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraContainerPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.extraContainerPorts "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.controller.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.controller.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.controller.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.controller.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "controller" + {{- end }} + {{- if .Values.controller.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.controller.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.controller.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.controller.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "controller" + {{- end }} + {{- if .Values.controller.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.controller.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.controller.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.controller.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "controller" + {{- end }} + {{- end }} + {{- if .Values.controller.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.controller.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{- toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.controller.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.controller.logPersistence.mountPath }} + - name: kafka-config + mountPath: /opt/bitnami/kafka/config/server.properties + subPath: server.properties + - name: tmp + mountPath: /tmp + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: /opt/bitnami/kafka/config/log4j.properties + subPath: log4j.properties + {{- end }} + {{- if or .Values.tls.zookeeper.enabled (include "kafka.sslEnabled" .) }} + - name: kafka-shared-certs + mountPath: /opt/bitnami/kafka/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ include "kafka.metrics.jmx.image" . 
}} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.jmx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - java + args: + - -XX:MaxRAMPercentage=100 + - -XshowSettings:vm + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.controller.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: kafka-configmaps + configMap: + name: {{ include "kafka.controller.configmapName" . }} + - name: kafka-secret-config + {{- if (include "kafka.controller.secretConfigExists" .) }} + secret: + secretName: {{ include "kafka.controller.secretConfigName" . }} + {{- else }} + emptyDir: {} + {{- end }} + - name: kafka-config + emptyDir: {} + - name: tmp + emptyDir: {} + - name: scripts + configMap: + name: {{ include "common.names.fullname" . }}-scripts + defaultMode: 0755 + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: kafka-autodiscovery-shared + emptyDir: {} + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if or .Values.tls.zookeeper.enabled (include "kafka.sslEnabled" .) }} + - name: kafka-shared-certs + emptyDir: {} + {{- if and (include "kafka.sslEnabled" .) (or .Values.tls.existingSecret .Values.tls.autoGenerated) }} + - name: kafka-certs + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "kafka.tlsSecretName" . 
}} + {{- if .Values.tls.jksTruststoreSecret }} + - secret: + name: {{ .Values.tls.jksTruststoreSecret }} + {{- end }} + {{- end }} + {{- if and .Values.tls.zookeeper.enabled .Values.tls.zookeeper.existingSecret }} + - name: kafka-zookeeper-cert + secret: + secretName: {{ .Values.tls.zookeeper.existingSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.controller.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.controller.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.controller.persistence.existingClaim .) }} + {{- end }} + {{- if not .Values.controller.logPersistence.enabled }} + - name: logs + emptyDir: {} + {{- else if .Values.controller.logPersistence.existingClaim }} + - name: logs + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.controller.logPersistence.existingClaim .) }} + {{- end }} + {{- if or (and .Values.controller.persistence.enabled (not .Values.controller.persistence.existingClaim)) (and .Values.controller.logPersistence.enabled (not .Values.controller.logPersistence.existingClaim)) }} + volumeClaimTemplates: + {{- if and .Values.controller.persistence.enabled (not .Values.controller.persistence.existingClaim) }} + - metadata: + name: data + {{- if .Values.controller.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.controller.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.controller.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.controller.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.controller.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.controller.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.controller.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.controller.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.controller.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- if and .Values.controller.logPersistence.enabled (not .Values.controller.logPersistence.existingClaim) }} + - metadata: + name: logs + {{- if .Values.controller.logPersistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.controller.logPersistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.controller.logPersistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.controller.logPersistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.controller.logPersistence "global" .Values.global) | nindent 8 }} + {{- if .Values.controller.logPersistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.controller.logPersistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/controller-eligible/svc-external-access.yaml b/manifest/helm-charts/infra/kafka/templates/controller-eligible/svc-external-access.yaml new file mode 100644 index 000000000..68f9854c7 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/controller-eligible/svc-external-access.yaml @@ -0,0 +1,65 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.kraft.enabled .Values.externalAccess.enabled }} +{{- $fullname := include "common.names.fullname" . }} +{{- if or .Values.externalAccess.controller.forceExpose (not .Values.controller.controllerOnly)}} +{{- $replicaCount := .Values.controller.replicaCount | int }} +{{- range $i := until $replicaCount }} +{{- $targetPod := printf "%s-controller-%d" $fullname $i }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-controller-%d-external" $fullname $i | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" $ | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.controller.service.labels $.Values.commonLabels ) "context" $ ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if or $.Values.externalAccess.controller.service.annotations $.Values.commonAnnotations $.Values.externalAccess.controller.service.loadBalancerAnnotations }} + annotations: + {{- if and (not (empty $.Values.externalAccess.controller.service.loadBalancerAnnotations)) (eq (len $.Values.externalAccess.controller.service.loadBalancerAnnotations) $replicaCount) }} + {{ include "common.tplvalues.render" ( dict "value" (index $.Values.externalAccess.controller.service.loadBalancerAnnotations $i) "context" $) | nindent 4 }} + {{- end }} + {{- if or $.Values.externalAccess.controller.service.annotations $.Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.controller.service.annotations $.Values.commonAnnotations ) "context" $ ) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $.Values.externalAccess.controller.service.type }} + {{- if eq $.Values.externalAccess.controller.service.type "LoadBalancer" }} + {{- if and (not (empty $.Values.externalAccess.controller.service.loadBalancerIPs)) (eq (len $.Values.externalAccess.controller.service.loadBalancerIPs) $replicaCount) }} + loadBalancerIP: {{ index $.Values.externalAccess.controller.service.loadBalancerIPs $i }} + {{- end }} + {{- if $.Values.externalAccess.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $.Values.externalAccess.controller.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + publishNotReadyAddresses: {{ $.Values.externalAccess.controller.service.publishNotReadyAddresses }} + ports: + - name: 
tcp-kafka + port: {{ $.Values.externalAccess.controller.service.ports.external }} + {{- if le (add $i 1) (len $.Values.externalAccess.controller.service.nodePorts) }} + nodePort: {{ index $.Values.externalAccess.controller.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: external + {{- if $.Values.externalAccess.controller.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $.Values.externalAccess.controller.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $.Values.externalAccess.controller.service.type "NodePort") (le (add $i 1) (len $.Values.externalAccess.controller.service.externalIPs)) }} + externalIPs: [{{ index $.Values.externalAccess.controller.service.externalIPs $i | quote }}] + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.controller.podLabels $.Values.commonLabels ) "context" $ ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + app.kubernetes.io/component: controller-eligible + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/controller-eligible/svc-headless.yaml b/manifest/helm-charts/infra/kafka/templates/controller-eligible/svc-headless.yaml new file mode 100644 index 000000000..7d862254f --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/controller-eligible/svc-headless.yaml @@ -0,0 +1,46 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- if and .Values.kraft.enabled (gt $replicaCount 0) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.controller.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if or .Values.service.headless.controller.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.controller.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + {{- if or (not .Values.kraft.enabled) (not .Values.controller.controllerOnly) }} + - name: tcp-interbroker + port: {{ .Values.service.ports.interbroker }} + protocol: TCP + targetPort: interbroker + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: client + {{- end }} + {{- if .Values.kraft.enabled }} + - name: tcp-controller + protocol: TCP + port: {{ .Values.service.ports.controller }} + targetPort: controller + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.podLabels .Values.commonLabels ) "context" . 
) }}
+  selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: controller-eligible
+    app.kubernetes.io/part-of: kafka
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/extra-list.yaml b/manifest/helm-charts/infra/kafka/templates/extra-list.yaml
new file mode 100644
index 000000000..2d35a580e
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/extra-list.yaml
@@ -0,0 +1,9 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/log4j-configmap.yaml b/manifest/helm-charts/infra/kafka/templates/log4j-configmap.yaml
new file mode 100644
index 000000000..0c29a2a19
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/log4j-configmap.yaml
@@ -0,0 +1,20 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.log4j (not .Values.existingLog4jConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-log4j-configuration" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/part-of: kafka
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  log4j.properties: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.log4j "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/metrics/deployment.yaml b/manifest/helm-charts/infra/kafka/templates/metrics/deployment.yaml
new file mode 100644
index 000000000..b32b36f91
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/metrics/deployment.yaml
@@ -0,0 +1,174 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.metrics.kafka.enabled }}
+{{- $releaseNamespace := include "common.names.namespace" . -}}
+{{- $clusterDomain := .Values.clusterDomain -}}
+{{- $fullname := include "common.names.fullname" . -}}
+{{- $containerPort := int .Values.listeners.client.containerPort -}}
+apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "kafka.metrics.kafka.fullname" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.metrics.kafka.image "chart" .Chart ) ) }}
+  {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: cluster-metrics
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  replicas: 1
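+  {{- /* A single kafka-exporter replica is sufficient: it scrapes every controller and
+  broker through the --kafka.server flags assembled in the container args below. */}}
+  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.kafka.podLabels .Values.commonLabels $versionLabel ) "context" .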
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: cluster-metrics + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.metrics.kafka.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.metrics.kafka.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "cluster-metrics" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "cluster-metrics" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.kafka.nodeAffinityPreset.type "key" .Values.metrics.kafka.nodeAffinityPreset.key "values" .Values.metrics.kafka.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.kafka.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.priorityClassName }} + priorityClassName: {{ .Values.metrics.kafka.priorityClassName }} + {{- end }} + {{- if .Values.metrics.kafka.schedulerName }} + schedulerName: {{ .Values.metrics.kafka.schedulerName }} + {{- end }} + {{- if .Values.metrics.kafka.podSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + enableServiceLinks: {{ .Values.metrics.kafka.enableServiceLinks }} + {{- if .Values.metrics.kafka.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: kafka-exporter + image: {{ include "kafka.metrics.kafka.image" . 
}} + imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + {{- if .Values.metrics.kafka.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.command "context" $) | nindent 12 }} + {{- else }} + command: + - bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ce + - | + kafka_exporter \ + {{- range $i := until (int .Values.controller.replicaCount) }} + --kafka.server={{ $fullname }}-controller-{{ $i }}.{{ $fullname }}-controller-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $containerPort }} \ + {{- end }} + {{- range $i := until (int .Values.broker.replicaCount) }} + --kafka.server={{ $fullname }}-broker-{{ $i }}.{{ $fullname }}-broker-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $containerPort }} \ + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.client.protocol) }} + --sasl.enabled \ + --sasl.username=$SASL_USERNAME \ + --sasl.password=$SASL_USER_PASSWORD \ + --sasl.mechanism={{ include "kafka.metrics.kafka.saslMechanism" . }} \ + {{- end }} + {{- if regexFind "SSL" (upper .Values.listeners.client.protocol) }} + --tls.enabled \ + {{- if .Values.metrics.kafka.certificatesSecret }} + --tls.key-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsKey }} \ + --tls.cert-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCert }} \ + {{- if .Values.metrics.kafka.tlsCaSecret }} + --tls.ca-file=/opt/bitnami/kafka-exporter/cacert/{{ .Values.metrics.kafka.tlsCaCert }} \ + {{- else }} + --tls.ca-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCaCert }} \ + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.metrics.kafka.extraFlags }} + --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ + {{- end }} + --web.listen-address=:{{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.client.protocol) }} + env: + - name: SASL_USERNAME + value: {{ index .Values.sasl.client.users 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . 
}} + key: system-user-password + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.kafka.containerPorts.metrics }} + {{- if .Values.metrics.kafka.resources }} + resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.metrics.kafka.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if and (regexFind "SSL" (upper .Values.listeners.client.protocol)) .Values.metrics.kafka.certificatesSecret }} + - name: kafka-exporter-certificates + mountPath: /opt/bitnami/kafka-exporter/certs/ + readOnly: true + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + mountPath: /opt/bitnami/kafka-exporter/cacert/ + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.metrics.kafka.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and (regexFind "SSL" (upper .Values.listeners.client.protocol)) .Values.metrics.kafka.certificatesSecret }} + - name: kafka-exporter-certificates + secret: + secretName: {{ .Values.metrics.kafka.certificatesSecret }} + defaultMode: 0440 + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + secret: + secretName: {{ .Values.metrics.kafka.tlsCaSecret }} + defaultMode: 0440 + {{- end }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/metrics/jmx-configmap.yaml b/manifest/helm-charts/infra/kafka/templates/metrics/jmx-configmap.yaml new file mode 100644 index 000000000..64fad2565 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/metrics/jmx-configmap.yaml @@ -0,0 +1,70 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "kafka.metrics.jmx.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: metrics
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  jmx-kafka-prometheus.yml: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.config "context" $ ) | nindent 4 }}
+    rules:
+      - pattern: kafka.controller<type=(ControllerChannelManager), name=(QueueSize), broker-id=(\d+)><>(Value)
+        name: kafka_controller_$1_$2_$4
+        labels:
+          broker_id: "$3"
+      - pattern: kafka.controller<type=(ControllerChannelManager), name=(TotalQueueSize)><>(Value)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.controller<type=(KafkaController), name=(.+)><>(Value)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.controller<type=(ControllerStats), name=(.+)><>(Count)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.network<type=(Processor), name=(IdlePercent), networkProcessor=(.+)><>(Value)
+        name: kafka_network_$1_$2_$4
+        labels:
+          network_processor: $3
+      - pattern: kafka.network<type=(RequestMetrics), name=(RequestsPerSec), request=(.+)><>(Count|Value)
+        name: kafka_network_$1_$2_$4
+        labels:
+          request: $3
+      - pattern: kafka.network<type=(SocketServer), name=(NetworkProcessorAvgIdlePercent)><>(Count|Value)
+        name: kafka_network_$1_$2_$3
+      - pattern: kafka.network<type=(RequestChannel), name=(RequestQueueSize)><>(Count|Value)
+        name: kafka_network_$1_$2_$3
+      - pattern: kafka.server<type=(.+), name=(.+), topic=(.+)><>(Count|OneMinuteRate)
+        name: kafka_server_$1_$2_$4
+        labels:
+          topic: $3
+      - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+)><>(Value)
+        name: kafka_server_$1_$2_$4
+        labels:
+          client_id: "$3"
+      - pattern: kafka.server<type=(.+), name=(.+), delayedOperation=(.+)><>(Value)
+        name: kafka_server_$1_$2_$3_$4
+      - pattern: kafka.server<type=(.+), name=(.+)><>(Count|Value|OneMinuteRate)
+        name: kafka_server_$1_total_$2_$3
+      - pattern: kafka.server<type=(.+)><>(queue-size)
+        name: kafka_server_$1_$2
+      - pattern: java.lang<type=(.+), name=(.+)><(.+)>(\w+)
+        name: java_lang_$1_$4_$3_$2
+      - pattern: java.lang<type=(.+), name=(.+)><>(\w+)
+        name: java_lang_$1_$3_$2
+      - pattern: java.lang<type=(.*)>
+      - pattern: kafka.log<type=(Log), name=(.+), topic=(.+), partition=(.+)><>Value
+        name: kafka_log_$1_$2
+        labels:
+          topic: $3
+          partition: $4
+      {{- if .Values.metrics.jmx.extraRules }}
+      {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.extraRules "context" $ ) | nindent 6 }}
+      {{- end }}
+{{- end -}}
diff --git a/manifest/helm-charts/infra/kafka/templates/metrics/jmx-servicemonitor.yaml b/manifest/helm-charts/infra/kafka/templates/metrics/jmx-servicemonitor.yaml
new file mode 100644
index 000000000..13ca96c1e
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/metrics/jmx-servicemonitor.yaml
@@ -0,0 +1,49 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }}
+  namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }}
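+  {{- /* The ServiceMonitor is created in metrics.serviceMonitor.namespace when set
+  (typically where the Prometheus Operator watches), else in the release namespace. */}}
+  {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" .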
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: "/" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/metrics/jmx-svc.yaml b/manifest/helm-charts/infra/kafka/templates/metrics/jmx-svc.yaml new file mode 100644 index 000000000..0b123c2d0 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/metrics/jmx-svc.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.jmx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.jmx.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.jmx.service.sessionAffinity }} + {{- if .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.ports.metrics }} + protocol: TCP + targetPort: metrics + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/metrics/prometheusrule.yaml b/manifest/helm-charts/infra/kafka/templates/metrics/prometheusrule.yaml new file mode 100644 index 000000000..f59f30025 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/metrics/prometheusrule.yaml @@ -0,0 +1,21 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (or .Values.metrics.kafka.enabled .Values.metrics.jmx.enabled) .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.groups }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.prometheusRule.namespace }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.metrics.kafka.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.prometheusRule.labels .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" .) | nindent 4 }} + {{- end }} +spec: + groups: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.groups "context" .) | nindent 4 }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/metrics/serviceaccount.yaml b/manifest/helm-charts/infra/kafka/templates/metrics/serviceaccount.yaml new file mode 100644 index 000000000..bd3c0a10c --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/metrics/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.kafka.enabled .Values.metrics.kafka.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.metrics.kafka.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.metrics.kafka.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/metrics/servicemonitor.yaml b/manifest/helm-charts/infra/kafka/templates/metrics/servicemonitor.yaml new file mode 100644 index 000000000..1f64b9bba --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/metrics/servicemonitor.yaml @@ -0,0 +1,50 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.metrics.kafka.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: cluster-metrics + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
}} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/metrics/svc.yaml b/manifest/helm-charts/infra/kafka/templates/metrics/svc.yaml new file mode 100644 index 000000000..eed033e83 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/metrics/svc.yaml @@ -0,0 +1,34 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.kafka.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.metrics.kafka.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.kafka.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.kafka.service.sessionAffinity }} + {{- if .Values.metrics.kafka.service.clusterIP }} + clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.kafka.service.ports.metrics }} + protocol: TCP + targetPort: metrics + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.kafka.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: cluster-metrics +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/network-policy/networkpolicy-egress.yaml b/manifest/helm-charts/infra/kafka/templates/network-policy/networkpolicy-egress.yaml new file mode 100644 index 000000000..47b8ec1d0 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/network-policy/networkpolicy-egress.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
+  policyTypes:
+    - Egress
+  egress:
+    {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/network-policy/networkpolicy-ingress.yaml b/manifest/helm-charts/infra/kafka/templates/network-policy/networkpolicy-ingress.yaml
new file mode 100644
index 000000000..47314bfcc
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/network-policy/networkpolicy-ingress.yaml
@@ -0,0 +1,53 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+metadata:
+  name: {{ printf "%s-ingress" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
+  policyTypes:
+    - Ingress
+  ingress:
+    # Allow client connections
+    - ports:
+        - port: {{ .Values.listeners.client.containerPort }}
+      {{- if not .Values.networkPolicy.allowExternal }}
+      from:
+        - podSelector:
+            matchLabels:
+              {{ template "common.names.fullname" . }}-client: "true"
+          {{- if .Values.networkPolicy.explicitNamespacesSelector }}
+          namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }}
+          {{- end }}
+      {{- end }}
+    # Allow inter-broker communication
+    - ports:
+        - port: {{ .Values.listeners.interbroker.containerPort }}
+      from:
+        - podSelector:
+            matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
+    # Allow external connections
+    {{- if .Values.externalAccess.enabled }}
+    - ports:
+        - port: {{ .Values.listeners.external.containerPort }}
+      {{- if .Values.networkPolicy.externalAccess.from }}
+      from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }}
+      {{- end }}
+    {{- end }}
+    {{- if .Values.metrics.kafka.enabled }}
+    # Allow Prometheus scrapes
+    - ports:
+        - port: {{ .Values.metrics.kafka.containerPorts.metrics }}
+    {{- end }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/provisioning/job.yaml b/manifest/helm-charts/infra/kafka/templates/provisioning/job.yaml
new file mode 100644
index 000000000..adddad9fe
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/provisioning/job.yaml
@@ -0,0 +1,267 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.provisioning.enabled }} +kind: Job +apiVersion: batch/v1 +metadata: + name: {{ printf "%s-provisioning" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka-provisioning + annotations: + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + template: + metadata: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.provisioning.podLabels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: kafka-provisioning + {{- if .Values.provisioning.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kafka.provisioning.serviceAccountName" . }} + enableServiceLinks: {{ .Values.provisioning.enableServiceLinks }} + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.provisioning.schedulerName }} + schedulerName: {{ .Values.provisioning.schedulerName | quote }} + {{- end }} + {{- if .Values.provisioning.podSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + {{- if .Values.provisioning.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.provisioning.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if or .Values.provisioning.initContainers .Values.provisioning.waitForKafka }} + initContainers: + {{- if .Values.provisioning.waitForKafka }} + - name: wait-for-available-kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + wait-for-port \ + --host={{ include "common.names.fullname" . }} \ + --state=inuse \ + --timeout=120 \ + {{ .Values.service.ports.client | int64 }}; + echo "Kafka is available"; + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: kafka-provisioning + image: {{ include "kafka.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ec + - | + echo "Configuring environment" + . /opt/bitnami/scripts/libkafka.sh + export CLIENT_CONF="${CLIENT_CONF:-/tmp/client.properties}" + if [ ! -f "$CLIENT_CONF" ]; then + touch $CLIENT_CONF + + kafka_common_conf_set "$CLIENT_CONF" security.protocol {{ .Values.listeners.client.protocol | quote }} + {{- if (regexFind "SSL" (upper .Values.listeners.client.protocol)) }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + ! is_empty_value "$KAFKA_CLIENT_KEY_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.key.password "$KAFKA_CLIENT_KEY_PASSWORD" + {{- if eq (upper .Values.provisioning.auth.tls.type) "PEM" }} + {{- if .Values.provisioning.auth.tls.caCert }} + file_to_multiline_property() { + awk 'NR > 1{print line" \\"}{line=$0;}END{print $0" "}' <"${1:?missing file}" + } + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.key "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.key }}")" + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.certificate.chain "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.cert }}")" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.certificates "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.caCert }}")" + {{- else }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}" + {{- end }} + {{- else if eq (upper .Values.provisioning.auth.tls.type) "JKS" }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}" + ! is_empty_value "$KAFKA_CLIENT_KEYSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.password "$KAFKA_CLIENT_KEYSTORE_PASSWORD" + ! 
is_empty_value "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.password "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" + {{- end }} + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.client.protocol) }} + {{- if regexFind "PLAIN" ( upper .Values.sasl.enabledMechanisms) }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism PLAIN + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if regexFind "SCRAM-SHA-256" ( upper .Values.sasl.enabledMechanisms) }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-256 + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if regexFind "SCRAM-SHA-512" ( upper .Values.sasl.enabledMechanisms) }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-512 + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- end }} + {{- end }} + fi + + echo "Running pre-provisioning script if any given" + {{ .Values.provisioning.preScript | nindent 14 }} + + kafka_provisioning_commands=( + {{- range $topic := .Values.provisioning.topics }} + "/opt/bitnami/kafka/bin/kafka-topics.sh \ + --create \ + --if-not-exists \ + --bootstrap-server ${KAFKA_SERVICE} \ + --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \ + --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \ + {{- range $name, $value := $topic.config }} + --config {{ $name }}={{ $value }} \ + {{- end }} + --command-config ${CLIENT_CONF} \ + --topic {{ $topic.name }}" + {{- end }} + {{- range $command := .Values.provisioning.extraProvisioningCommands }} + {{- $command | quote | nindent 16 }} + {{- end }} + ) + + echo "Starting provisioning" + for ((index=0; index < ${#kafka_provisioning_commands[@]}; index+={{ .Values.provisioning.parallel }})) + do + for j in $(seq ${index} $((${index}+{{ .Values.provisioning.parallel }}-1))) + do + ${kafka_provisioning_commands[j]} & # Async command + done + wait # Wait the end of the jobs + done + + echo "Running post-provisioning script if any given" + {{ .Values.provisioning.postScript | nindent 14 }} + + echo "Provisioning succeeded" + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if (regexFind "SSL" (upper .Values.listeners.client.protocol)) }} + - name: KAFKA_CLIENT_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keyPasswordSecretKey }} + - name: KAFKA_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keystorePasswordSecretKey }} + - name: KAFKA_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.truststorePasswordSecretKey }} + {{- end }} + - name: KAFKA_SERVICE + value: {{ printf "%s:%d" (include "common.names.fullname" .) 
(.Values.service.ports.client | int64) }} + {{- if regexFind "SASL" (upper .Values.listeners.client.protocol) }} + - name: SASL_USERNAME + value: {{ index .Values.sasl.client.users 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: system-user-password + {{- end }} + {{- if .Values.provisioning.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.provisioning.extraEnvVarsCM .Values.provisioning.extraEnvVarsSecret }} + envFrom: + {{- if .Values.provisioning.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.provisioning.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: /opt/bitnami/kafka/config/log4j.properties + subPath: log4j.properties + {{- end }} + {{- if (regexFind "SSL" (upper .Values.listeners.client.protocol)) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + mountPath: /certs + readOnly: true + {{- end }} + {{- end }} + - name: tmp + mountPath: /tmp + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{- end }} + {{- if (regexFind "SSL" (upper .Values.listeners.client.protocol)) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + secret: + secretName: {{ .Values.provisioning.auth.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + - name: tmp + emptyDir: {} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/provisioning/serviceaccount.yaml b/manifest/helm-charts/infra/kafka/templates/provisioning/serviceaccount.yaml new file mode 100644 index 000000000..dbc1776e5 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/provisioning/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.provisioning.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.provisioning.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.provisioning.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/provisioning/tls-secret.yaml b/manifest/helm-charts/infra/kafka/templates/provisioning/tls-secret.yaml
new file mode 100644
index 000000000..2da219d18
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/provisioning/tls-secret.yaml
@@ -0,0 +1,21 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and .Values.provisioning.enabled (regexFind "SSL" (upper .Values.listeners.client.protocol)) (not .Values.provisioning.auth.tls.passwordsSecret) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "kafka.client.passwordsSecretName" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  truststore-password: {{ default "" .Values.provisioning.auth.tls.truststorePassword | b64enc | quote }}
+  keystore-password: {{ default "" .Values.provisioning.auth.tls.keystorePassword | b64enc | quote }}
+  key-password: {{ default "" .Values.provisioning.auth.tls.keyPassword | b64enc | quote }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/rbac/role.yaml b/manifest/helm-charts/infra/kafka/templates/rbac/role.yaml
new file mode 100644
index 000000000..3c55fd757
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/rbac/role.yaml
@@ -0,0 +1,26 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.rbac.create }}
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+kind: Role
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: kafka
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/rbac/rolebinding.yaml b/manifest/helm-charts/infra/kafka/templates/rbac/rolebinding.yaml
new file mode 100644
index 000000000..ef6b4b10b
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/rbac/rolebinding.yaml
@@ -0,0 +1,25 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.rbac.create }}
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+kind: RoleBinding
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ include "common.names.namespace" .
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "common.names.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/rbac/serviceaccount.yaml b/manifest/helm-charts/infra/kafka/templates/rbac/serviceaccount.yaml new file mode 100644 index 000000000..6b5166e4a --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/rbac/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/scripts-configmap.yaml b/manifest/helm-charts/infra/kafka/templates/scripts-configmap.yaml new file mode 100644 index 000000000..316bb1ef9 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/scripts-configmap.yaml @@ -0,0 +1,367 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $fullname := include "common.names.fullname" . 
}} +{{- $clusterDomain := .Values.clusterDomain }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" $fullname }} + namespace: {{ $releaseNamespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- if .Values.externalAccess.autoDiscovery.enabled }} + auto-discovery.sh: |- + #!/bin/bash + SVC_NAME="${MY_POD_NAME}-external" + AUTODISCOVERY_SERVICE_TYPE="${AUTODISCOVERY_SERVICE_TYPE:-}" + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + k8s_svc_node_port() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local index=${3:-0} + local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")" + echo "$node_port" + } + + if [[ "$AUTODISCOVERY_SERVICE_TYPE" = "LoadBalancer" ]]; then + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "/shared/external-host.txt" + elif [[ "$AUTODISCOVERY_SERVICE_TYPE" = "NodePort" ]]; then + k8s_svc_node_port "{{ $releaseNamespace }}" "$SVC_NAME" | tee "/shared/external-port.txt" + else + echo "Unsupported autodiscovery service type: '$AUTODISCOVERY_SERVICE_TYPE'" + exit 1 + fi + {{- end }} + kafka-init.sh: |- + #!/bin/bash + + set -o errexit + set -o nounset + set -o pipefail + + error(){ + local message="${1:?missing message}" + echo "ERROR: ${message}" + exit 1 + } + + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + + replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; 
then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" + } + + kafka_conf_set() { + local file="${1:?missing file}" + local key="${2:?missing key}" + local value="${3:?missing value}" + + # Check if the value was set before + if grep -q "^[#\\s]*$key\s*=.*" "$file"; then + # Update the existing key + replace_in_file "$file" "^[#\\s]*${key}\s*=.*" "${key}=${value}" false + else + # Add a new key + printf '\n%s=%s' "$key" "$value" >>"$file" + fi + } + + replace_placeholder() { + local placeholder="${1:?missing placeholder value}" + local password="${2:?missing password value}" + sed -i "s/$placeholder/$password/g" "$KAFKA_CONFIG_FILE" + } + + append_file_to_kafka_conf() { + local file="${1:?missing source file}" + local conf="${2:?missing kafka conf file}" + + cat "$1" >> "$2" + } + + configure_external_access() { + # Configure external hostname + if [[ -f "/shared/external-host.txt" ]]; then + host=$(cat "/shared/external-host.txt") + elif [[ -n "${EXTERNAL_ACCESS_HOST:-}" ]]; then + host="$EXTERNAL_ACCESS_HOST" + elif [[ -n "${EXTERNAL_ACCESS_HOSTS_LIST:-}" ]]; then + read -r -a hosts <<<"$(tr ',' ' ' <<<"${EXTERNAL_ACCESS_HOSTS_LIST}")" + host="${hosts[$POD_ID]}" + elif [[ "$EXTERNAL_ACCESS_HOST_USE_PUBLIC_IP" =~ ^(yes|true)$ ]]; then + host=$(curl -s https://ipinfo.io/ip) + else + error "External access hostname not provided" + fi + + # Configure external port + if [[ -f "/shared/external-port.txt" ]]; then + port=$(cat "/shared/external-port.txt") + elif [[ -n "${EXTERNAL_ACCESS_PORT:-}" ]]; then + if [[ "${EXTERNAL_ACCESS_PORT_AUTOINCREMENT:-}" =~ ^(yes|true)$ ]]; then + port="$((EXTERNAL_ACCESS_PORT + POD_ID))" + else + port="$EXTERNAL_ACCESS_PORT" + fi + elif [[ -n "${EXTERNAL_ACCESS_PORTS_LIST:-}" ]]; then + read -r -a ports <<<"$(tr ',' ' ' <<<"${EXTERNAL_ACCESS_PORTS_LIST}")" + port="${ports[$POD_ID]}" + else + error "External access port not provided" + fi + # Configure Kafka advertised listeners + sed -i -E "s|^(advertised\.listeners=\S+)$|\1,{{ upper .Values.listeners.external.name }}://${host}:${port}|" "$KAFKA_CONFIG_FILE" + } + {{- if (include "kafka.sslEnabled" .) 
}}
+    configure_kafka_tls() {
+        # Remove previously existing keystores
+        rm -f /certs/kafka.keystore.jks /certs/kafka.truststore.jks
+        if [[ "${KAFKA_TLS_TYPE}" = "PEM" ]]; then
+            # Copy PEM certificate and key
+            if [[ -f "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.crt" && -f "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.key" ]]; then
+                cp "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.crt" /certs/tls.crt
+                # Copy the PEM key ensuring the key used PEM format with PKCS#8
+                openssl pkcs8 -topk8 -nocrypt -in "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.key" > /certs/tls.key
+            elif [[ -f /mounted-certs/kafka.crt && -f /mounted-certs/kafka.key ]]; then
+                cp "/mounted-certs/kafka.crt" /certs/tls.crt
+                # Copy the PEM key ensuring the key used PEM format with PKCS#8
+                openssl pkcs8 -topk8 -nocrypt -in "/mounted-certs/kafka.key" > /certs/tls.key
+            elif [[ -f /mounted-certs/tls.crt && -f /mounted-certs/tls.key ]]; then
+                cp "/mounted-certs/tls.crt" /certs/tls.crt
+                # Copy the PEM key ensuring the key used PEM format with PKCS#8
+                openssl pkcs8 -topk8 -nocrypt -in "/mounted-certs/tls.key" > /certs/tls.key
+            else
+                error "PEM key and cert files not found"
+            fi
+
+            {{- if not .Values.tls.pemChainIncluded }}
+            # Copy CA certificate
+            if [[ -f /mounted-certs/kafka-ca.crt ]]; then
+                cp /mounted-certs/kafka-ca.crt /certs/ca.crt
+            elif [[ -f /mounted-certs/ca.crt ]]; then
+                cp /mounted-certs/ca.crt /certs/ca.crt
+            else
+                error "CA certificate file not found"
+            fi
+            {{- else }}
+            # Extract CA certificate from PEM cert
+            cat /certs/tls.crt | csplit - -s -z '/\-*END CERTIFICATE\-*/+1' '{*}' -f /certs/xx
+            FIND_CA_RESULT=$(find /certs -not -name 'xx00' -name 'xx*')
+            if [[ -z "$FIND_CA_RESULT" ]]; then
+                error "auth.tls.pemChainIncluded was set, but PEM chain only contained 1 cert"
+            fi
+            echo $FIND_CA_RESULT | sort | xargs cat >> /certs/ca.crt
+            cat /certs/xx00 > /certs/tls.crt
+            find /certs -name "xx*" -exec rm {} \;
+            {{- end }}
+
+            # Create JKS keystore from PEM cert and key
+            openssl pkcs12 -export -in "/certs/tls.crt" \
+                -passout pass:"${KAFKA_TLS_KEYSTORE_PASSWORD}" \
+                -inkey "/certs/tls.key" \
+                -out "/certs/kafka.keystore.p12"
+            keytool -importkeystore -srckeystore "/certs/kafka.keystore.p12" \
+                -srcstoretype PKCS12 \
+                -srcstorepass "${KAFKA_TLS_KEYSTORE_PASSWORD}" \
+                -deststorepass "${KAFKA_TLS_KEYSTORE_PASSWORD}" \
+                -destkeystore "/certs/kafka.keystore.jks" \
+                -noprompt
+            # Create JKS truststore from CA cert
+            keytool -keystore /certs/kafka.truststore.jks -alias CARoot -import -file /certs/ca.crt -storepass "${KAFKA_TLS_TRUSTSTORE_PASSWORD}" -noprompt
+            # Remove extra files
+            rm -f "/certs/kafka.keystore.p12" "/certs/tls.crt" "/certs/tls.key" "/certs/ca.crt"
+        elif [[ "${KAFKA_TLS_TYPE}" = "JKS" ]]; then
+            if [[ -f "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.keystore.jks" ]]; then
+                cp "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.keystore.jks" /certs/kafka.keystore.jks
+            elif [[ -f /mounted-certs/kafka.keystore.jks ]]; then
+                cp /mounted-certs/kafka.keystore.jks /certs/kafka.keystore.jks
+            else
+                error "Keystore file not found"
+            fi
+
+            if [[ -f {{ printf "/mounted-certs/%s" ( default "kafka.truststore.jks" .Values.tls.jksTruststoreKey) | quote }} ]]; then
+                cp {{ printf "/mounted-certs/%s" ( default "kafka.truststore.jks" .Values.tls.jksTruststoreKey) | quote }} /certs/kafka.truststore.jks
+            else
+                error "Truststore file not found"
+            fi
+        else
+            error "Invalid type ${KAFKA_TLS_TYPE}"
+        fi
+
+        # Configure TLS password settings in Kafka configuration
+        [[ -n
"${KAFKA_TLS_KEYSTORE_PASSWORD:-}" ]] && kafka_conf_set "$KAFKA_CONFIG_FILE" "ssl.keystore.password" "$KAFKA_TLS_KEYSTORE_PASSWORD" + [[ -n "${KAFKA_TLS_TRUSTSTORE_PASSWORD:-}" ]] && kafka_conf_set "$KAFKA_CONFIG_FILE" "ssl.truststore.password" "$KAFKA_TLS_TRUSTSTORE_PASSWORD" + [[ -n "${KAFKA_TLS_PEM_KEY_PASSWORD:-}" ]] && kafka_conf_set "$KAFKA_CONFIG_FILE" "ssl.key.password" "$KAFKA_TLS_PEM_KEY_PASSWORD" + # Avoid errors caused by previous checks + true + } + {{- end }} + {{- if and .Values.tls.zookeeper.enabled .Values.tls.zookeeper.existingSecret }} + configure_zookeeper_tls() { + # Remove previously existing keystores + rm -f /certs/zookeeper.keystore.jks /certs/zookeeper.truststore.jks + ZOOKEEPER_TRUSTSTORE={{ printf "/zookeeper-certs/%s" .Values.tls.zookeeper.existingSecretTruststoreKey | quote }} + ZOOKEEPER_KEYSTORE={{ printf "/zookeeper-certs/%s" .Values.tls.zookeeper.existingSecretKeystoreKey | quote }} + if [[ -f "$ZOOKEEPER_KEYSTORE" ]]; then + cp "$ZOOKEEPER_KEYSTORE" "/certs/zookeeper.keystore.jks" + else + error "Zookeeper keystore file not found" + fi + if [[ -f "$ZOOKEEPER_TRUSTSTORE" ]]; then + cp "$ZOOKEEPER_TRUSTSTORE" "/certs/zookeeper.truststore.jks" + else + error "Zookeeper keystore file not found" + fi + [[ -n "${KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD:-}" ]] && kafka_conf_set "$KAFKA_CONFIG_FILE" "zookeeper.ssl.keystore.password" "${KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD}" + [[ -n "${KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD:-}" ]] && kafka_conf_set "$KAFKA_CONFIG_FILE" "zookeeper.ssl.truststore.password" "${KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD}" + # Avoid errors caused by previous checks + true + } + {{- end }} + + {{- if (include "kafka.saslEnabled" .) }} + configure_kafka_sasl() { + + # Replace placeholders with passwords + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} + replace_placeholder "interbroker-password-placeholder" "$KAFKA_INTER_BROKER_PASSWORD" + {{- end -}} + {{- if and .Values.kraft.enabled (regexFind "SASL" (upper .Values.listeners.controller.protocol)) }} + replace_placeholder "controller-password-placeholder" "$KAFKA_CONTROLLER_PASSWORD" + {{- end }} + {{- if (include "kafka.client.saslEnabled" .)}} + read -r -a passwords <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_PASSWORDS:-}")" + for ((i = 0; i < ${#passwords[@]}; i++)); do + replace_placeholder "password-placeholder-${i}" "${passwords[i]}" + done + {{- end }} + {{- if .Values.sasl.zookeeper.user }} + replace_placeholder "zookeeper-password-placeholder" "$KAFKA_ZOOKEEPER_PASSWORD" + {{- end }} + } + {{- end }} + + {{- if .Values.externalAccess.autoDiscovery.enabled }} + # Wait for autodiscovery to finish + if [[ "${EXTERNAL_ACCESS_ENABLED:-false}" =~ ^(yes|true)$ ]]; then + retry_while "test -f /shared/external-host.txt -o -f /shared/external-port.txt" || error "Timed out waiting for autodiscovery init-container" + fi + {{- end }} + + export KAFKA_CONFIG_FILE=/config/server.properties + cp /configmaps/server.properties $KAFKA_CONFIG_FILE + + # Get pod ID and role, last and second last fields in the pod name respectively + POD_ID=$(echo "$MY_POD_NAME" | rev | cut -d'-' -f 1 | rev) + POD_ROLE=$(echo "$MY_POD_NAME" | rev | cut -d'-' -f 2 | rev) + + # Configure node.id and/or broker.id + if [[ -f "/bitnami/kafka/data/meta.properties" ]]; then + if grep -q "broker.id" /bitnami/kafka/data/meta.properties; then + ID="$(grep "broker.id" /bitnami/kafka/data/meta.properties | awk -F '=' '{print $2}')" + {{- if or (not .Values.broker.zookeeperMigrationMode) (and (not 
.Values.zookeeper.enabled) (not .Values.externalZookeeper.servers)) }} + kafka_conf_set "$KAFKA_CONFIG_FILE" "node.id" "$ID" + {{- else }} + kafka_conf_set "$KAFKA_CONFIG_FILE" "broker.id" "$ID" + {{- end }} + else + ID="$(grep "node.id" /bitnami/kafka/data/meta.properties | awk -F '=' '{print $2}')" + kafka_conf_set "$KAFKA_CONFIG_FILE" "node.id" "$ID" + fi + else + ID=$((POD_ID + KAFKA_MIN_ID)) + {{- if .Values.kraft.enabled }} + kafka_conf_set "$KAFKA_CONFIG_FILE" "node.id" "$ID" + {{- end }} + {{- if or .Values.zookeeper.enabled .Values.externalZookeeper.servers }} + kafka_conf_set "$KAFKA_CONFIG_FILE" "broker.id" "$ID" + {{- end }} + fi + {{- if not .Values.listeners.advertisedListeners }} + replace_placeholder "advertised-address-placeholder" "${MY_POD_NAME}.{{ $fullname }}-${POD_ROLE}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}" + if [[ "${EXTERNAL_ACCESS_ENABLED:-false}" =~ ^(yes|true)$ ]]; then + configure_external_access + fi + {{- end }} + {{- if (include "kafka.sslEnabled" .) }} + configure_kafka_tls + {{- end }} + {{- if (include "kafka.saslEnabled" .) }} + configure_kafka_sasl + {{- end }} + {{- if and .Values.tls.zookeeper.enabled .Values.tls.zookeeper.existingSecret }} + configure_zookeeper_tls + {{- end }} + if [ -f /secret-config/server-secret.properties ]; then + append_file_to_kafka_conf /secret-config/server-secret.properties $KAFKA_CONFIG_FILE + fi + {{- include "common.tplvalues.render" ( dict "value" .Values.extraInit "context" $ ) | nindent 4 }} + diff --git a/manifest/helm-charts/infra/kafka/templates/secrets.yaml b/manifest/helm-charts/infra/kafka/templates/secrets.yaml new file mode 100644 index 000000000..2deddb12f --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/secrets.yaml @@ -0,0 +1,121 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "kafka.createSaslSecret" .) }} +{{- $secretName := printf "%s-user-passwords" (include "common.names.fullname" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (include "kafka.client.saslEnabled" .) }} + {{- $secretValue := "" }} + {{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .) 
$secretName).data }}
+  {{- if and $secretData (hasKey $secretData "client-passwords")}}
+  {{- $secretValue = index $secretData "client-passwords" }}
+  {{- end }}
+  {{- if or (empty $secretValue) (not (eq (len .Values.sasl.client.users) (len (splitList "," (b64dec $secretValue))))) }}
+  {{- $clientPasswords := .Values.sasl.client.passwords }}
+  {{- if empty $clientPasswords }}
+  {{- $clientPasswords = list }}
+  {{- range .Values.sasl.client.users }}
+  {{- $clientPasswords = append $clientPasswords (randAlphaNum 10) }}
+  {{- end }}
+  {{- end }}
+  {{- $secretValue = join "," $clientPasswords | toString | b64enc }}
+  {{- end }}
+  client-passwords: {{ $secretValue | quote }}
+  system-user-password: {{ index (splitList "," (b64dec $secretValue)) 0 | b64enc | quote }}
+  {{- end }}
+  {{- if or .Values.sasl.zookeeper.user .Values.zookeeper.auth.client.enabled }}
+  zookeeper-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "zookeeper-password" "providedValues" (list "sasl.zookeeper.password" "zookeeper.auth.client.clientPassword") "failOnNew" false "context" $) }}
+  {{- end }}
+  {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }}
+  inter-broker-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "inter-broker-password" "providedValues" (list "sasl.interbroker.password") "failOnNew" false "context" $) }}
+  {{- end }}
+  {{- if regexFind "SASL" (upper .Values.listeners.controller.protocol) }}
+  controller-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "controller-password" "providedValues" (list "sasl.controller.password") "failOnNew" false "context" $) }}
+  {{- end }}
+{{- if .Values.serviceBindings.enabled }}
+
+{{- $host := list }}
+{{- $port := .Values.service.ports.client }}
+{{- $bootstrapServers := list }}
+{{- if not .Values.controller.controllerOnly }}
+  {{- range $i, $e := until (int .Values.controller.replicaCount) }}
+  {{- $controller := printf "%s-controller-%s.%s-controller-headless.%s.svc.%s" (include "common.names.fullname" $) (print $i) (include "common.names.fullname" $) $.Release.Namespace $.Values.clusterDomain }}
+  {{- $host = append $host $controller }}
+  {{- $bootstrapServers = append $bootstrapServers (printf "%s:%d" $controller (int $.Values.service.ports.client)) }}
+  {{- end }}
+{{- end }}
+{{- range $i, $e := until (int .Values.broker.replicaCount) }}
+  {{- $broker := printf "%s-broker-%s.%s-broker-headless.%s.svc.%s" (include "common.names.fullname" $) (print $i) (include "common.names.fullname" $) $.Release.Namespace $.Values.clusterDomain }}
+  {{- $host = append $host $broker }}
+  {{- $bootstrapServers = append $bootstrapServers (printf "%s:%d" $broker (int $.Values.service.ports.client)) }}
+{{- end }}
+{{- if (include "kafka.client.saslEnabled" .) }}
+{{- range $i, $e := until (len .Values.sasl.client.users) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" $ }}-svcbind-user-{{ $i }}
+  namespace: {{ $.Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if $.Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: servicebinding.io/kafka
+data:
+  provider: {{ print "bitnami" | b64enc | quote }}
+  type: {{ print "kafka" | b64enc | quote }}
+  username: {{ index $.Values.sasl.client.users $i | b64enc | quote }}
+  password: {{ index $.Values.sasl.client.passwords $i | b64enc | quote }}
+  host: {{ join "," $host | b64enc | quote }}
+  port: {{ print $port | b64enc | quote }}
+  bootstrap-servers: {{ join "," $bootstrapServers | b64enc | quote }}
+{{- end }}
+{{- else }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}-svcbind
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: servicebinding.io/kafka
+data:
+  provider: {{ print "bitnami" | b64enc | quote }}
+  type: {{ print "kafka" | b64enc | quote }}
+  host: {{ join "," $host | b64enc | quote }}
+  port: {{ print $port | b64enc | quote }}
+  bootstrap-servers: {{ join "," $bootstrapServers | b64enc | quote }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if .Values.kraft.enabled }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-kraft-cluster-id" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" .
| quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  kraft-cluster-id: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-kraft-cluster-id" (include "common.names.fullname" .)) "key" "kraft-cluster-id" "providedValues" (list "kraft.clusterId") "length" 22 "context" $) }}
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/templates/svc.yaml b/manifest/helm-charts/infra/kafka/templates/svc.yaml
new file mode 100644
index 000000000..a37ba6274
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/templates/svc.yaml
@@ -0,0 +1,63 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/component: kafka
+  {{- if or .Values.service.annotations .Values.commonAnnotations }}
+  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . ) }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+  {{- end }}
+spec:
+  type: {{ .Values.service.type }}
+  {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }}
+  clusterIP: {{ .Values.service.clusterIP }}
+  {{- end }}
+  {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }}
+  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }}
+  loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.service.sessionAffinity }}
+  {{- end }}
+  {{- if .Values.service.sessionAffinityConfig }}
+  sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }}
+  {{- end }}
+  ports:
+    - name: tcp-client
+      port: {{ .Values.service.ports.client }}
+      protocol: TCP
+      targetPort: client
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }}
+      nodePort: {{ .Values.service.nodePorts.client }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- if .Values.externalAccess.enabled }}
+    - name: tcp-external
+      port: {{ .Values.service.ports.external }}
+      protocol: TCP
+      targetPort: external
+      {{- if (not (empty .Values.service.nodePorts.external)) }}
+      nodePort: {{ .Values.service.nodePorts.external }}
+      {{- end }}
+    {{- end }}
+    {{- if .Values.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels
"context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if and .Values.kraft.enabled .Values.controller.controllerOnly }} + app.kubernetes.io/component: broker + {{- end }} diff --git a/manifest/helm-charts/infra/kafka/templates/tls-secret.yaml b/manifest/helm-charts/infra/kafka/templates/tls-secret.yaml new file mode 100644 index 000000000..ecf2362d9 --- /dev/null +++ b/manifest/helm-charts/infra/kafka/templates/tls-secret.yaml @@ -0,0 +1,82 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "kafka.createTlsSecret" .) }} +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $secretName := printf "%s-tls" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s" $fullname $releaseNamespace) $fullname }} +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- range $i := until $replicaCount }} +{{- $replicaHost := printf "%s-broker-%d.%s-broker-headless" $fullname $i $fullname }} +{{- $altNames = append $altNames (printf "%s.%s.svc.%s" $replicaHost $releaseNamespace $clusterDomain) }} +{{- $altNames = append $altNames (printf "%s.%s" $replicaHost $releaseNamespace) }} +{{- $altNames = append $altNames $replicaHost }} +{{- end }} +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- range $i := until $replicaCount }} +{{- $replicaHost := printf "%s-controller-%d.%s-controller-headless" $fullname $i $fullname }} +{{- $altNames = append $altNames (printf "%s.%s.svc.%s" $replicaHost $releaseNamespace $clusterDomain) }} +{{- $altNames = append $altNames (printf "%s.%s" $replicaHost $releaseNamespace) }} +{{- $altNames = append $altNames $replicaHost }} +{{- end }} +{{- $ca := genCA "kafka-ca" 365 }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + kafka.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "kafka.crt" "defaultValue" $cert.Cert "context" $) }} + kafka.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "kafka.key" "defaultValue" $cert.Key "context" $) }} + kafka-ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "kafka-ca.crt" "defaultValue" $ca.Cert "context" $) }} +--- +{{- end }} +{{- if (include "kafka.createTlsPasswordsSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls-passwords" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  {{ .Values.tls.passwordsSecretKeystoreKey }}: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-tls-passwords" (include "common.names.fullname" .)) "key" .Values.tls.passwordsSecretKeystoreKey "providedValues" (list "tls.keystorePassword") "context" $) }}
+  {{ .Values.tls.passwordsSecretTruststoreKey }}: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-tls-passwords" (include "common.names.fullname" .)) "key" .Values.tls.passwordsSecretTruststoreKey "providedValues" (list "tls.truststorePassword") "context" $) }}
+  {{- if .Values.tls.keyPassword }}
+  {{ default "key-password" .Values.tls.passwordsSecretPemPasswordKey }}: {{ .Values.tls.keyPassword | b64enc | quote }}
+  {{- end }}
+---
+{{- end }}
+{{- if (include "kafka.zookeeper.createTlsPasswordsSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-zookeeper-tls-passwords" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  {{- if .Values.tls.zookeeper.keystorePassword }}
+  {{ .Values.tls.zookeeper.passwordsSecretKeystoreKey }}: {{ .Values.tls.zookeeper.keystorePassword | b64enc | quote }}
+  {{- end }}
+  {{- if .Values.tls.zookeeper.truststorePassword }}
+  {{ .Values.tls.zookeeper.passwordsSecretTruststoreKey }}: {{ .Values.tls.zookeeper.truststorePassword | b64enc | quote }}
+  {{- end }}
+---
+{{- end }}
diff --git a/manifest/helm-charts/infra/kafka/values.yaml b/manifest/helm-charts/infra/kafka/values.yaml
new file mode 100644
index 000000000..8198c07fa
--- /dev/null
+++ b/manifest/helm-charts/infra/kafka/values.yaml
@@ -0,0 +1,2352 @@
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+## @section Global parameters
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Currently available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+  imageRegistry: ""
+  ## E.g.
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname
+##
+fullnameOverride: ""
+## @param clusterDomain Default Kubernetes cluster domain
+##
+clusterDomain: cluster.local
+## @param commonLabels Labels to add to all deployed objects
+##
+commonLabels: {}
+## @param commonAnnotations Annotations to add to all deployed objects
+##
+commonAnnotations: {}
+## @param extraDeploy Array of extra objects to deploy with the release
+##
+extraDeploy: []
+## @param serviceBindings.enabled Create secret for service binding (Experimental)
+## Ref: https://servicebinding.io/service-provider/
+##
+serviceBindings:
+  enabled: false
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+  ##
+  enabled: false
+  ## @param diagnosticMode.command Command to override all containers in the statefulset
+  ##
+  command:
+    - sleep
+  ## @param diagnosticMode.args Args to override all containers in the statefulset
+  ##
+  args:
+    - infinity
+
+## @section Kafka parameters
+##
+
+## Bitnami Kafka image version
+## ref: https://hub.docker.com/r/bitnami/kafka/tags/
+## @param image.registry Kafka image registry
+## @param image.repository Kafka image repository
+## @param image.tag Kafka image tag (immutable tags are recommended)
+## @param image.digest Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy Kafka image pull policy
+## @param image.pullSecrets Specify docker-registry secret names as an array
+## @param image.debug Specify if debug values should be set
+##
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 3.5.1-debian-11-r44
+  digest: ""
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## e.g:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Set to true if you would like to see extra information on logs
+  ##
+  debug: false
+## @param extraInit Additional content for the kafka init script, rendered as a template.
+##
+extraInit: ""
+## @param config Configuration file for Kafka, rendered as a template. Auto-generated based on chart values when not specified.
+## @param existingConfigmap ConfigMap with Kafka Configuration
+## NOTE: This will override the configuration based on values, please act carefully
+## If both are set, the existingConfigMap will be used.
+##
+config: ""
+existingConfigmap: ""
+## @param extraConfig Additional configuration to be appended at the end of the generated Kafka configuration file.
+##
+extraConfig: ""
+## @param secretConfig Additional configuration to be appended at the end of the generated Kafka configuration file.
+## This value will be stored in a secret.
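+## A hedged illustration of the kind of sensitive server.properties entry that could be
+## appended via secretConfig; the property value is a placeholder, not a chart default:
+## e.g:
+## secretConfig: |
+##   ssl.key.password=change-me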
+##
+secretConfig: ""
+## @param existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka configuration file
+## The key for the configuration should be: server-secret.properties
+## NOTE: This will override secretConfig value
+##
+existingSecretConfig: ""
+## @param log4j An optional log4j.properties file to overwrite the default of the Kafka brokers
+## An optional log4j.properties file to overwrite the default of the Kafka brokers
+## ref: https://github.com/apache/kafka/blob/trunk/config/log4j.properties
+##
+log4j: ""
+## @param existingLog4jConfigMap The name of an existing ConfigMap containing a log4j.properties file
+## The name of an existing ConfigMap containing a log4j.properties file
+## NOTE: this will override `log4j`
+##
+existingLog4jConfigMap: ""
+## @param heapOpts Kafka Java Heap size
+##
+heapOpts: -Xmx1024m -Xms1024m
+## @param interBrokerProtocolVersion Override the setting 'inter.broker.protocol.version' during the ZK migration.
+## Ref. https://docs.confluent.io/platform/current/installation/migrate-zk-kraft.html
+##
+interBrokerProtocolVersion: ""
+## Kafka listeners configuration
+##
+listeners:
+  ## @param listeners.client.name Name for the Kafka client listener
+  ## @param listeners.client.containerPort Port for the Kafka client listener
+  ## @param listeners.client.protocol Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL'
+  ## @param listeners.client.sslClientAuth Optional. If SASL_SSL is enabled, configure the mTLS authentication type. If the SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required'
+  client:
+    containerPort: 9092
+    protocol: SASL_PLAINTEXT
+    name: CLIENT
+    sslClientAuth: ""
+  ## @param listeners.controller.name Name for the Kafka controller listener
+  ## @param listeners.controller.containerPort Port for the Kafka controller listener
+  ## @param listeners.controller.protocol Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL'
+  ## @param listeners.controller.sslClientAuth Optional. If SASL_SSL is enabled, configure the mTLS authentication type. If the SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required'
+  ## Ref: https://cwiki.apache.org/confluence/display/KAFKA/KIP-684+-+Support+mutual+TLS+authentication+on+SASL_SSL+listeners
+  controller:
+    name: CONTROLLER
+    containerPort: 9093
+    protocol: SASL_PLAINTEXT
+    sslClientAuth: ""
+  ## @param listeners.interbroker.name Name for the Kafka inter-broker listener
+  ## @param listeners.interbroker.containerPort Port for the Kafka inter-broker listener
+  ## @param listeners.interbroker.protocol Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL'
+  ## @param listeners.interbroker.sslClientAuth Optional. If SASL_SSL is enabled, configure the mTLS authentication type. If the SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required'
+  interbroker:
+    containerPort: 9094
+    protocol: SASL_PLAINTEXT
+    name: INTERNAL
+    sslClientAuth: ""
+  ## @param listeners.external.containerPort Port for the Kafka external listener
+  ## @param listeners.external.protocol Security protocol for the Kafka external listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL'
+  ## @param listeners.external.name Name for the Kafka external listener
+  ## @param listeners.external.sslClientAuth Optional. If SASL_SSL is enabled, configure the mTLS authentication type. If the SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required'
+  external:
+    containerPort: 9095
+    protocol: SASL_PLAINTEXT
+    name: EXTERNAL
+    sslClientAuth: ""
+  ## @param listeners.extraListeners Array of listener objects to be appended to already existing listeners
+  ## E.g.
+  ## extraListeners:
+  ##  - name: CUSTOM
+  ##    containerPort: 9097
+  ##    protocol: SASL_PLAINTEXT
+  ##    sslClientAuth: ""
+  ##
+  extraListeners: []
+  ## NOTE: If set, the values below will override the configuration set using the above values (extraListeners.*, controller.*, interbroker.*, client.* and external.*)
+  ## @param listeners.overrideListeners Overrides the Kafka 'listeners' configuration setting.
+  ## @param listeners.advertisedListeners Overrides the Kafka 'advertised.listeners' configuration setting.
+  ## @param listeners.securityProtocolMap Overrides the Kafka 'security.protocol.map' configuration setting.
+  overrideListeners: ""
+  advertisedListeners: ""
+  securityProtocolMap: ""
+
+## @section Kafka SASL parameters
+## Kafka SASL settings for authentication, required if SASL_PLAINTEXT or SASL_SSL listeners are configured
+##
+sasl:
+  ## @param sasl.enabledMechanisms Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`
+  ## NOTE: Kafka Raft mode supports SCRAM only from Kafka 3.5 onwards.
+  ##
+  enabledMechanisms: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
+  ## @param sasl.interBrokerMechanism SASL mechanism for inter-broker communications.
+  ##
+  interBrokerMechanism: PLAIN
+  ## @param sasl.controllerMechanism SASL mechanism for controller communications.
+  ##
+  controllerMechanism: PLAIN
+  ## Credentials for inter-broker communications.
+  ## @param sasl.interbroker.user Username for inter-broker communications when SASL is enabled
+  ## @param sasl.interbroker.password Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated.
+  ##
+  interbroker:
+    user: inter_broker_user
+    password: ""
+  ## Credentials for controller communications.
+  ## @param sasl.controller.user Username for controller communications when SASL is enabled
+  ## @param sasl.controller.password Password for controller communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated.
+  ##
+  controller:
+    user: controller_user
+    password: ""
+  ## Credentials for client communications.
+  ## @param sasl.client.users Comma-separated list of usernames for client communications when SASL is enabled
+  ## @param sasl.client.passwords Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users
+  ##
+  client:
+    users:
+      - user1
+    passwords: ""
+  ## Credentials for Zookeeper communications.
+  ## @param sasl.zookeeper.user Username for zookeeper communications when SASL is enabled.
+  ## @param sasl.zookeeper.password Password for zookeeper communications when SASL is enabled.
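+  ## A minimal sketch with placeholder credentials (illustrative only, not chart defaults):
+  ## e.g:
+  ## zookeeper:
+  ##   user: zookeeper_user
+  ##   password: change-me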
+  ##
+  zookeeper:
+    user: ""
+    password: ""
+  ## @param sasl.existingSecret Name of the existing secret containing credentials for clientUsers, interBrokerUser, controllerUser and zookeeperUser
+  ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create:
+  ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=controller-password=CONTROLLER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD
+  ##
+  existingSecret: ""
+
+## @section Kafka TLS parameters
+## Kafka TLS settings, required if SSL or SASL_SSL listeners are configured
+##
+tls:
+  ## @param tls.type Format to use for TLS certificates. Allowed types: `JKS` and `PEM`
+  ##
+  type: JKS
+  ## @param tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert.
+  ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA.
+  ##
+  pemChainIncluded: false
+  ## @param tls.existingSecret Name of the existing secret containing the TLS certificates for the Kafka nodes.
+  ## When using 'jks' format for certificates, each secret should contain a truststore and a keystore.
+  ## Create these secrets following the steps below:
+  ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
+  ## 2) Rename your truststore to `kafka.truststore.jks`.
+  ## 3) Rename your keystores to `kafka-<role>-X.keystore.jks` where X is the replica number of the <role>.
+  ## 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):
+  ## kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks \
+  ## --from-file=kafka-controller-0.keystore.jks=./kafka-controller-0.keystore.jks --from-file=kafka-broker-0.keystore.jks=./kafka-broker-0.keystore.jks ...
+  ##
+  ## NOTE: Alternatively, a single keystore can be provided for all nodes under the key 'kafka.keystore.jks'; this keystore will be used by all nodes unless overridden by the 'kafka-<role>-X.keystore.jks' file
+  ##
+  ## When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key.
+  ## Create these secrets following the steps below:
+  ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA
+  ## 2) Rename your CA file to `kafka.ca.crt`.
+  ## 3) Rename your certificates to `kafka-X.tls.crt` where X is the ID of each Kafka broker.
+  ## 4) Rename your keys to `kafka-X.tls.key` where X is the ID of each Kafka broker.
+  ## 5) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):
+  ## kubectl create secret generic SECRET_NAME_0 --from-file=kafka-ca.crt=./kafka-ca.crt --from-file=kafka-controller-0.crt=./kafka-controller-0.crt --from-file=kafka-controller-0.key=./kafka-controller-0.key \
+  ## --from-file=kafka-broker-0.crt=./kafka-broker-0.crt --from-file=kafka-broker-0.key=./kafka-broker-0.key ...
+  ##
+  ## NOTE: Alternatively, a single key and certificate can be provided for all nodes under the keys 'kafka.crt' and 'kafka.key'. These certificates will be used by all nodes unless overridden by the 'kafka-<role>-X.key' and 'kafka-<role>-X.crt' files
+  ## NOTE: Alternatively, a single key and certificate can be provided for all nodes under the keys 'tls.crt' and 'tls.key'. These certificates will be used by all nodes unless overridden by the 'kafka-<role>-X.key' and 'kafka-<role>-X.crt' files
+  ##
+  existingSecret: ""
+  ## @param tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `tls.type` is `PEM`
+  ## Note: ignored when using 'jks' format or `tls.existingSecret` is not empty
+  ##
+  autoGenerated: false
+  ## @param tls.passwordsSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`)
+  ##
+  passwordsSecret: ""
+  ## @param tls.passwordsSecretKeystoreKey The secret key from the tls.passwordsSecret containing the password for the Keystore.
+  ##
+  passwordsSecretKeystoreKey: keystore-password
+  ## @param tls.passwordsSecretTruststoreKey The secret key from the tls.passwordsSecret containing the password for the Truststore.
+  ##
+  passwordsSecretTruststoreKey: truststore-password
+  ## @param tls.passwordsSecretPemPasswordKey The secret key from the tls.passwordsSecret containing the password for the PEM key inside 'tls.passwordsSecret'.
+  ##
+  passwordsSecretPemPasswordKey: ""
+  ## @param tls.keystorePassword Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided.
+  ## When using tls.type=PEM, the generated keystore will use this password or randomly generate one.
+  ##
+  keystorePassword: ""
+  ## @param tls.truststorePassword Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided.
+  ## When using tls.type=PEM, the generated keystore will use this password or randomly generate one.
+  ##
+  truststorePassword: ""
+  ## @param tls.keyPassword Password to access the PEM key when it is password-protected.
+  ## Note: ignored when using 'tls.passwordsSecret'
+  ##
+  keyPassword: ""
+  ## @param tls.jksTruststoreSecret Name of the existing secret containing your truststore, if it does not exist in or differs from the one in `tls.existingSecret`
+  ## Note: ignored when using 'pem' format for certificates.
+  ##
+  jksTruststoreSecret: ""
+  ## @param tls.jksTruststoreKey The secret key from the `tls.existingSecret` or `tls.jksTruststoreSecret` containing the truststore
+  ## Note: ignored when using 'pem' format for certificates.
+  ##
+  jksTruststoreKey: ""
+  ## @param tls.endpointIdentificationAlgorithm The endpoint identification algorithm used to validate the server hostname against the server certificate
+  ## Disable server hostname verification by setting it to an empty string.
+  ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings
+  ##
+  endpointIdentificationAlgorithm: https
+  ## @param tls.sslClientAuth Sets the default value for the ssl.client.auth Kafka setting.
+  ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings
+  ##
+  sslClientAuth: "required"
+  ## Zookeeper TLS connection configuration for Kafka
+  ##
+  zookeeper:
+    ## @param tls.zookeeper.enabled Enable TLS for Zookeeper client connections.
+    ##
+    enabled: false
+    ## @param tls.zookeeper.verifyHostname Verify the hostname on the ZooKeeper server certificate.
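+    ## e.g. (illustrative; disable only when the ZooKeeper server certificate does not cover the hostnames Kafka dials):
+    ## verifyHostname: false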
+    ##
+    verifyHostname: true
+    ## @param tls.zookeeper.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications.
+    ##
+    existingSecret: ""
+    ## @param tls.zookeeper.existingSecretKeystoreKey The secret key from the tls.zookeeper.existingSecret containing the Keystore.
+    ##
+    existingSecretKeystoreKey: zookeeper.keystore.jks
+    ## @param tls.zookeeper.existingSecretTruststoreKey The secret key from the tls.zookeeper.existingSecret containing the Truststore.
+    ##
+    existingSecretTruststoreKey: zookeeper.truststore.jks
+    ## @param tls.zookeeper.passwordsSecret Existing secret containing Keystore and Truststore passwords.
+    ##
+    passwordsSecret: ""
+    ## @param tls.zookeeper.passwordsSecretKeystoreKey The secret key from the tls.zookeeper.passwordsSecret containing the password for the Keystore.
+    ## If no keystore password is included in the passwords secret, set this value to an empty string.
+    ##
+    passwordsSecretKeystoreKey: keystore-password
+    ## @param tls.zookeeper.passwordsSecretTruststoreKey The secret key from the tls.zookeeper.passwordsSecret containing the password for the Truststore.
+    ## If no truststore password is included in the passwords secret, set this value to an empty string.
+    ##
+    passwordsSecretTruststoreKey: truststore-password
+    ## @param tls.zookeeper.keystorePassword Password to access the JKS keystore when it is password-protected. Ignored when 'tls.zookeeper.passwordsSecret' is provided.
+    ##
+    keystorePassword: ""
+    ## @param tls.zookeeper.truststorePassword Password to access the JKS truststore when it is password-protected. Ignored when 'tls.zookeeper.passwordsSecret' is provided.
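+    ## A hedged sketch of creating such a passwords secret with kubectl; the secret name
+    ## "kafka-zk-tls-passwords" and the literal values are placeholders:
+    ## kubectl create secret generic kafka-zk-tls-passwords \
+    ##   --from-literal=keystore-password=KEYSTORE_PASSWORD \
+    ##   --from-literal=truststore-password=TRUSTSTORE_PASSWORD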
+    ##
+    truststorePassword: ""
+
+## @param extraEnvVars Extra environment variables to add to Kafka pods
+## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration
+## e.g:
+## extraEnvVars:
+##   - name: KAFKA_CFG_BACKGROUND_THREADS
+##     value: "10"
+##
+extraEnvVars: []
+## @param extraEnvVarsCM ConfigMap with extra environment variables
+##
+extraEnvVarsCM: ""
+## @param extraEnvVarsSecret Secret with extra environment variables
+##
+extraEnvVarsSecret: ""
+## @param extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s)
+## e.g:
+## extraVolumes:
+##   - name: kafka-jaas
+##     secret:
+##       secretName: kafka-jaas
+##
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s)
+## extraVolumeMounts:
+##   - name: kafka-jaas
+##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+##     subPath: kafka_jaas.conf
+##
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the Kafka pod(s)
+## e.g:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+sidecars: []
+## @param initContainers Add additional init containers to the Kafka pod(s)
+## e.g:
+## initContainers:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+initContainers: []
+
+## @section Controller-eligible statefulset parameters
+##
+controller:
+  ## @param controller.replicaCount Number of Kafka controller-eligible nodes
+  ## Ignore this section if running in Zookeeper mode.
+  ##
+  replicaCount: 3
+  ## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes.
+  ##
+  controllerOnly: false
+  ## @param controller.minId Minimal node.id value for controller-eligible nodes. Do not change after first initialization.
+  ## Controller-eligible nodes increment their node.id starting at this value.
+  ## Make sure broker.minId is set high enough, as IDs below it are reserved for controller-eligible nodes.
+  ##
+  minId: 0
+  ## @param controller.zookeeperMigrationMode Set to true to deploy the cluster controller quorum
+  ## This allows configuring both kraft and zookeeper modes simultaneously in order to perform the migration of the Kafka metadata.
+  ## Ref. https://docs.confluent.io/platform/current/installation/migrate-zk-kraft.html
+  ##
+  zookeeperMigrationMode: false
+  ## @param controller.config Configuration file for Kafka controller-eligible nodes, rendered as a template. Auto-generated based on chart values when not specified.
+  ## @param controller.existingConfigmap ConfigMap with Kafka Configuration for controller-eligible nodes.
+  ## NOTE: This will override the configuration based on values, please act carefully
+  ## If both are set, the existingConfigMap will be used.
+  ##
+  config: ""
+  existingConfigmap: ""
+  ## @param controller.extraConfig Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file.
+  ##
+  extraConfig: ""
+  ## @param controller.secretConfig Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file.
+  ## This value will be stored in a secret.
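+  ## A hedged example (the listener-scoped property and its value are illustrative only) of a
+  ## secret-worthy entry for controller-eligible nodes:
+  ## e.g:
+  ## secretConfig: |
+  ##   listener.name.controller.ssl.keystore.password=change-me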
+  ##
+  secretConfig: ""
+  ## @param controller.existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka controller-eligible nodes configuration file
+  ## The key for the configuration should be: server-secret.properties
+  ## NOTE: This will override controller.secretConfig value
+  ##
+  existingSecretConfig: ""
+  ## @param controller.heapOpts Kafka Java Heap size for controller-eligible nodes
+  ##
+  heapOpts: -Xmx1024m -Xms1024m
+  ## @param controller.command Override Kafka container command
+  ##
+  command: []
+  ## @param controller.args Override Kafka container arguments
+  ##
+  args: []
+  ## @param controller.extraEnvVars Extra environment variables to add to Kafka pods
+  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration
+  ## e.g:
+  ## extraEnvVars:
+  ##   - name: KAFKA_CFG_BACKGROUND_THREADS
+  ##     value: "10"
+  ##
+  extraEnvVars: []
+  ## @param controller.extraEnvVarsCM ConfigMap with extra environment variables
+  ##
+  extraEnvVarsCM: ""
+  ## @param controller.extraEnvVarsSecret Secret with extra environment variables
+  ##
+  extraEnvVarsSecret: ""
+  ## @param controller.extraContainerPorts Kafka controller-eligible extra containerPorts.
+  ##
+  extraContainerPorts: []
+  ## Configure extra options for Kafka containers' liveness, readiness and startup probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+  ## @param controller.livenessProbe.enabled Enable livenessProbe on Kafka containers
+  ## @param controller.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+  ## @param controller.livenessProbe.periodSeconds Period seconds for livenessProbe
+  ## @param controller.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+  ## @param controller.livenessProbe.failureThreshold Failure threshold for livenessProbe
+  ## @param controller.livenessProbe.successThreshold Success threshold for livenessProbe
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 3
+    periodSeconds: 10
+    successThreshold: 1
+  ## @param controller.readinessProbe.enabled Enable readinessProbe on Kafka containers
+  ## @param controller.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+  ## @param controller.readinessProbe.periodSeconds Period seconds for readinessProbe
+  ## @param controller.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+  ## @param controller.readinessProbe.failureThreshold Failure threshold for readinessProbe
+  ## @param controller.readinessProbe.successThreshold Success threshold for readinessProbe
+  ##
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    failureThreshold: 6
+    timeoutSeconds: 5
+    periodSeconds: 10
+    successThreshold: 1
+  ## @param controller.startupProbe.enabled Enable startupProbe on Kafka containers
+  ## @param controller.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+  ## @param controller.startupProbe.periodSeconds Period seconds for startupProbe
+  ## @param controller.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+  ## @param controller.startupProbe.failureThreshold Failure threshold for startupProbe
+  ## @param controller.startupProbe.successThreshold Success threshold for startupProbe
+  ##
+  startupProbe:
+    enabled: false
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 1
+    failureThreshold: 15
+    successThreshold: 1
+  ## @param controller.customLivenessProbe Custom livenessProbe that overrides the default one
+  ##
+  customLivenessProbe: {}
+  ## @param controller.customReadinessProbe Custom readinessProbe that overrides the default one
+  ##
+  customReadinessProbe: {}
+  ## @param controller.customStartupProbe Custom startupProbe that overrides the default one
+  ##
+  customStartupProbe: {}
+  ## @param controller.lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup
+  ##
+  lifecycleHooks: {}
+  ## Kafka resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param controller.resources.limits The resources limits for the container
+  ## @param controller.resources.requests The requested resources for the container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Kafka pods' Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param controller.podSecurityContext.enabled Enable security context for the pods
+  ## @param controller.podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup
+  ## @param controller.podSecurityContext.seccompProfile.type Set Kafka pod's Security Context seccomp profile
+  ##
+  podSecurityContext:
+    enabled: true
+    fsGroup: 1001
+    seccompProfile:
+      type: "RuntimeDefault"
+  ## Kafka containers' Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+  ## @param controller.containerSecurityContext.enabled Enable Kafka containers' Security Context
+  ## @param controller.containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser
+  ## @param controller.containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot
+  ## @param controller.containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged
+  ## @param controller.containerSecurityContext.readOnlyRootFilesystem Mount the container's root filesystem as read-only
+  ## @param controller.containerSecurityContext.capabilities.drop Set Kafka containers' server Security Context capabilities to be dropped
+  ## e.g:
+  ##   containerSecurityContext:
+  ##     enabled: true
+  ##     capabilities:
+  ##       drop: ["NET_RAW"]
+  ##     readOnlyRootFilesystem: true
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1001
+    runAsNonRoot: true
+    allowPrivilegeEscalation: false
+    readOnlyRootFilesystem: true
+    capabilities:
+      drop: ["ALL"]
+  ## @param controller.hostAliases Kafka pods host aliases
+  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+  ##
+  hostAliases: []
+  ## @param controller.hostNetwork Specify if host network should be enabled for Kafka pods
+  ##
+  hostNetwork: false
+  ## @param controller.hostIPC Specify if host IPC should be enabled for Kafka pods
+  ##
+  hostIPC: false
+  ## @param controller.podLabels Extra labels for Kafka pods
+  ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  ##
+  podLabels: {}
+  ## @param controller.podAnnotations Extra annotations for Kafka pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations: {}
+  ## @param controller.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAffinityPreset: ""
+  ## @param controller.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAntiAffinityPreset: soft
+  ## Node affinity preset
+  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+  ##
+  nodeAffinityPreset:
+    ## @param controller.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+    ##
+    type: ""
+    ## @param controller.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+    ## E.g.
+    ## key: "kubernetes.io/e2e-az-name"
+    ##
+    key: ""
+    ## @param controller.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+    ## E.g.
+    ## values:
+    ##   - e2e-az1
+    ##   - e2e-az2
+    ##
+    values: []
+  ## @param controller.affinity Affinity for pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+  ##
+  affinity: {}
+  ## @param controller.nodeSelector Node labels for pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## @param controller.tolerations Tolerations for pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## @param controller.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+  ##
+  topologySpreadConstraints: []
+  ## @param controller.terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate
+  ## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution
+  ##
+  terminationGracePeriodSeconds: ""
+  ## @param controller.podManagementPolicy The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+  ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+  ##
+  podManagementPolicy: Parallel
+  ## @param controller.priorityClassName Name of the existing priority class to be used by kafka pods
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+  ##
+  priorityClassName: ""
+  ## @param controller.runtimeClassName Name of the runtime class to be used by pod(s)
+  ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/
+  ##
+  runtimeClassName: ""
+  ## @param controller.enableServiceLinks Whether information about services should be injected into pod's environment variables
+  ## The environment variables injected by service links are not used, but can lead to slow Kafka boot times or slow running of the scripts when there are many services in the current namespace.
+  ## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`.
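+  ## e.g. (illustrative; for namespaces with many Services):
+  ## enableServiceLinks: false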
+  ##
+  enableServiceLinks: true
+  ## @param controller.schedulerName Name of the k8s scheduler (other than default)
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  schedulerName: ""
+  ## @param controller.updateStrategy.type Kafka statefulset strategy type
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+  ## @param controller.extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s)
+  ## e.g:
+  ## extraVolumes:
+  ##   - name: kafka-jaas
+  ##     secret:
+  ##       secretName: kafka-jaas
+  ##
+  extraVolumes: []
+  ## @param controller.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s)
+  ## extraVolumeMounts:
+  ##   - name: kafka-jaas
+  ##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+  ##     subPath: kafka_jaas.conf
+  ##
+  extraVolumeMounts: []
+  ## @param controller.sidecars Add additional sidecar containers to the Kafka pod(s)
+  ## e.g:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+  ## @param controller.initContainers Add additional init containers to the Kafka pod(s)
+  ## e.g:
+  ## initContainers:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  initContainers: []
+  ## Kafka Pod Disruption Budget
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ## @param controller.pdb.create Deploy a pdb object for the Kafka pod
+  ## @param controller.pdb.minAvailable Minimum number/percentage of available Kafka replicas
+  ## @param controller.pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas
+  ##
+  pdb:
+    create: false
+    minAvailable: ""
+    maxUnavailable: 1
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## @param controller.persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected
+    ##
+    enabled: true
+    ## @param controller.persistence.existingClaim A manually managed Persistent Volume and Claim
+    ## If defined, PVC must be created manually before volume will be bound
+    ## The value is evaluated as a template
+    ##
+    existingClaim: ""
+    ## @param controller.persistence.storageClass PVC Storage Class for Kafka data volume
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner.
+    ##
+    storageClass: ""
+    ## @param controller.persistence.accessModes Persistent Volume Access Modes
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param controller.persistence.size PVC Storage Request for Kafka data volume
+    ##
+    size: 8Gi
+    ## @param controller.persistence.annotations Annotations for the PVC
+    ##
+    annotations: {}
+    ## @param controller.persistence.labels Labels for the PVC
+    ##
+    labels: {}
+    ## @param controller.persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param controller.persistence.mountPath Mount path of the Kafka data volume
+    ##
+    mountPath: /bitnami/kafka
+  ## Log Persistence parameters
+  ##
+  logPersistence:
+    ## @param controller.logPersistence.enabled Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected
+    ##
+    enabled: false
+    ## @param controller.logPersistence.existingClaim A manually managed Persistent Volume and Claim
+    ## If defined, PVC must be created manually before volume will be bound
+    ## The value is evaluated as a template
+    ##
+    existingClaim: ""
+    ## @param controller.logPersistence.storageClass PVC Storage Class for Kafka logs volume
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner.
+    ##
+    storageClass: ""
+    ## @param controller.logPersistence.accessModes Persistent Volume Access Modes
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param controller.logPersistence.size PVC Storage Request for Kafka logs volume
+    ##
+    size: 8Gi
+    ## @param controller.logPersistence.annotations Annotations for the PVC
+    ##
+    annotations: {}
+    ## @param controller.logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param controller.logPersistence.mountPath Mount path of the Kafka logs volume
+    ##
+    mountPath: /opt/bitnami/kafka/logs
+
+## @section Broker-only statefulset parameters
+##
+broker:
+  ## @param broker.replicaCount Number of Kafka broker-only nodes
+  ## Ignore this section if running in Zookeeper mode.
+  ##
+  replicaCount: 0
+  ## @param broker.minId Minimal node.id value for broker-only nodes. Do not change after first initialization.
+  ## Broker-only nodes increment their node.id starting at this value.
+  ## We recommend setting this value high enough, as IDs under this value will be used by controller-eligible nodes.
+  ##
+  minId: 100
+  ## @param broker.zookeeperMigrationMode Set to true to deploy the cluster controller quorum
+  ## This allows configuring both kraft and zookeeper modes simultaneously in order to perform the migration of the Kafka metadata.
+  ## Ref. https://docs.confluent.io/platform/current/installation/migrate-zk-kraft.html
+  ##
+  zookeeperMigrationMode: false
+  ## @param broker.config Configuration file for Kafka broker-only nodes, rendered as a template. Auto-generated based on chart values when not specified.
+  ## @param broker.existingConfigmap ConfigMap with Kafka Configuration for broker-only nodes.
+  ## NOTE: This will override the configuration based on values, please act carefully
+  ## If both are set, the existingConfigMap will be used.
+  ##
+  config: ""
+  existingConfigmap: ""
+  ## @param broker.extraConfig Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file.
+  ##
+  extraConfig: ""
+  ## @param broker.secretConfig Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file.
+  ## This value will be stored in a secret.
+  ##
+  secretConfig: ""
+  ## @param broker.existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka broker-only nodes configuration file
+  ## The key for the configuration should be: server-secret.properties
+  ## NOTE: This will override broker.secretConfig value
+  ##
+  existingSecretConfig: ""
+  ## @param broker.heapOpts Kafka Java Heap size for broker-only nodes
+  ##
+  heapOpts: -Xmx1024m -Xms1024m
+  ## @param broker.command Override Kafka container command
+  ##
+  command: []
+  ## @param broker.args Override Kafka container arguments
+  ##
+  args: []
+  ## @param broker.extraEnvVars Extra environment variables to add to Kafka pods
+  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration
+  ## e.g:
+  ## extraEnvVars:
+  ##   - name: KAFKA_CFG_BACKGROUND_THREADS
+  ##     value: "10"
+  ##
+  extraEnvVars: []
+  ## @param broker.extraEnvVarsCM ConfigMap with extra environment variables
+  ##
+  extraEnvVarsCM: ""
+  ## @param broker.extraEnvVarsSecret Secret with extra environment variables
+  ##
+  extraEnvVarsSecret: ""
+  ## @param broker.extraContainerPorts Kafka broker-only extra containerPorts.
+  ##
+  extraContainerPorts: []
+  ## Configure extra options for Kafka containers' liveness, readiness and startup probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+  ## @param broker.livenessProbe.enabled Enable livenessProbe on Kafka containers
+  ## @param broker.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+  ## @param broker.livenessProbe.periodSeconds Period seconds for livenessProbe
+  ## @param broker.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+  ## @param broker.livenessProbe.failureThreshold Failure threshold for livenessProbe
+  ## @param broker.livenessProbe.successThreshold Success threshold for livenessProbe
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 3
+    periodSeconds: 10
+    successThreshold: 1
+  ## @param broker.readinessProbe.enabled Enable readinessProbe on Kafka containers
+  ## @param broker.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+  ## @param broker.readinessProbe.periodSeconds Period seconds for readinessProbe
+  ## @param broker.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+  ## @param broker.readinessProbe.failureThreshold Failure threshold for readinessProbe
+  ## @param broker.readinessProbe.successThreshold Success threshold for readinessProbe
+  ##
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    failureThreshold: 6
+    timeoutSeconds: 5
+    periodSeconds: 10
+    successThreshold: 1
+  ## @param broker.startupProbe.enabled Enable startupProbe on Kafka containers
+  ## @param broker.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+  ## @param broker.startupProbe.periodSeconds Period seconds for startupProbe
+  ## @param broker.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+  ## @param broker.startupProbe.failureThreshold Failure threshold for startupProbe
+  ## @param broker.startupProbe.successThreshold Success threshold for startupProbe
+  ##
+  startupProbe:
+    enabled: false
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 1
+    failureThreshold: 15
+    successThreshold: 1
+  ## @param broker.customLivenessProbe Custom livenessProbe that overrides the default one
+  ##
+  customLivenessProbe: {}
+  ## @param broker.customReadinessProbe Custom readinessProbe that overrides the default one
+  ##
+  customReadinessProbe: {}
+  ## @param broker.customStartupProbe Custom startupProbe that overrides the default one
+  ##
+  customStartupProbe: {}
+  ## @param broker.lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup
+  ##
+  lifecycleHooks: {}
+  ## Kafka resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param broker.resources.limits The resources limits for the container
+  ## @param broker.resources.requests The requested resources for the container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Kafka pods' Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param broker.podSecurityContext.enabled Enable security context for the pods
+  ## @param broker.podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup
+  ## @param broker.podSecurityContext.seccompProfile.type Set Kafka pod's Security Context seccomp profile
+  ##
+  podSecurityContext:
+    enabled: true
+    fsGroup: 1001
+    seccompProfile:
+      type: "RuntimeDefault"
+  ## Kafka containers' Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+  ## @param broker.containerSecurityContext.enabled Enable Kafka containers' Security Context
+  ## @param broker.containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser
+  ## @param broker.containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot
+  ## @param broker.containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged
+  ## @param broker.containerSecurityContext.readOnlyRootFilesystem Mount the container's root filesystem as read-only
+  ## @param broker.containerSecurityContext.capabilities.drop Set Kafka containers' server Security Context capabilities to be dropped
+  ## e.g:
+  ##   containerSecurityContext:
+  ##     enabled: true
+  ##     capabilities:
+  ##       drop: ["NET_RAW"]
+  ##     readOnlyRootFilesystem: true
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1001
+    runAsNonRoot: true
+    allowPrivilegeEscalation: false
+    readOnlyRootFilesystem: true
+    capabilities:
+      drop: ["ALL"]
+  ## @param broker.hostAliases Kafka pods host aliases
+  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+  ##
+  hostAliases: []
+  ## @param broker.hostNetwork Specify if host network should be enabled for Kafka pods
+  ##
+  hostNetwork: false
+  ## @param broker.hostIPC Specify if host IPC should be enabled for Kafka pods
+  ##
+  hostIPC: false
+  ## @param broker.podLabels Extra labels for Kafka pods
+  ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  ##
+  podLabels: {}
+  ## @param broker.podAnnotations Extra annotations for Kafka pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations: {}
+  ## @param broker.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAffinityPreset: ""
+  ## @param broker.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAntiAffinityPreset: soft
+  ## Node affinity preset
+  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+  ##
+  nodeAffinityPreset:
+    ## @param broker.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+    ##
+    type: ""
+    ## @param broker.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+    ## E.g.
+    ## key: "kubernetes.io/e2e-az-name"
+    ##
+    key: ""
+    ## @param broker.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+    ## E.g.
+    ## values:
+    ##   - e2e-az1
+    ##   - e2e-az2
+    ##
+    values: []
+  ## @param broker.affinity Affinity for pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+  ##
+  affinity: {}
+  ## @param broker.nodeSelector Node labels for pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## @param broker.tolerations Tolerations for pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## @param broker.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+  ##
+  topologySpreadConstraints: []
+  ## @param broker.terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate
+  ## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution
+  ##
+  terminationGracePeriodSeconds: ""
+  ## @param broker.podManagementPolicy The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+  ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+  ##
+  podManagementPolicy: Parallel
+  ## @param broker.priorityClassName Name of the existing priority class to be used by kafka pods
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+  ##
+  priorityClassName: ""
+  ## @param broker.runtimeClassName Name of the runtime class to be used by pod(s)
+  ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/
+  ##
+  runtimeClassName: ""
+  ## @param broker.enableServiceLinks Whether information about services should be injected into pod's environment variables
+  ## The environment variables injected by service links are not used, but can lead to slow Kafka boot times or slow running of the scripts when there are many services in the current namespace.
+  ## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`.
+  ##
+  enableServiceLinks: true
+  ## @param broker.schedulerName Name of the k8s scheduler (other than default)
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  schedulerName: ""
+  ## @param broker.updateStrategy.type Kafka statefulset strategy type
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+  ## @param broker.extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s)
+  ## e.g:
+  ## extraVolumes:
+  ##   - name: kafka-jaas
+  ##     secret:
+  ##       secretName: kafka-jaas
+  ##
+  extraVolumes: []
+  ## @param broker.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s)
+  ## extraVolumeMounts:
+  ##   - name: kafka-jaas
+  ##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+  ##     subPath: kafka_jaas.conf
+  ##
+  extraVolumeMounts: []
+  ## @param broker.sidecars Add additional sidecar containers to the Kafka pod(s)
+  ## e.g:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+  ## @param broker.initContainers Add additional init containers to the Kafka pod(s)
+  ## e.g:
+  ## initContainers:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  initContainers: []
+  ## Kafka Pod Disruption Budget
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ## @param broker.pdb.create Deploy a pdb object for the Kafka pod
+  ## @param broker.pdb.minAvailable Minimum number/percentage of available Kafka replicas
+  ## @param broker.pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas
+  ##
+  pdb:
+    create: false
+    minAvailable: ""
+    maxUnavailable: 1
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## @param broker.persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected
+    ##
+    enabled: true
+    ## @param broker.persistence.existingClaim A manually managed Persistent Volume and Claim
+    ## If defined, PVC must be created manually before volume will be bound
+    ## The value is evaluated as a template
+    ##
+    existingClaim: ""
+    ## @param broker.persistence.storageClass PVC Storage Class for Kafka data volume
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner.
+    ##
+    storageClass: ""
+    ## @param broker.persistence.accessModes Persistent Volume Access Modes
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param broker.persistence.size PVC Storage Request for Kafka data volume
+    ##
+    size: 8Gi
+    ## @param broker.persistence.annotations Annotations for the PVC
+    ##
+    annotations: {}
+    ## @param broker.persistence.labels Labels for the PVC
+    ##
+    labels: {}
+    ## @param broker.persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param broker.persistence.mountPath Mount path of the Kafka data volume
+    ##
+    mountPath: /bitnami/kafka
+  ## Log Persistence parameters
+  ##
+  logPersistence:
+    ## @param broker.logPersistence.enabled Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected
+    ##
+    enabled: false
+    ## @param broker.logPersistence.existingClaim A manually managed Persistent Volume and Claim
+    ## If defined, PVC must be created manually before volume will be bound
+    ## The value is evaluated as a template
+    ##
+    existingClaim: ""
+    ## @param broker.logPersistence.storageClass PVC Storage Class for Kafka logs volume
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner.
+    ##
+    storageClass: ""
+    ## @param broker.logPersistence.accessModes Persistent Volume Access Modes
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param broker.logPersistence.size PVC Storage Request for Kafka logs volume
+    ##
+    size: 8Gi
+    ## @param broker.logPersistence.annotations Annotations for the PVC
+    ##
+    annotations: {}
+    ## @param broker.logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param broker.logPersistence.mountPath Mount path of the Kafka logs volume
+    ##
+    mountPath: /opt/bitnami/kafka/logs
+
+
+## @section Traffic Exposure parameters
+##
+
+## Service parameters
+##
+service:
+  ## @param service.type Kubernetes Service type
+  ##
+  type: ClusterIP
+  ## @param service.ports.client Kafka svc port for client connections
+  ## @param service.ports.controller Kafka svc port for controller connections.
It is used if "kraft.enabled: true" + ## @param service.ports.interbroker Kafka svc port for inter-broker connections + ## @param service.ports.external Kafka svc port for external connections + ## + ports: + client: 9092 + controller: 9093 + interbroker: 9094 + external: 9095 + ## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.nodePorts.client Node port for the Kafka client connections + ## @param service.nodePorts.external Node port for the Kafka external connections + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + client: "" + external: "" + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP Kafka service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP Kafka service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Kafka service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Kafka service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Kafka service + ## + annotations: {} + ## Headless service properties + ## + headless: + controller: + ## @param service.headless.controller.annotations Annotations for the controller-eligible headless service. + ## + annotations: {} + ## @param service.headless.controller.labels Labels for the controller-eligible headless service. + ## + labels: {} + broker: + ## @param service.headless.broker.annotations Annotations for the broker-only headless service. + ## + annotations: {} + ## @param service.headless.broker.labels Labels for the broker-only headless service. 
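+      ## e.g. (a sketch; the label key and value are illustrative assumptions, not chart requirements):
+      ## labels:
+      ##   app.kubernetes.io/part-of: kafka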
+ ## + labels: {} +## External Access to Kafka brokers configuration +## +externalAccess: + ## @param externalAccess.enabled Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## @param externalAccess.autoDiscovery.enabled Enable using an init container to auto-detect external IPs/ports by querying the K8s API + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## @param externalAccess.autoDiscovery.image.registry Init container auto-discovery image registry + ## @param externalAccess.autoDiscovery.image.repository Init container auto-discovery image repository + ## @param externalAccess.autoDiscovery.image.tag Init container auto-discovery image tag (immutable tags are recommended) + ## @param externalAccess.autoDiscovery.image.digest Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param externalAccess.autoDiscovery.image.pullPolicy Init container auto-discovery image pull policy + ## @param externalAccess.autoDiscovery.image.pullSecrets Init container auto-discovery image pull secrets + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.25.13-debian-11-r11 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param externalAccess.autoDiscovery.resources.limits The resources limits for the auto-discovery init container + ## @param externalAccess.autoDiscovery.resources.requests The requested resources for the auto-discovery init container + ## + resources: + limits: {} + requests: {} + ## Service settings + controller: + ## @param externalAccess.controller.forceExpose If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes + ## + forceExpose: false + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## Note: A new service per broker will be created + ## + service: + ## @param externalAccess.controller.service.type Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP + ## + type: LoadBalancer + ## @param externalAccess.controller.service.ports.external Kafka port used for external access when service type is LoadBalancer + ## + ports: + external: 9094 + ## @param externalAccess.controller.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.controller.service.loadBalancerNames Array of load balancer Names for each Kafka broker. 
Length must be the same as replicaCount + ## e.g: + ## loadBalancerNames: + ## - broker1.external.example.com + ## - broker2.external.example.com + ## + loadBalancerNames: [] + ## @param externalAccess.controller.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerAnnotations: + ## - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com. + ## - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com. + ## + loadBalancerAnnotations: [] + ## @param externalAccess.controller.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.controller.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.controller.service.externalIPs Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount + ## e.g: + ## externalIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + externalIPs: [] + ## @param externalAccess.controller.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort + ## + useHostIPs: false + ## @param externalAccess.controller.service.usePodIPs using the MY_POD_IP address for external access. + ## + usePodIPs: false + ## @param externalAccess.controller.service.domain Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP + ## NodePort: If not specified, the container will try to get the kubernetes node external IP + ## ClusterIP: Must be specified, ingress IP or domain where tcp for external ports is configured + ## + domain: "" + ## @param externalAccess.controller.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + ## + publishNotReadyAddresses: false + ## @param externalAccess.controller.service.labels Service labels for external access + ## + labels: {} + ## @param externalAccess.controller.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.controller.service.extraPorts Extra ports to expose in the Kafka external service + ## + extraPorts: [] + broker: + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## Note: A new service per broker will be created + ## + service: + ## @param externalAccess.broker.service.type Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP + ## + type: LoadBalancer + ## @param externalAccess.broker.service.ports.external Kafka port used for external access when service type is LoadBalancer + ## + ports: + external: 9094 + ## @param externalAccess.broker.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. 
Length must be the same as replicaCount
+      ## e.g:
+      ## loadBalancerIPs:
+      ##   - X.X.X.X
+      ##   - Y.Y.Y.Y
+      ##
+      loadBalancerIPs: []
+      ## @param externalAccess.broker.service.loadBalancerNames Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount
+      ## e.g:
+      ## loadBalancerNames:
+      ##   - broker1.external.example.com
+      ##   - broker2.external.example.com
+      ##
+      loadBalancerNames: []
+      ## @param externalAccess.broker.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount
+      ## e.g:
+      ## loadBalancerAnnotations:
+      ##   - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com.
+      ##   - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com.
+      ##
+      loadBalancerAnnotations: []
+      ## @param externalAccess.broker.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer
+      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+      ## e.g:
+      ## loadBalancerSourceRanges:
+      ##   - 10.10.10.0/24
+      ##
+      loadBalancerSourceRanges: []
+      ## @param externalAccess.broker.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount
+      ## e.g:
+      ## nodePorts:
+      ##   - 30001
+      ##   - 30002
+      ##
+      nodePorts: []
+      ## @param externalAccess.broker.service.externalIPs Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount
+      ## e.g:
+      ## externalIPs:
+      ##   - X.X.X.X
+      ##   - Y.Y.Y.Y
+      ##
+      externalIPs: []
+      ## @param externalAccess.broker.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort
+      ##
+      useHostIPs: false
+      ## @param externalAccess.broker.service.usePodIPs using the MY_POD_IP address for external access.
+      ##
+      usePodIPs: false
+      ## @param externalAccess.broker.service.domain Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP
+      ## NodePort: If not specified, the container will try to get the kubernetes node external IP
+      ## ClusterIP: Must be specified, ingress IP or domain where tcp for external ports is configured
+      ##
+      domain: ""
+      ## @param externalAccess.broker.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready
+      ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/
+      ##
+      publishNotReadyAddresses: false
+      ## @param externalAccess.broker.service.labels Service labels for external access
+      ##
+      labels: {}
+      ## @param externalAccess.broker.service.annotations Service annotations for external access
+      ##
+      annotations: {}
+      ## @param externalAccess.broker.service.extraPorts Extra ports to expose in the Kafka external service
+      ##
+      extraPorts: []
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: false
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## When set to false, only pods with the correct client label will have network access to the port Kafka is
+  ## listening on. When true, Kafka accepts connections from any source (with the correct destination port).
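+  ## As a sketch, when allowExternal=false a client pod would need the chart's client label before it can
+  ## reach Kafka; the exact label key comes from the rendered NetworkPolicy (release name is a placeholder here):
+  ## podLabels:
+  ##   my-release-kafka-client: "true"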
+  ##
+  allowExternal: true
+  ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
+  ## If explicitNamespacesSelector is missing or set to {}, only client Pods in the networkPolicy's namespace
+  ## that match the other criteria (i.e. that carry the required client label) can reach Kafka.
+  ## To make Kafka accessible to clients in other namespaces, use this LabelSelector to select those
+  ## namespaces; note that the networkPolicy's own namespace must also be explicitly added.
+  ##
+  ## e.g:
+  ## explicitNamespacesSelector:
+  ##   matchLabels:
+  ##     role: frontend
+  ##   matchExpressions:
+  ##     - {key: role, operator: In, values: [frontend]}
+  ##
+  explicitNamespacesSelector: {}
+  ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port
+  ## e.g:
+  ## - ipBlock:
+  ##     cidr: 172.9.0.0/16
+  ##     except:
+  ##       - 172.9.1.0/24
+  ##
+  externalAccess:
+    from: []
+  ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
+  ##
+  egressRules:
+    ## Additional custom egress rules
+    ## e.g:
+    ## customRules:
+    ##   - to:
+    ##       - namespaceSelector:
+    ##           matchLabels:
+    ##             label: example
+    ##
+    customRules: []
+
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image repository
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/os-shell
+    tag: 11-debian-11-r60
+    digest: ""
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container's Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    runAsUser: 0
+
+## @section Other Parameters
+##
+
+## ServiceAccount for Kafka
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods
+  ##
+  create: true
+  ## @param serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated
+  ## If not set and create is true, a name is generated using the kafka.serviceAccountName template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+## Role Based Access Control
+## ref: https://kubernetes.io/docs/admin/authorization/rbac/
+##
+rbac:
+  ## @param rbac.create Whether to create & use RBAC resources or not
+  ## binding the Kafka ServiceAccount to a role
+  ## that allows Kafka pods to query the K8s API
+  ##
+  create: false
+
+## @section Metrics parameters
+##
+
+## Prometheus Exporters / Metrics
+##
+metrics:
+  ## Prometheus Kafka exporter: exposes complementary metrics to the JMX exporter
+  ##
+  kafka:
+    ## @param metrics.kafka.enabled Whether or not to create a standalone Kafka exporter to expose Kafka metrics
+    ##
+    enabled: false
+    ## Bitnami Kafka exporter image
+    ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
+    ## @param metrics.kafka.image.registry Kafka exporter image registry
+    ## @param metrics.kafka.image.repository Kafka exporter image repository
+    ## @param metrics.kafka.image.tag Kafka exporter image tag (immutable tags are recommended)
+    ## @param metrics.kafka.image.digest Kafka exporter image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag + ## @param metrics.kafka.image.pullPolicy Kafka exporter image pull policy + ## @param metrics.kafka.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.7.0-debian-11-r102 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param metrics.kafka.certificatesSecret Name of the existing secret containing the optional certificate and key files + ## for Kafka exporter client authentication + ## + certificatesSecret: "" + ## @param metrics.kafka.tlsCert The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) + ## + tlsCert: cert-file + ## @param metrics.kafka.tlsKey The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) + ## + tlsKey: key-file + ## @param metrics.kafka.tlsCaSecret Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication + ## + tlsCaSecret: "" + ## @param metrics.kafka.tlsCaCert The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) + ## + tlsCaCert: ca-file + ## @param metrics.kafka.extraFlags Extra flags to be passed to Kafka exporter + ## e.g: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" + ## + extraFlags: {} + ## @param metrics.kafka.command Override Kafka exporter container command + ## + command: [] + ## @param metrics.kafka.args Override Kafka exporter container arguments + ## + args: [] + ## @param metrics.kafka.containerPorts.metrics Kafka exporter metrics container port + ## + containerPorts: + metrics: 9308 + ## Kafka exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.kafka.resources.limits The resources limits for the container + ## @param metrics.kafka.resources.requests The requested resources for the container + ## + resources: + limits: {} + requests: {} + ## Kafka exporter pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.kafka.podSecurityContext.enabled Enable security context for the pods + ## @param metrics.kafka.podSecurityContext.fsGroup Set Kafka exporter pod's Security Context fsGroup + ## @param metrics.kafka.podSecurityContext.seccompProfile.type Set Kafka exporter pod's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Kafka exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context + ## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser + ## @param 
metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot
+    ## @param metrics.kafka.containerSecurityContext.allowPrivilegeEscalation Set Kafka exporter containers' Security Context allowPrivilegeEscalation
+    ## @param metrics.kafka.containerSecurityContext.readOnlyRootFilesystem Set Kafka exporter containers' Security Context readOnlyRootFilesystem
+    ## @param metrics.kafka.containerSecurityContext.capabilities.drop Set Kafka exporter containers' Security Context capabilities to be dropped
+    ## e.g:
+    ## containerSecurityContext:
+    ##   enabled: true
+    ##   capabilities:
+    ##     drop: ["NET_RAW"]
+    ##   readOnlyRootFilesystem: true
+    ##
+    containerSecurityContext:
+      enabled: true
+      runAsUser: 1001
+      runAsNonRoot: true
+      allowPrivilegeEscalation: false
+      readOnlyRootFilesystem: true
+      capabilities:
+        drop: ["ALL"]
+    ## @param metrics.kafka.hostAliases Kafka exporter pods host aliases
+    ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+    ##
+    hostAliases: []
+    ## @param metrics.kafka.podLabels Extra labels for Kafka exporter pods
+    ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    ##
+    podLabels: {}
+    ## @param metrics.kafka.podAnnotations Extra annotations for Kafka exporter pods
+    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+    ##
+    podAnnotations: {}
+    ## @param metrics.kafka.podAffinityPreset Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
+    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+    ##
+    podAffinityPreset: ""
+    ## @param metrics.kafka.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
+    ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+    ##
+    podAntiAffinityPreset: soft
+    ## Node metrics.kafka.affinity preset
+    ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+    ##
+    nodeAffinityPreset:
+      ## @param metrics.kafka.nodeAffinityPreset.type Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
+      ##
+      type: ""
+      ## @param metrics.kafka.nodeAffinityPreset.key Node label key to match. Ignored if `metrics.kafka.affinity` is set.
+      ## E.g.
+      ## key: "kubernetes.io/e2e-az-name"
+      ##
+      key: ""
+      ## @param metrics.kafka.nodeAffinityPreset.values Node label values to match. Ignored if `metrics.kafka.affinity` is set.
+      ## E.g.
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param metrics.kafka.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: metrics.kafka.podAffinityPreset, metrics.kafka.podAntiAffinityPreset, and metrics.kafka.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param metrics.kafka.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param metrics.kafka.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param metrics.kafka.schedulerName Name of the k8s scheduler (other than default) for Kafka exporter + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param metrics.kafka.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## The environment variables injected by service links are not used, but can lead to slow kafka boot times or slow running of the scripts when there are many services in the current namespace. + ## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. + ## + enableServiceLinks: true + ## @param metrics.kafka.priorityClassName Kafka exporter pods' priorityClassName + ## + priorityClassName: "" + ## @param metrics.kafka.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param metrics.kafka.extraVolumes Optionally specify extra list of additional volumes for the Kafka exporter pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param metrics.kafka.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param metrics.kafka.sidecars Add additional sidecar containers to the Kafka exporter pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param metrics.kafka.initContainers Add init containers to the Kafka exporter pods + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## Kafka exporter service configuration + ## + service: + ## @param metrics.kafka.service.ports.metrics Kafka exporter metrics service port + ## + ports: + metrics: 9308 + ## @param metrics.kafka.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.kafka.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param 
metrics.kafka.service.annotations [object] Annotations for the Kafka exporter service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.ports.metrics }}" + prometheus.io/path: "/metrics" + ## Kafka exporter pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param metrics.kafka.serviceAccount.create Enable creation of ServiceAccount for Kafka exporter pods + ## + create: true + ## @param metrics.kafka.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.metrics.kafka.serviceAccountName template + ## + name: "" + ## @param metrics.kafka.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Prometheus JMX exporter: exposes the majority of Kafka metrics + ## + jmx: + ## @param metrics.jmx.enabled Whether or not to expose JMX metrics to Prometheus + ## + enabled: false + ## @param metrics.jmx.kafkaJmxPort JMX port where the exporter will collect metrics, exposed in the Kafka container. + ## + kafkaJmxPort: 5555 + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## @param metrics.jmx.image.registry JMX exporter image registry + ## @param metrics.jmx.image.repository JMX exporter image repository + ## @param metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended) + ## @param metrics.jmx.image.digest JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.jmx.image.pullPolicy JMX exporter image pull policy + ## @param metrics.jmx.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.19.0-debian-11-r66 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Prometheus JMX exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context + ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot + ## @param metrics.jmx.containerSecurityContext.allowPrivilegeEscalation Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation + ## @param metrics.jmx.containerSecurityContext.readOnlyRootFilesystem Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem + ## @param metrics.jmx.containerSecurityContext.capabilities.drop Set Prometheus JMX exporter containers' Security Context capabilities to be dropped + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + ## @param metrics.jmx.containerPorts.metrics Prometheus JMX exporter metrics container port + ## + containerPorts: + metrics: 5556 + ## Prometheus JMX exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.jmx.resources.limits The resources limits for the JMX exporter container + ## @param metrics.jmx.resources.requests The requested resources for the JMX exporter container + ## + resources: + limits: {} + requests: {} + ## Prometheus JMX exporter service configuration + ## + service: + ## @param metrics.jmx.service.ports.metrics Prometheus JMX exporter metrics service port + ## + ports: + metrics: 5556 + ## @param metrics.jmx.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.jmx.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.jmx.service.annotations [object] Annotations for the Prometheus JMX exporter service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.ports.metrics }}" + prometheus.io/path: "/" + 
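+    ## For example, a parent values file could enable this exporter and narrow the whitelist below to
+    ## broker essentials (a sketch; the object-name patterns reuse entries from the default list):
+    ## metrics:
+    ##   jmx:
+    ##     enabled: true
+    ##     whitelistObjectNames:
+    ##       - kafka.server:*
+    ##       - java.lang:*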
## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects to expose via the JMX exporter
+    ## Only whitelisted values will be exposed by the JMX exporter, and they must also be exposed via rules. To expose all metrics
+    ## (warning: this is excessively verbose and the result is not formatted in a Prometheus style), (1) set `whitelistObjectNames: []` and
+    ## (2) comment out the `overrideConfig` above.
+    ##
+    whitelistObjectNames:
+      - kafka.controller:*
+      - kafka.server:*
+      - java.lang:*
+      - kafka.network:*
+      - kafka.log:*
+    ## @param metrics.jmx.config [string] Configuration file for JMX exporter
+    ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template
+    ##
+    ## Credits to the incubator/kafka chart for the JMX configuration.
+    ## https://github.com/helm/charts/tree/master/incubator/kafka
+    ##
+    config: |-
+      jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:{{ .Values.metrics.jmx.kafkaJmxPort }}/jmxrmi
+      lowercaseOutputName: true
+      lowercaseOutputLabelNames: true
+      ssl: false
+      {{- if .Values.metrics.jmx.whitelistObjectNames }}
+      whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"]
+      {{- end }}
+    ## @param metrics.jmx.existingConfigmap Name of existing ConfigMap with JMX exporter configuration
+    ## NOTE: This will override metrics.jmx.config
+    ##
+    existingConfigmap: ""
+    ## @param metrics.jmx.extraRules Add extra rules to JMX exporter configuration
+    ## e.g:
+    ## extraRules: |-
+    ##   - pattern: kafka.server<type=socket-server-metrics, listener=(.+), networkProcessor=(.+)><>(connection-count)
+    ##     name: kafka_server_socket_server_metrics_$3
+    ##     labels:
+    ##       listener: $1
+    ##
+    extraRules: ""
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`)
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
+    labels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
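+    ## e.g. (assuming the scraped service carries this common, but not guaranteed, label):
+    ## jobLabel: app.kubernetes.io/name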
+ ## + jobLabel: "" + + prometheusRule: + ## @param metrics.prometheusRule.enabled if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.groups Prometheus Rule Groups for Kafka + ## + groups: [] + +## @section Kafka provisioning parameters +## + +## Kafka provisioning +## +provisioning: + ## @param provisioning.enabled Enable kafka provisioning Job + ## + enabled: false + ## @param provisioning.numPartitions Default number of partitions for topics when unspecified + ## + numPartitions: 1 + ## @param provisioning.replicationFactor Default replication factor for topics when unspecified + ## + replicationFactor: 1 + ## @param provisioning.topics Kafka topics to provision + ## - name: topic-name + ## partitions: 1 + ## replicationFactor: 1 + ## ## https://kafka.apache.org/documentation/#topicconfigs + ## config: + ## max.message.bytes: 64000 + ## flush.messages: 1 + ## + topics: [] + ## @param provisioning.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param provisioning.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param provisioning.extraProvisioningCommands Extra commands to run to provision cluster resources + ## - echo "Allow user to consume from any topic" + ## - >- + ## /opt/bitnami/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config $CLIENT_CONF + ## --add + ## --allow-principal User:user + ## --consumer --topic '*' + ## - "/opt/bitnami/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config $CLIENT_CONF + ## --list" + ## + extraProvisioningCommands: [] + ## @param provisioning.parallel Number of provisioning commands to run at the same time + ## + parallel: 1 + ## @param provisioning.preScript Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations + ## + preScript: "" + ## @param provisioning.postScript Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations + ## + postScript: "" + ## Auth Configuration for kafka provisioning Job + ## + auth: + ## TLS configuration for kafka provisioning Job + ## + tls: + ## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `JKS` and `PEM`. + ## Note: ignored if auth.tls.client.protocol different from one of these values: "SSL" "SASL_SSL" + ## + type: jks + ## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job. + ## When using 'jks' format for certificates, the secret should contain a truststore and a keystore. + ## When using 'pem' format for certificates, the secret should contain one of the following: + ## 1. A public CA certificate, a public certificate and one private key. + ## 2. A truststore and a keystore in PEM format + ## If caCert is set, option 1 will be taken, otherwise option 2. 
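+      ## e.g. for the 'jks' format, such a secret could be created like this (secret and file names
+      ## are placeholders):
+      ##   kubectl create secret generic kafka-provisioning-certs \
+      ##     --from-file=./truststore.jks --from-file=./keystore.jks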
+ ## + certificatesSecret: "" + ## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) + ## + cert: tls.crt + ## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key) + ## + key: tls.key + ## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) + ## + caCert: ca.crt + ## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) + ## + keystore: keystore.jks + ## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) + ## + truststore: truststore.jks + ## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. + ## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key. + ## + passwordsSecret: "" + ## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keyPasswordSecretKey: key-password + ## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keystorePasswordSecretKey: keystore-password + ## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + truststorePasswordSecretKey: truststore-password + ## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. + ## + keyPassword: "" + ## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. + ## + keystorePassword: "" + ## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. 
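+      ## e.g. the passwordsSecret described above could be created like this (secret name and password
+      ## values are placeholders):
+      ##   kubectl create secret generic kafka-jks-passwords \
+      ##     --from-literal=keystore-password=change-me --from-literal=truststore-password=change-me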
+ ## + truststorePassword: "" + ## @param provisioning.command Override provisioning container command + ## + command: [] + ## @param provisioning.args Override provisioning container arguments + ## + args: [] + ## @param provisioning.extraEnvVars Extra environment variables to add to the provisioning pod + ## e.g: + ## extraEnvVars: + ## - name: KAFKA_CFG_BACKGROUND_THREADS + ## value: "10" + ## + extraEnvVars: [] + ## @param provisioning.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param provisioning.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param provisioning.podAnnotations Extra annotations for Kafka provisioning pods + ## + podAnnotations: {} + ## @param provisioning.podLabels Extra labels for Kafka provisioning pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## Kafka provisioning pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param provisioning.serviceAccount.create Enable creation of ServiceAccount for Kafka provisioning pods + ## + create: false + ## @param provisioning.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the provisioning.serviceAccount.name template + ## + name: "" + ## @param provisioning.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Kafka provisioning resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param provisioning.resources.limits The resources limits for the Kafka provisioning container + ## @param provisioning.resources.requests The requested resources for the Kafka provisioning container + ## + resources: + limits: {} + requests: {} + ## Kafka provisioning pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param provisioning.podSecurityContext.enabled Enable security context for the pods + ## @param provisioning.podSecurityContext.fsGroup Set Kafka provisioning pod's Security Context fsGroup + ## @param provisioning.podSecurityContext.seccompProfile.type Set Kafka provisioning pod's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Kafka provisioning containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context + ## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser + ## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot + ## @param provisioning.containerSecurityContext.allowPrivilegeEscalation Set Kafka provisioning containers' Security Context allowPrivilegeEscalation + ## @param provisioning.containerSecurityContext.readOnlyRootFilesystem Set Kafka provisioning containers' Security Context 
readOnlyRootFilesystem
+  ## @param provisioning.containerSecurityContext.capabilities.drop Set Kafka provisioning containers' Security Context capabilities to be dropped
+  ## e.g:
+  ## containerSecurityContext:
+  ##   enabled: true
+  ##   capabilities:
+  ##     drop: ["NET_RAW"]
+  ##   readOnlyRootFilesystem: true
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1001
+    runAsNonRoot: true
+    allowPrivilegeEscalation: false
+    readOnlyRootFilesystem: true
+    capabilities:
+      drop: ["ALL"]
+  ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for kafka provisioning
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  schedulerName: ""
+  ## @param provisioning.enableServiceLinks Whether information about services should be injected into pod's environment variable
+  ## The environment variables injected by service links are not used, but can lead to slow kafka boot times or slow running of the scripts when there are many services in the current namespace.
+  ## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`.
+  ##
+  enableServiceLinks: true
+  ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s)
+  ## e.g:
+  ## extraVolumes:
+  ##   - name: kafka-jaas
+  ##     secret:
+  ##       secretName: kafka-jaas
+  ##
+  extraVolumes: []
+  ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s)
+  ## extraVolumeMounts:
+  ##   - name: kafka-jaas
+  ##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+  ##     subPath: kafka_jaas.conf
+  ##
+  extraVolumeMounts: []
+  ## @param provisioning.sidecars Add additional sidecar containers to the Kafka provisioning pod(s)
+  ## e.g:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+  ## @param provisioning.initContainers Add additional init containers to the Kafka provisioning pod(s)
+  ## e.g:
+  ## initContainers:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  initContainers: []
+  ## @param provisioning.waitForKafka If true, use an init container to wait until Kafka is ready before starting provisioning
+  ##
+  waitForKafka: true
+
+## @section KRaft chart parameters
+
+## KRaft configuration
+## Kafka mode without Zookeeper. Kafka nodes can work as controllers in this mode.
+##
+kraft:
+  ## @param kraft.enabled Switch to enable or disable the KRaft mode for Kafka
+  ##
+  enabled: true
+  ## @param kraft.clusterId Kafka Kraft cluster ID. If not set, a random cluster ID will be generated the first time Kraft is initialized.
+  ## NOTE: Already initialized Kafka nodes will use cluster ID stored in their persisted storage.
+  ## If reusing existing PVCs or migrating from Zookeeper mode, make sure the cluster ID is set matching the stored cluster ID, otherwise new nodes will fail to join the cluster.
+  ## In case the cluster ID stored in the secret does not match the value stored in /bitnami/kafka/data/meta.properties, remove the secret and upgrade the chart setting the correct value.
+  ##
+  clusterId: ""
+  ## @param kraft.controllerQuorumVoters Override the Kafka controller quorum voters of the Kafka Kraft cluster.
If not set, it will be automatically configured to use all controller-eligible nodes.
+  ##
+  controllerQuorumVoters: ""
+
+## @section ZooKeeper chart parameters
+##
+## @param zookeeperChrootPath Path which puts data under some path in the global ZooKeeper namespace
+## ref: https://kafka.apache.org/documentation/#brokerconfigs_zookeeper.connect
+##
+zookeeperChrootPath: ""
+## ZooKeeper chart configuration
+## https://github.com/bitnami/charts/blob/main/bitnami/zookeeper/values.yaml
+##
+zookeeper:
+  ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart. Must be false if you use KRaft mode.
+  ##
+  enabled: false
+  ## @param zookeeper.replicaCount Number of ZooKeeper nodes
+  ##
+  replicaCount: 1
+  ## ZooKeeper authentication
+  ##
+  auth:
+    client:
+      ## @param zookeeper.auth.client.enabled Enable ZooKeeper auth
+      ##
+      enabled: false
+      ## @param zookeeper.auth.client.clientUser User that will be used by the ZooKeeper client (zkCli.sh) to authenticate. Must exist in the serverUsers comma-separated list.
+      ##
+      clientUser: ""
+      ## @param zookeeper.auth.client.clientPassword Password that will be used by the ZooKeeper client (zkCli.sh) to authenticate. Must exist in the serverPasswords comma-separated list.
+      ##
+      clientPassword: ""
+      ## @param zookeeper.auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
+      ##
+      serverUsers: ""
+      ## @param zookeeper.auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+      ##
+      serverPasswords: ""
+  ## ZooKeeper Persistence parameters
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ## @param zookeeper.persistence.enabled Enable persistence on ZooKeeper using PVC(s)
+  ## @param zookeeper.persistence.storageClass Persistent Volume storage class
+  ## @param zookeeper.persistence.accessModes Persistent Volume access modes
+  ## @param zookeeper.persistence.size Persistent Volume size
+  ##
+  persistence:
+    enabled: true
+    storageClass: ""
+    accessModes:
+      - ReadWriteOnce
+    size: 8Gi
+
+## External Zookeeper Configuration
+##
+externalZookeeper:
+  ## @param externalZookeeper.servers List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. Must be empty if you use KRaft mode.
+  ##
+  servers: []
diff --git a/manifest/helm-charts/infra/minio/.helmignore b/manifest/helm-charts/infra/minio/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/manifest/helm-charts/infra/minio/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/manifest/helm-charts/infra/minio/Chart.lock b/manifest/helm-charts/infra/minio/Chart.lock
new file mode 100644
index 000000000..490998493
--- /dev/null
+++ b/manifest/helm-charts/infra/minio/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 2.11.1
+digest: sha256:ead8f26c76a9ec082f23629a358e8efd8f88d87aaed734bf41febcb8a7bc5d4c
+generated: "2023-09-16T12:11:09.671932275Z"
diff --git a/manifest/helm-charts/infra/minio/Chart.yaml b/manifest/helm-charts/infra/minio/Chart.yaml
new file mode 100644
index 000000000..f8603f884
--- /dev/null
+++ b/manifest/helm-charts/infra/minio/Chart.yaml
@@ -0,0 +1,36 @@
+annotations:
+  category: Infrastructure
+  images: |
+    - name: minio-client
+      image: docker.io/bitnami/minio-client:2023.9.20-debian-11-r0
+    - name: minio
+      image: docker.io/bitnami/minio:2023.9.20-debian-11-r0
+    - name: os-shell
+      image: docker.io/bitnami/os-shell:11-debian-11-r72
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2023.9.20
+dependencies:
+- name: common
+  repository: oci://registry-1.docker.io/bitnamicharts
+  tags:
+  - bitnami-common
+  version: 2.x.x
+description: MinIO(R) is an object storage server, compatible with Amazon S3 cloud
+  storage service, mainly used for storing unstructured data (such as photos, videos,
+  log files, etc.).
+home: https://bitnami.com
+icon: https://bitnami.com/assets/stacks/minio/img/minio-stack-220x234.png
+keywords:
+- minio
+- storage
+- object-storage
+- s3
+- cluster
+maintainers:
+- name: VMware, Inc.
+  url: https://github.com/bitnami/charts
+name: minio
+sources:
+- https://github.com/bitnami/charts/tree/main/bitnami/minio
+version: 12.8.8
diff --git a/manifest/helm-charts/infra/minio/README.md b/manifest/helm-charts/infra/minio/README.md
new file mode 100644
index 000000000..85ab003b2
--- /dev/null
+++ b/manifest/helm-charts/infra/minio/README.md
@@ -0,0 +1,531 @@
+
+
+# Bitnami Object Storage based on MinIO(R)
+
+MinIO(R) is an object storage server, compatible with Amazon S3 cloud storage service, mainly used for storing unstructured data (such as photos, videos, log files, etc.).
+
+[Overview of Bitnami Object Storage based on MinIO®](https://min.io/)
+
+Disclaimer: All software products, projects and company names are trademark(TM) or registered(R) trademarks of their respective holders, and use of them does not imply any affiliation or endorsement. This software is licensed to you subject to one or more open source licenses and VMware provides the software on an AS-IS basis. MinIO(R) is a registered trademark of the MinIO Inc. in the US and other countries. Bitnami is not affiliated, associated, authorized, endorsed by, or in any way officially connected with MinIO Inc. MinIO(R) is licensed under GNU AGPL v3.0.
+
+## TL;DR
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/minio
+```
+
+## Introduction
+
+This chart bootstraps a [MinIO®](https://github.com/bitnami/containers/tree/main/bitnami/minio) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+Looking to use Bitnami Object Storage based on MinIO® in production?
Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/minio +``` + +These commands deploy MinIO® on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + +### Common parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | + +### MinIO® parameters + +| Name | Description | Value | +| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `image.registry` | MinIO® image registry | `docker.io` | +| `image.repository` | MinIO® image repository | `bitnami/minio` | +| `image.tag` | MinIO® image tag (immutable tags are recommended) | `2023.9.20-debian-11-r0` | +| `image.digest` | MinIO® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `clientImage.registry` | MinIO® Client image registry | `docker.io` | +| `clientImage.repository` | MinIO® Client image repository | `bitnami/minio-client` | +| `clientImage.tag` | MinIO® Client image tag (immutable tags are recommended) | `2023.9.20-debian-11-r0` | +| `clientImage.digest` | MinIO® Client image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` |
+| `mode` | MinIO® server mode (`standalone` or `distributed`) | `standalone` |
+| `auth.rootUser` | MinIO® root username | `admin` |
+| `auth.rootPassword` | Password for MinIO® root user | `""` |
+| `auth.existingSecret` | Use existing secret for credentials details (`auth.rootUser` and `auth.rootPassword` will be ignored and picked up from this secret). The secret has to contain the keys `root-user` and `root-password` | `""` |
+| `auth.forcePassword` | Force users to specify required passwords | `false` |
+| `auth.useCredentialsFiles` | Mount credentials as files instead of using environment variables | `false` |
+| `auth.forceNewKeys` | Force root credentials (user and password) to be reconfigured every time they change in the secrets | `false` |
+| `defaultBuckets` | Comma-, semicolon- or space-separated list of buckets to create at initialization (only in standalone mode) | `""` |
+| `disableWebUI` | Disable MinIO® Web UI | `false` |
+| `tls.enabled` | Enable TLS in front of the container | `false` |
+| `tls.autoGenerated` | Automatically generate self-signed TLS certificates | `false` |
+| `tls.existingSecret` | Name of an existing secret holding the certificate information | `""` |
+| `tls.mountPath` | The mount path where the secret will be located | `""` |
+| `extraEnvVars` | Extra environment variables to be set on the MinIO® container | `[]` |
+| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` |
+| `extraEnvVarsSecret` | Secret with extra environment variables | `""` |
+| `command` | Default container command (useful when using custom images). Use array form | `[]` |
+| `args` | Default container args (useful when using custom images). Use array form | `[]` |
+
+### MinIO® deployment/statefulset parameters
+
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `schedulerName` | Specifies the schedulerName; if empty, the default kube-scheduler is used | `""` |
+| `terminationGracePeriodSeconds` | Time (in seconds) given to the MinIO® pod to terminate gracefully | `""` |
+| `deployment.updateStrategy.type` | Deployment strategy type | `Recreate` |
+| `statefulset.updateStrategy.type` | StatefulSet strategy type | `RollingUpdate` |
+| `statefulset.podManagementPolicy` | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` |
+| `statefulset.replicaCount` | Number of pods per zone (only for MinIO® distributed mode). Should be even and `>= 4` | `4` |
+| `statefulset.zones` | Number of zones (only for MinIO® distributed mode) | `1` |
+| `statefulset.drivesPerNode` | Number of drives attached to every node (only for MinIO® distributed mode) | `1` |
+| `provisioning.enabled` | Enable MinIO® provisioning Job | `false` |
+| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for MinIO® provisioning | `""` |
+| `provisioning.podLabels` | Extra labels for provisioning pods | `{}` |
+| `provisioning.podAnnotations` | Provisioning Pod annotations. | `{}` |
+| `provisioning.command` | Default provisioning container command (useful when using custom images).
Use array form | `[]` | +| `provisioning.args` | Default provisioning container args (useful when using custom images). Use array form | `[]` | +| `provisioning.extraCommands` | Optionally specify extra list of additional commands for MinIO® provisioning pod | `[]` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for MinIO® provisioning pod | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for MinIO® provisioning container | `[]` | +| `provisioning.resources.limits` | The resources limits for the container | `{}` | +| `provisioning.resources.requests` | The requested resources for the container | `{}` | +| `provisioning.policies` | MinIO® policies provisioning | `[]` | +| `provisioning.users` | MinIO® users provisioning. Can be used in addition to provisioning.usersExistingSecrets. | `[]` | +| `provisioning.usersExistingSecrets` | Array if existing secrets containing MinIO® users to be provisioned. Can be used in addition to provisioning.users. | `[]` | +| `provisioning.groups` | MinIO® groups provisioning | `[]` | +| `provisioning.buckets` | MinIO® buckets, versioning, lifecycle, quota and tags provisioning | `[]` | +| `provisioning.config` | MinIO® config provisioning | `[]` | +| `provisioning.podSecurityContext.enabled` | Enable pod Security Context | `true` | +| `provisioning.podSecurityContext.fsGroup` | Group ID for the container | `1001` | +| `provisioning.containerSecurityContext.enabled` | Enable container Security Context | `true` | +| `provisioning.containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Avoid running as root User | `true` | +| `provisioning.cleanupAfterFinished.enabled` | Enables Cleanup for Finished Jobs | `false` | +| `provisioning.cleanupAfterFinished.seconds` | Sets the value of ttlSecondsAfterFinished | `600` | +| `hostAliases` | MinIO® pod host aliases | `[]` | +| `containerPorts.api` | MinIO® container port to open for MinIO® API | `9000` | +| `containerPorts.console` | MinIO® container port to open for MinIO® Console | `9001` | +| `podSecurityContext.enabled` | Enable pod Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the container | `1001` | +| `containerSecurityContext.enabled` | Enable container Security Context | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `containerSecurityContext.runAsNonRoot` | Avoid running as root User | `true` | +| `podLabels` | Extra labels for MinIO® pods | `{}` | +| `podAnnotations` | Annotations for MinIO® pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template. | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template. | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template. 
| `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for MinIO® pods assignment spread across your cluster among failure-domains | `[]` | +| `priorityClassName` | MinIO® pods' priorityClassName | `""` | +| `resources.limits` | The resources limits for the MinIO® container | `{}` | +| `resources.requests` | The requested resources for the MinIO® container | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `0` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Override default startup probe | `{}` | +| `lifecycleHooks` | for the MinIO® container(s) to automate configuration before or after startup | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for MinIO® pods | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for MinIO® container(s) | `[]` | +| `initContainers` | Add additional init containers to the MinIO® pods | `[]` | +| `sidecars` | Add additional sidecar containers to the MinIO® pods | `[]` | + +### Traffic exposure parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | MinIO® service type | `ClusterIP` | +| `service.ports.api` | MinIO® API service port | `9000` | +| `service.ports.console` | MinIO® Console service port | `9001` | +| `service.nodePorts.api` | Specify the MinIO® API nodePort value for the LoadBalancer and NodePort service types | `""` | +| `service.nodePorts.console` | Specify the MinIO® Console nodePort value for the LoadBalancer and NodePort service types | `""` | +| `service.clusterIP` | Service Cluster IP | `""` | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` (optional, cloud specific) | `""` | +| `service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| 
`service.extraPorts` | Extra ports to expose in the service (normally used with the `sidecar` value) | `[]` | +| `service.annotations` | Annotations for MinIO® service | `{}` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | +| `ingress.enabled` | Enable ingress controller resource for MinIO Console | `false` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.hostname` | Default host for the ingress resource | `minio.local` | +| `ingress.path` | The Path to MinIO®. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.servicePort` | Service port to be used | `minio-console` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraPaths` | Any additional paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | +| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `apiIngress.enabled` | Enable ingress controller resource for MinIO API | `false` | +| `apiIngress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `apiIngress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `apiIngress.hostname` | Default host for the ingress resource | `minio.local` | +| `apiIngress.path` | The Path to MinIO®. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `apiIngress.pathType` | Ingress path type | `ImplementationSpecific` | +| `apiIngress.servicePort` | Service port to be used | `minio-api` | +| `apiIngress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `apiIngress.tls` | Enable TLS configuration for the hostname defined at `apiIngress.hostname` parameter | `false` | +| `apiIngress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `apiIngress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `apiIngress.extraPaths` | Any additional paths that may need to be added to the ingress under the main host | `[]` | +| `apiIngress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. 
| `[]` | +| `apiIngress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | +| `apiIngress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `networkPolicy.enabled` | Enable the default NetworkPolicy policy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.extraFromClauses` | Allows to add extra 'from' clauses to the NetworkPolicy | `[]` | + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | -------------------------------------------------------------------- | --------------------- | +| `persistence.enabled` | Enable MinIO® data persistence using PVC. If false, use emptyDir | `true` | +| `persistence.storageClass` | PVC Storage Class for MinIO® data volume | `""` | +| `persistence.mountPath` | Data volume mount path | `/bitnami/minio/data` | +| `persistence.accessModes` | PVC Access Modes for MinIO® data volume | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for MinIO® data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (only in `standalone` mode) | `""` | + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r72` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | ----------------------------------------------------------- | ------ | +| `serviceAccount.create` | Enable the creation of a ServiceAccount for MinIO® pods | `true` | +| `serviceAccount.name` | Name of the created ServiceAccount | `""` | +| `serviceAccount.automountServiceAccountToken` | Enable/disable auto mounting of the service account token | `true` | +| `serviceAccount.annotations` | Custom annotations for MinIO® ServiceAccount | `{}` | + +### Other parameters + +| Name | Description | Value | +| -------------------- | --------------------------------------------------------------------------------- | ------- | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable after the eviction | `""` | + +### Metrics parameters + +| Name | Description | Value | +| ------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------- | --------------------------- | +| `metrics.prometheusAuthType` | Authentication mode for Prometheus (`jwt` or `public`) | `public` | +| `metrics.serviceMonitor.enabled` | If the operator is installed in your cluster, set to true to create a Service Monitor Entry | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `metrics.serviceMonitor.path` | HTTP path to scrape for metrics | `/minio/v2/metrics/cluster` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.relabelings` | Metrics relabelings to add to the scrape endpoint, applied before scraping | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.apiVersion` | ApiVersion for the serviceMonitor Resource (defaults to "monitoring.coreos.com/v1") | `""` | +| `metrics.serviceMonitor.tlsConfig` | Additional TLS configuration for metrics endpoint with "https" scheme | `{}` | +| `metrics.prometheusRule.enabled` | Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` 
and `metrics.prometheusRule.rules`) | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
+| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.rules` | Prometheus Rule definitions | `[]` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release \
+  --set auth.rootUser=minio-admin \
+  --set auth.rootPassword=minio-secret-password \
+    oci://registry-1.docker.io/bitnamicharts/minio
+```
+
+The above command sets the MinIO® Server root user and password to `minio-admin` and `minio-secret-password`, respectively.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/minio
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Distributed mode
+
+By default, this chart provisions a MinIO® server in standalone mode. You can start the MinIO® server in [distributed mode](https://docs.minio.io/docs/distributed-minio-quickstart-guide) with the following parameter: `mode=distributed`
+
+This chart bootstraps the MinIO® server in distributed mode with 4 nodes by default. You can change the number of nodes using the `statefulset.replicaCount` parameter. For instance, you can deploy the chart with 8 nodes using the following parameters:
+
+```console
+mode=distributed
+statefulset.replicaCount=8
+```
+
+You can also bootstrap the MinIO® server in distributed mode across several zones, using multiple drives per node. For instance, you can deploy the chart with 2 nodes per zone on 2 zones, using 2 drives per node:
+
+```console
+mode=distributed
+statefulset.replicaCount=2
+statefulset.zones=2
+statefulset.drivesPerNode=2
+```
+
+> Note: The total number of drives should be greater than 4 to guarantee erasure coding. Please set a combination of nodes and drives per node that matches this condition.
+
+### Prometheus exporter
+
+MinIO® exports Prometheus metrics at `/minio/v2/metrics/cluster`. To allow Prometheus to collect your MinIO® metrics, modify `values.yaml` to add the corresponding annotations:
+
+```diff
+- podAnnotations: {}
++ podAnnotations:
++   prometheus.io/scrape: "true"
++   prometheus.io/path: "/minio/v2/metrics/cluster"
++   prometheus.io/port: "9000"
+```
+
+> Find more information about MinIO® metrics in the official MinIO® documentation.
+
+## Persistence
+
+The [Bitnami Object Storage based on MinIO®](https://github.com/bitnami/containers/tree/main/bitnami/minio) image stores data at the `/data` path of the container.
+
+The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
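+
+For example, a minimal `values.yaml` sketch is shown below; the StorageClass name and size are illustrative, not chart defaults:
+
+```yaml
+persistence:
+  enabled: true
+  storageClass: "standard" # hypothetical StorageClass available in your cluster
+  size: 20Gi
+  # In standalone mode, an existing PVC could be reused instead:
+  # existingClaim: my-minio-pvc
+```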
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data to it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Ingress
+
+This chart provides support for Ingress resources. If you have an ingress controller installed on your cluster, such as [nginx-ingress-controller](https://github.com/bitnami/charts/tree/main/bitnami/nginx-ingress-controller) or [contour](https://github.com/bitnami/charts/tree/main/bitnami/contour), you can use it to serve your application.
+
+To enable Ingress integration, set `ingress.enabled` to `true`. The `ingress.hostname` property can be used to set the host name. The `ingress.tls` parameter can be used to add the TLS configuration for this host. It is also possible to have more than one host, with a separate TLS configuration for each host. [Learn more about configuring and using Ingress](https://docs.bitnami.com/kubernetes/infrastructure/minio/configuration/configure-ingress/).
+
+### TLS secrets
+
+The chart also facilitates the creation of TLS secrets for use with the Ingress controller, with different options for certificate management. [Learn more about TLS secrets](https://docs.bitnami.com/kubernetes/infrastructure/minio/administration/enable-tls-ingress/).
+
+### Adding extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: MINIO_LOG_LEVEL
+    value: DEBUG
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
+
+### Sidecars and Init Containers
+
+If you need additional containers to run within the same pod as the MinIO® app (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters, as shown in the sketch below.
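+
+For example, a hedged `values.yaml` sketch combining these presets (the `disktype=ssd` node label is hypothetical and must exist on your nodes):
+
+```yaml
+# Prefer spreading MinIO® pods across different nodes (soft anti-affinity)
+podAntiAffinityPreset: soft
+# Require nodes that carry the (hypothetical) disktype=ssd label
+nodeAffinityPreset:
+  type: hard
+  key: disktype
+  values:
+    - ssd
+```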
+
+### Deploying extra resources
+
+There are cases where you may want to deploy extra objects, such as a ConfigMap containing your app's configuration or an extra deployment running a microservice used by your app. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 12.0.0
+
+This version updates MinIO® to major version 2023. All gateway features have been removed from MinIO® since upstream completely dropped this functionality. The related options have been removed in version 12.1.0.
+
+### To 11.0.0
+
+This version deprecates the usage of the `MINIO_ACCESS_KEY` and `MINIO_SECRET_KEY` environment variables in the MinIO® container in favor of `MINIO_ROOT_USER` and `MINIO_ROOT_PASSWORD`.
+
+If you were already using the new variables, no issues are expected during the upgrade.
+
+### To 9.0.0
+
+This version updates MinIO® authentication parameters so they're aligned with the [current terminology](https://docs.min.io/minio/baremetal/security/minio-identity-management/user-management.html#minio-users-root). As a result, the following parameters have been affected:
+
+- `accessKey.password` has been renamed to `auth.rootUser`.
+- `secretKey.password` has been renamed to `auth.rootPassword`.
+- `accessKey.forcePassword` and `secretKey.forcePassword` have been unified into `auth.forcePassword`.
+- `existingSecret`, `useCredentialsFile` and `forceNewKeys` have been renamed to `auth.existingSecret`, `auth.useCredentialsFiles` and `auth.forceNewKeys`, respectively.
+
+### To 8.0.0
+
+This version updates MinIO® after some major changes affecting its Web UI. MinIO® has replaced its MinIO® Browser with the MinIO® Console, and the Web UI has been moved to a separate port. As a result, the following variables have been affected:
+
+- `service.port` has been split into `service.ports.api` (default: 9000) and `service.ports.console` (default: 9001).
+- `containerPort` has been split into `containerPorts.api` (default: 9000) and `containerPorts.console` (default: 9001).
+- `service.nodePort` has been split into `service.nodePorts.api` and `service.nodePorts.console`.
+- Service port `minio` has been replaced with `minio-api` and `minio-console`, with target ports `minio-api` and `minio-console` respectively.
+- Liveness, readiness and startup probes now use port `minio-console` instead of `minio`.
+
+Please note that the Web UI, previously running on port 9000, now uses port 9001, leaving port 9000 for the MinIO® Server API.
+
+### To 7.0.0
+
+This version introduces pod and container securityContext support. The previous configuration of `securityContext` has moved to `podSecurityContext` and `containerSecurityContext`. Apart from this case, no issues are expected to appear when upgrading.
+
+### To 5.0.0
+
+This version standardizes the way of defining Ingress rules. When configuring a single hostname for the Ingress rule, set the `ingress.hostname` value. When defining more than one, set the `ingress.extraHosts` array. Apart from this case, no issues are expected to appear when upgrading.
+
+### To 4.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency.
More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +### To 4.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/manifest/helm-charts/infra/minio/charts/common/.helmignore b/manifest/helm-charts/infra/minio/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/manifest/helm-charts/infra/minio/charts/common/Chart.yaml b/manifest/helm-charts/infra/minio/charts/common/Chart.yaml new file mode 100644 index 000000000..3be88e6aa --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 2.11.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://bitnami.com +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +type: library +version: 2.11.1 diff --git a/manifest/helm-charts/infra/minio/charts/common/README.md b/manifest/helm-charts/infra/minio/charts/common/README.md new file mode 100644 index 000000000..fe6a01000 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/README.md @@ -0,0 +1,235 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 2.x.x + repository: oci://registry-1.docker.io/bitnamicharts +``` + +```console +helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. 
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_affinities.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..e85b1df45 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_affinities.tpl @@ -0,0 +1,139 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + 
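+{{/*
+Illustrative usage note (not part of the upstream helper set): a chart template
+would typically consume the helpers above through the "common.affinities.pods"
+dispatcher defined below, for example:
+  affinity:
+    podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" .Values.podLabels "context" $) | nindent 6 }}
+*/}}
+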
+{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_capabilities.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..c6d115fe5 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_capabilities.tpl @@ -0,0 +1,185 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. +*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_errors.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..07ded6f64 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_errors.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_images.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_images.tpl new file mode 100644 index 000000000..e248d6d08 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_images.tpl @@ -0,0 +1,101 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper image version (ingores image revision/prerelease info & fallbacks to chart appVersion) +{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }} +*/}} +{{- define "common.images.version" -}} +{{- $imageTag := .imageRoot.tag | toString -}} +{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}} +{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}} + {{- $version := semver $imageTag -}} + {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}} +{{- else -}} + {{- print .chart.AppVersion -}} +{{- end -}} +{{- end -}} + diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_ingress.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..efa5b85c7 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_ingress.tpl @@ -0,0 +1,73 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . 
}} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_labels.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..a3cdc2bfd --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_labels.tpl @@ -0,0 +1,40 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) (dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service "app.kubernetes.io/version" .context.Chart.AppVersion) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_names.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_names.tpl new file mode 100644 index 000000000..a222924f1 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_names.tpl @@ -0,0 +1,71 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_secrets.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a193c46b6 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_secrets.tpl @@ -0,0 +1,172 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
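+
+For illustration only (editorial example, not part of the upstream docs): assuming
+a user sets a hypothetical value such as
+
+  auth:
+    existingSecret: my-own-secret
+
+passing .Values.auth.existingSecret to this helper resolves the name to
+"my-own-secret"; with no existingSecret set, it falls back to the chart
+fullname plus the optional suffix described below.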
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets. +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. 
Password provided via the values.yaml
+    (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+  3. Randomly generated secret password
+    (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $failOnNew := default true .failOnNew }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key | quote }}
+  {{- else if $failOnNew }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - defaultValue - String - Required - Default value to use when the secret does not exist or lacks the requested key (it will be base64-encoded), e.g: .Values.myValue.
+  - context - Context - Required - Parent context.
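+
+Illustrative example (hypothetical names, not upstream docs): with a secret
+"my-release-minio" already containing the key "root-password", the call
+
+  {{ include "common.secrets.lookup" (dict "secret" "my-release-minio" "key" "root-password" "defaultValue" .Values.auth.rootPassword "context" $) }}
+
+returns the base64-encoded value already stored in the cluster, and only falls
+back to base64-encoding .Values.auth.rootPassword on a first install.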
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+  {{- $value = index $secretData .key -}}
+{{- else if .defaultValue -}}
+  {{- $value = .defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- if $value -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns whether a previously generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_storage.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_storage.tpl
new file mode 100644
index 000000000..16405a0f8
--- /dev/null
+++ b/manifest/helm-charts/infra/minio/charts/common/templates/_storage.tpl
@@ -0,0 +1,28 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+  {{- if .global.storageClass -}}
+    {{- $storageClass = .global.storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+  {{- if (eq "-" $storageClass) -}}
+    {{- printf "storageClassName: \"\"" -}}
+  {{- else }}
+    {{- printf "storageClassName: %s" $storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_tplvalues.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 000000000..a8ed7637e
--- /dev/null
+++ b/manifest/helm-charts/infra/minio/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,38 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains a template, optionally evaluating it within a custom scope when one is provided.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }}
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }}
+{{- if contains "{{" (toJson .value) }}
+  {{- if .scope }}
+    {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }}
+  {{- else }}
+    {{- tpl $value .context }}
+  {{- end }}
+{{- else }}
+  {{- $value }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Merge a list of values that contain templates, rendering each of them first.
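+
+A minimal sketch of the intended use (hypothetical values, not from this chart):
+given podLabels = {app: web} and commonLabels = {tier: "{{ .Release.Name }}"},
+each entry is first rendered through common.tplvalues.render and the resulting
+maps are then merged into a single map, e.g. {app: web, tier: my-release}.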
+Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_utils.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..c87040cd9 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_utils.tpl @@ -0,0 +1,67 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/_warnings.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..66dffc1fe --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/_warnings.tpl @@ -0,0 +1,19 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. 
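+
+For example (illustrative only, based on the check below): a repository under
+"bitnami/" with a tag such as "2023.9.23-debian-11" would trigger this warning,
+while an immutable revision tag ending in "-r0" or a digest-pinned image
+(containing "sha256:") would not.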
+Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/validations/_cassandra.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..eda9aada5 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mariadb.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..17d83a2fd --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. 
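+
+Editorial note, based on the template body below: when "subchart" is set this
+returns .Values.mariadb.enabled; otherwise it returns the negation of
+.Values.enabled, so a standalone chart with enabled=true yields "false".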
+ +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mongodb.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..bbb445b86 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,113 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mysql.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..ca3953f86 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. 
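+
+Illustrative only (hypothetical values): with MySQL bundled as a subchart, a
+parent values.yaml such as
+
+  mysql:
+    enabled: true
+    auth:
+      rootPassword: example-root-pass
+
+makes this helper (called with "subchart" true) return "true".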
+ +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/validations/_postgresql.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..8c9aa570e --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,134 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. 
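+
+Sketch of the values layout this helper inspects (hypothetical example):
+
+  global:
+    postgresql:
+      existingSecret: shared-pg-secret
+
+With "key" set to "existingSecret", the helper would return "shared-pg-secret"
+(quoted); if the global postgresql block is absent, it returns nothing.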
+ +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/validations/_redis.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..fc0d208dd --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,81 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) 
"auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/templates/validations/_validations.tpl b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..31ceda871 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,51 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/manifest/helm-charts/infra/minio/charts/common/values.schema.json b/manifest/helm-charts/infra/minio/charts/common/values.schema.json new file mode 100644 index 000000000..2124b3e4a --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/values.schema.json @@ -0,0 +1,11 @@ +{ + "title": "Chart Values", + "type": "object", + "properties": { + "exampleValue": { + "type": "string", + "description": "", + "default": "common-chart" + } + } +} \ No newline at end of file diff --git a/manifest/helm-charts/infra/minio/charts/common/values.yaml b/manifest/helm-charts/infra/minio/charts/common/values.yaml new file mode 100644 index 000000000..9abe0e154 --- /dev/null +++ b/manifest/helm-charts/infra/minio/charts/common/values.yaml @@ -0,0 +1,8 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/manifest/helm-charts/infra/minio/templates/NOTES.txt b/manifest/helm-charts/infra/minio/templates/NOTES.txt new file mode 100644 index 000000000..4ed938a8b --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/NOTES.txt @@ -0,0 +1,76 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +MinIO® can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To get your credentials run: + + export ROOT_USER=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "minio.secretName" . }} -o jsonpath="{.data.root-user}" | base64 -d) + export ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "minio.secretName" . }} -o jsonpath="{.data.root-password}" | base64 -d) + +To connect to your MinIO® server using a client: + +- Run a MinIO® Client pod and append the desired command (e.g. 'admin info'): + + kubectl run --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }}-client \ + --rm --tty -i --restart='Never' \ + --env MINIO_SERVER_ROOT_USER=$ROOT_USER \ + --env MINIO_SERVER_ROOT_PASSWORD=$ROOT_PASSWORD \ + --env MINIO_SERVER_HOST={{ include "common.names.fullname" . }} \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ include "common.names.fullname" . }}-client=true" \ + {{- end }} + --image {{ template "minio.clientImage" . }} -- admin info minio + +{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + + NOTE: Since NetworkPolicy is enabled, only pods with label + "{{ template "common.names.fullname" . }}-client=true" will be able to connect to MinIO®. + +{{- end }} +{{- if (not .Values.disableWebUI) }} + +To access the MinIO® web UI: + +- Get the MinIO® URL: + +{{- if .Values.ingress.enabled }} + + You should be able to access your new MinIO® web UI through + + {{ if .Values.ingress.tls }}https{{ else }}http{{ end }}://{{ .Values.ingress.hostname }}/minio/ +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}' + + {{- $port:=.Values.service.ports.console | toString }} + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + echo "MinIO® web URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.service.ports.console }}{{ end }}/minio" + +{{- else if contains "ClusterIP" .Values.service.type }} + + echo "MinIO® web URL: http://127.0.0.1:{{ .Values.containerPorts.console }}/minio" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.containerPorts.console }}:{{ .Values.service.ports.console }} + +{{- else if contains "NodePort" .Values.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.names.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "MinIO® web URL: http://$NODE_IP:$NODE_PORT/minio" + +{{- end }} +{{- else }} + + WARN: MinIO® Web UI is disabled. +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.clientImage }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "minio.validateValues" . }} diff --git a/manifest/helm-charts/infra/minio/templates/_helpers.tpl b/manifest/helm-charts/infra/minio/templates/_helpers.tpl new file mode 100644 index 000000000..eca588446 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/_helpers.tpl @@ -0,0 +1,244 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper MinIO® image name +*/}} +{{- define "minio.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} + +{{- end -}} + +{{/* +Return the proper MinIO® Client image name +*/}} +{{- define "minio.clientImage" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.clientImage "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "minio.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.clientImage .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
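+
+Hypothetical usage (editorial example, mirroring how the helpers below call it):
+
+  {{ include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-user") }}
+
+returns the decoded "root-user" value if the secret already exists, otherwise
+a random 10-character alphanumeric string.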
+*/}} +{{- define "getValueFromSecret" }} +{{- $len := (default 16 .Length) | int -}} +{{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} +{{- if $obj }} +{{- index $obj .Key | b64dec -}} +{{- else -}} +{{- randAlphaNum $len -}} +{{- end -}} +{{- end }} + +{{/* +Get the user to use to access MinIO® +*/}} +{{- define "minio.secret.userValue" -}} +{{- if .Values.auth.rootUser }} + {{- .Values.auth.rootUser -}} +{{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-user") -}} +{{- else -}} + {{ required "A root username is required!" .Values.auth.rootUser }} +{{- end -}} +{{- end -}} + +{{/* +Get the password to use to access MinIO® +*/}} +{{- define "minio.secret.passwordValue" -}} +{{- if .Values.auth.rootPassword }} + {{- .Values.auth.rootPassword -}} +{{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-password") -}} +{{- else -}} + {{ required "A root password is required!" .Values.auth.rootPassword }} +{{- end -}} +{{- end -}} + +{{/* +Get the credentials secret. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "minio.createSecret" -}} +{{- if .Values.auth.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a PVC object should be created (only in standalone mode) +*/}} +{{- define "minio.createPVC" -}} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.mode "standalone") }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the PVC name (only in standalone mode) +*/}} +{{- define "minio.claimName" -}} +{{- if and .Values.persistence.existingClaim }} + {{- printf "%s" (tpl .Values.persistence.existingClaim $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the proper service account name depending if an explicit service account name is set +in the values file. If the name is not set it will default to either common.names.fullname if serviceAccount.create +is true or default otherwise. +*/}} +{{- define "minio.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} + {{- else -}} + {{ default "default" .Values.serviceAccount.name }} + {{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "minio.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "minio.validateValues.mode" .) -}} +{{- $messages := append $messages (include "minio.validateValues.totalDrives" .) -}} +{{- $messages := append $messages (include "minio.validateValues.tls" .) 
+
+{{/*
+Get the user to use to access MinIO®
+*/}}
+{{- define "minio.secret.userValue" -}}
+{{- if .Values.auth.rootUser }}
+    {{- .Values.auth.rootUser -}}
+{{- else if (not .Values.auth.forcePassword) }}
+    {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-user") -}}
+{{- else -}}
+    {{ required "A root username is required!" .Values.auth.rootUser }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password to use to access MinIO®
+*/}}
+{{- define "minio.secret.passwordValue" -}}
+{{- if .Values.auth.rootPassword }}
+    {{- .Values.auth.rootPassword -}}
+{{- else if (not .Values.auth.forcePassword) }}
+    {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-password") -}}
+{{- else -}}
+    {{ required "A root password is required!" .Values.auth.rootPassword }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the credentials secret.
+*/}}
+{{- define "minio.secretName" -}}
+{{- if .Values.auth.existingSecret -}}
+    {{- printf "%s" (tpl .Values.auth.existingSecret $) -}}
+{{- else -}}
+    {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret object should be created
+*/}}
+{{- define "minio.createSecret" -}}
+{{- if .Values.auth.existingSecret -}}
+{{- else -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a PVC object should be created (only in standalone mode)
+*/}}
+{{- define "minio.createPVC" -}}
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.mode "standalone") }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the PVC name (only in standalone mode)
+*/}}
+{{- define "minio.claimName" -}}
+{{- if .Values.persistence.existingClaim }}
+    {{- printf "%s" (tpl .Values.persistence.existingClaim $) -}}
+{{- else -}}
+    {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the proper service account name depending on whether an explicit service account name is set
+in the values file. If the name is not set, it defaults to common.names.fullname when serviceAccount.create
+is true, and to "default" otherwise.
+*/}}
+{{- define "minio.serviceAccountName" -}}
+    {{- if .Values.serviceAccount.create -}}
+        {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+    {{- else -}}
+        {{ default "default" .Values.serviceAccount.name }}
+    {{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "minio.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "minio.validateValues.mode" .) -}}
+{{- $messages := append $messages (include "minio.validateValues.totalDrives" .) -}}
+{{- $messages := append $messages (include "minio.validateValues.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of MinIO® - must provide a valid mode ("distributed" or "standalone")
+*/}}
+{{- define "minio.validateValues.mode" -}}
+{{- $allowedValues := list "distributed" "standalone" }}
+{{- if not (has .Values.mode $allowedValues) -}}
+minio: mode
+    Invalid mode selected. Valid values are "distributed" and
+    "standalone". Please set a valid mode (--set mode="xxxx")
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of MinIO® - the total number of drives must be at least 4
+*/}}
+{{- define "minio.validateValues.totalDrives" -}}
+{{- $replicaCount := int .Values.statefulset.replicaCount }}
+{{- $drivesPerNode := int .Values.statefulset.drivesPerNode }}
+{{- $totalDrives := mul $replicaCount $drivesPerNode }}
+{{- if and (eq .Values.mode "distributed") (lt $totalDrives 4) -}}
+minio: total drives
+    The total number of drives must be at least 4 to guarantee erasure coding!
+    Please set a combination of nodes and drives per node that matches this condition.
+    For instance (--set statefulset.replicaCount=2 --set statefulset.drivesPerNode=2)
+{{- end -}}
+{{- end -}}
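+
+{{/*
+Editor's note (illustrative only): the check above multiplies replicas by drives per node, e.g. with
+  statefulset:
+    replicaCount: 2
+    drivesPerNode: 2
+totalDrives = 2 * 2 = 4, which meets the minimum of 4 drives required for erasure coding.
+*/}}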
+
+{{/*
+Validate values of MinIO® - a TLS secret must be provided if TLS is enabled
+*/}}
+{{- define "minio.validateValues.tls" -}}
+{{- if and .Values.tls.enabled (not .Values.tls.existingSecret) (not .Values.tls.autoGenerated) }}
+minio: tls.existingSecret, tls.autoGenerated
+    In order to enable TLS, you also need to provide
+    an existing secret containing the TLS certificates or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret containing MinIO TLS certificates
+*/}}
+{{- define "minio.tlsSecretName" -}}
+{{- if .Values.tls.existingSecret -}}
+    {{- printf "%s" (tpl .Values.tls.existingSecret $) -}}
+{{- else -}}
+    {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret object should be created
+*/}}
+{{- define "minio.createTlsSecret" -}}
+{{- if and .Values.tls.enabled .Values.tls.autoGenerated (not .Values.tls.existingSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Provisioning job labels (exclude matchLabels from standard labels)
+*/}}
+{{- define "minio.labels.provisioning" -}}
+{{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.provisioning.podLabels .Values.commonLabels ) "context" . ) }}
+{{- $provisioningLabels := (include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | fromYaml ) -}}
+{{- range (include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | fromYaml | keys ) -}}
+{{- $_ := unset $provisioningLabels . -}}
+{{- end -}}
+{{- print ($provisioningLabels | toYaml) -}}
+{{- end -}}
+
+{{/*
+Return the ingress annotations
+*/}}
+{{- define "minio.ingress.annotations" -}}
+{{ .Values.ingress.annotations | toYaml }}
+{{- end -}}
+
+{{/*
+Return the api ingress annotations
+*/}}
+{{- define "minio.apiIngress.annotations" -}}
+{{ .Values.apiIngress.annotations | toYaml }}
+{{- end -}}
+
+{{/*
+Return the ingress hostname
+*/}}
+{{- define "minio.ingress.hostname" -}}
+{{- tpl .Values.ingress.hostname $ -}}
+{{- end -}}
+
+{{/*
+Return the api ingress hostname
+*/}}
+{{- define "minio.apiIngress.hostname" -}}
+{{- tpl .Values.apiIngress.hostname $ -}}
+{{- end -}}
diff --git a/manifest/helm-charts/infra/minio/templates/api-ingress.yaml b/manifest/helm-charts/infra/minio/templates/api-ingress.yaml
new file mode 100644
index 000000000..c9ca45723
--- /dev/null
+++ b/manifest/helm-charts/infra/minio/templates/api-ingress.yaml
@@ -0,0 +1,60 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.apiIngress.enabled -}}
+apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
+kind: Ingress
+metadata:
+  name: {{ include "common.names.fullname" . }}-api
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list (include "minio.apiIngress.annotations" . | fromYaml) .Values.commonAnnotations ) "context" . ) }}
+  {{- if $annotations }}
+  annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and .Values.apiIngress.ingressClassName (include "common.ingress.supportsIngressClassname" .) }}
+  ingressClassName: {{ .Values.apiIngress.ingressClassName | quote }}
+  {{- end }}
+  rules:
+    {{- if (include "minio.apiIngress.hostname" .) }}
+    - host: {{ include "minio.apiIngress.hostname" . }}
+      http:
+        paths:
+          {{- if .Values.apiIngress.extraPaths }}
+          {{- toYaml .Values.apiIngress.extraPaths | nindent 10 }}
+          {{- end }}
+          - path: {{ .Values.apiIngress.path }}
+            {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
+            pathType: {{ .Values.apiIngress.pathType }}
+            {{- end }}
+            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .)
"servicePort" "minio-api" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.apiIngress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "minio-api" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.apiIngress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.apiIngress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- $annotationsMap := include "common.tplvalues.render" (dict "value" $annotations "context" $) | fromYaml }} + {{- if or (and .Values.apiIngress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" $annotationsMap )) .Values.apiIngress.selfSigned)) .Values.apiIngress.extraTls }} + tls: + {{- if and .Values.apiIngress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" $annotationsMap )) .Values.apiIngress.selfSigned) }} + - hosts: + - {{ include "minio.apiIngress.hostname" . }} + secretName: {{ printf "%s-tls" (include "minio.apiIngress.hostname" .) }} + {{- end }} + {{- if .Values.apiIngress.extraTls }} + {{- include "common.tplvalues.render" ( dict "value" .Values.apiIngress.extraTls "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/distributed/headless-svc.yaml b/manifest/helm-charts/infra/minio/templates/distributed/headless-svc.yaml new file mode 100644 index 000000000..5c3bf2a5a --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/distributed/headless-svc.yaml @@ -0,0 +1,30 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (eq .Values.mode "distributed") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: minio-api + port: {{ .Values.service.ports.api }} + targetPort: minio-api + - name: minio-console + port: {{ .Values.service.ports.console }} + targetPort: minio-console + publishNotReadyAddresses: true + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/distributed/pdb.yaml b/manifest/helm-charts/infra/minio/templates/distributed/pdb.yaml new file mode 100644 index 000000000..91d1c59c2 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/distributed/pdb.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.pdb.create (eq .Values.mode "distributed") }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/distributed/statefulset.yaml b/manifest/helm-charts/infra/minio/templates/distributed/statefulset.yaml new file mode 100644 index 000000000..3dd966ef8 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/distributed/statefulset.yaml @@ -0,0 +1,338 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (eq .Values.mode "distributed") }} +{{- $fullname := include "common.names.fullname" . }} +{{- $headlessService := printf "%s-headless" (include "common.names.fullname" .) | trunc 63 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $apiPort := toString .Values.containerPorts.api }} +{{- $replicaCount := int .Values.statefulset.replicaCount }} +{{- $zoneCount := int .Values.statefulset.zones }} +{{- $drivesPerNode := int .Values.statefulset.drivesPerNode }} +{{- $mountPath := .Values.persistence.mountPath }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ $fullname }} + namespace: {{ $releaseNamespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + serviceName: {{ $headlessService }} + replicas: {{ mul $zoneCount $replicaCount }} + podManagementPolicy: {{ .Values.statefulset.podManagementPolicy }} + {{- if .Values.statefulset.updateStrategy }} + updateStrategy: {{- toYaml .Values.statefulset.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "minio.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "minio.volumePermissions.image" . 
}} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if and .Values.persistence.enabled (gt $drivesPerNode 1) }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ range $diskId := until $drivesPerNode }}{{ $mountPath }}-{{ $diskId }} {{ end }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ $mountPath }} + {{- end }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if and .Values.persistence.enabled (gt $drivesPerNode 1) }} + {{- range $diskId := until $drivesPerNode }} + - name: data-{{ $diskId }} + mountPath: {{ $mountPath }}-{{ $diskId }} + {{- end }} + {{- else }} + - name: data + mountPath: {{ $mountPath }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MINIO_DISTRIBUTED_MODE_ENABLED + value: "yes" + - name: MINIO_DISTRIBUTED_NODES + {{- $clusters := list }} + {{- range $i := until $zoneCount }} + {{- $factor := mul $i $replicaCount }} + {{- $endIndex := sub (add $factor $replicaCount) 1 }} + {{- $beginIndex := mul $i $replicaCount }} + {{- $bucket := ternary (printf "%s-{0...%d}" $mountPath (sub $drivesPerNode 1)) $mountPath (gt $drivesPerNode 1) }} + {{- $clusters = append $clusters (printf "%s-{%d...%d}.%s.%s.svc.%s:%s%s" $fullname $beginIndex $endIndex $headlessService $releaseNamespace $clusterDomain $apiPort $bucket) }} + {{- end }} + value: {{ join "," $clusters | quote }} + - name: MINIO_SCHEME + value: {{ ternary "https" "http" .Values.tls.enabled | quote }} + - name: MINIO_FORCE_NEW_KEYS + value: {{ ternary "yes" "no" .Values.auth.forceNewKeys | quote }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_USER_FILE + value: "/opt/bitnami/minio/secrets/root-user" + {{- else }} + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: root-user + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_PASSWORD_FILE + value: "/opt/bitnami/minio/secrets/root-password" + {{- else }} + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}} + key: root-password + {{- end }} + - name: MINIO_SKIP_CLIENT + value: {{ ternary "yes" "no" (empty .Values.defaultBuckets) | quote }} + {{- if .Values.defaultBuckets }} + - name: MINIO_DEFAULT_BUCKETS + value: {{ .Values.defaultBuckets }} + {{- end }} + - name: MINIO_BROWSER + value: {{ ternary "off" "on" .Values.disableWebUI | quote }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: {{ .Values.metrics.prometheusAuthType | quote }} + {{- if .Values.tls.mountPath }} + - name: MINIO_CERTS_DIR + value: {{ .Values.tls.mountPath | quote }} + - name: MINIO_CONSOLE_PORT_NUMBER + value: {{ .Values.containerPorts.console | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: minio-api + containerPort: {{ .Values.containerPorts.api }} + protocol: TCP + - name: minio-console + containerPort: {{ .Values.containerPorts.console }} + protocol: TCP + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /minio/health/live + port: minio-api + scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled | quote }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: minio-api + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: + tcpSocket: + port: minio-api + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.useCredentialsFiles }} + - name: 
minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + mountPath: {{ default "/certs" .Values.tls.mountPath }} + {{- end }} + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode }} + - name: data-{{ $diskId }} + mountPath: {{ $mountPath }}-{{ $diskId }} + {{- end }} + {{- else }} + - name: data + mountPath: {{ $mountPath }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + secret: + secretName: {{ include "minio.tlsSecretName" . }} + items: + - key: tls.crt + path: public.crt + - key: tls.key + path: private.key + - key: ca.crt + path: CAs/public.crt + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and (not .Values.persistence.enabled) (gt $drivesPerNode 1) }} + {{- range $diskId := until $drivesPerNode }} + - name: data-{{ $diskId }} + emptyDir: {} + {{- end }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode }} + - metadata: + name: data-{{ $diskId }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 10 }} + {{- if $.Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range $.Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ $.Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" $.Values.persistence "global" $.Values.global) | nindent 8 }} + {{- end }} + {{- else }} + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 10 }} + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/extra-list.yaml b/manifest/helm-charts/infra/minio/templates/extra-list.yaml new file mode 100644 index 000000000..2d35a580e --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/ingress.yaml b/manifest/helm-charts/infra/minio/templates/ingress.yaml new file mode 100644 index 000000000..f350bae19 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/ingress.yaml @@ -0,0 +1,60 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.ingress.enabled (not .Values.disableWebUI ) -}} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list (include "minio.ingress.annotations" . | fromYaml) .Values.commonAnnotations ) "context" . ) }} + {{- if $annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (include "common.ingress.supportsIngressClassname" .) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if (include "minio.ingress.hostname" .) }} + - host: {{ include "minio.ingress.hostname" . }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" "minio-console" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "minio-console" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- $annotationsMap := include "common.tplvalues.render" (dict "value" $annotations "context" $) | fromYaml }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" $annotationsMap )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" $annotationsMap )) .Values.ingress.selfSigned) }} + - hosts: + - {{ include "minio.ingress.hostname" . }} + secretName: {{ printf "%s-tls" (include "minio.ingress.hostname" .) }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.extraTls "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/networkpolicy.yaml b/manifest/helm-charts/infra/minio/templates/networkpolicy.yaml new file mode 100644 index 000000000..2497ab147 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.containerPorts.console }} + - port: {{ .Values.containerPorts.api }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.extraFromClauses }} + {{- toYaml .Values.networkPolicy.extraFromClauses | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/prometheusrule.yaml b/manifest/helm-charts/infra/minio/templates/prometheusrule.yaml new file mode 100644 index 000000000..40bac68ef --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 6 }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/provisioning-configmap.yaml b/manifest/helm-charts/infra/minio/templates/provisioning-configmap.yaml new file mode 100644 index 000000000..5d5bd1ac9 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/provisioning-configmap.yaml @@ -0,0 +1,75 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.provisioning.enabled }} +{{- $fullname := printf "%s-provisioning" (include "common.names.fullname" .) 
}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $fullname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: minio-provisioning + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- range $bucket := .Values.provisioning.buckets }} + {{- if $bucket.lifecycle }} + bucket-{{ $bucket.name }}.json: | + { + "Rules": [ + {{- range $idx, $lifecycle := $bucket.lifecycle }} + {{- if not (eq $idx 0) }} + , + {{- end }} + { + "ID": "{{ $lifecycle.id }}", + "Status": "{{ ternary "Disabled" "Enabled" (and (not (empty $lifecycle.disabled)) $lifecycle.disabled) }}", + {{- if $lifecycle.expiry }} + "Expiration": { + {{- with $lifecycle.expiry.date }} + "Date": "{{ . }}" + {{- end }} + {{- with $lifecycle.expiry.days }} + "Days": {{ . }} + {{- end }} + } + {{- with $lifecycle.expiry.nonconcurrentDays }} + , + "NoncurrentVersionExpiration": { + "NoncurrentDays": {{ . }} + } + {{- end }} + {{- with $lifecycle.prefix }} + , + "Filter": { + "Prefix": "{{ . }}" + } + {{- end }} + } + {{- end }} + {{- end }} + ] + } + {{- end }} + {{- end }} + {{- range $policy := .Values.provisioning.policies }} + policy-{{ $policy.name }}.json: | + {{- $statementsLength := sub (len $policy.statements) 1 }} + { + "Version": "2012-10-17", + "Statement": [ + {{- range $i, $statement := $policy.statements }} + { + "Effect": "{{ default "Deny" $statement.effect }}"{{ if $statement.actions }}, + "Action": {{ toJson $statement.actions }}{{end}}{{ if $statement.resources }}, + "Resource": {{ toJson $statement.resources }}{{end}} + }{{ if lt $i $statementsLength }},{{end }} + {{- end }} + ] + } + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/provisioning-job.yaml b/manifest/helm-charts/infra/minio/templates/provisioning-job.yaml new file mode 100644 index 000000000..5b409dc54 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/provisioning-job.yaml @@ -0,0 +1,324 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.provisioning.enabled }} +{{- $fullname := printf "%s-provisioning" (include "common.names.fullname" .) }} +{{- $minioAlias := "provisioning" }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $fullname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: minio-provisioning + annotations: + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.provisioning.cleanupAfterFinished.enabled }} + ttlSecondsAfterFinished: {{ .Values.provisioning.cleanupAfterFinished.seconds }} + {{- end }} + parallelism: 1 + template: + metadata: + labels: {{- include "minio.labels.provisioning" . | nindent 8 }} + app.kubernetes.io/component: minio-provisioning + {{- if .Values.provisioning.podAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "minio.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.provisioning.schedulerName }} + schedulerName: {{ .Values.provisioning.schedulerName }} + {{- end }} + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + {{- if .Values.provisioning.podSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} + initContainers: + - name: wait-for-available-minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - |- + set -e; + echo "Waiting for Minio"; + wait-for-port \ + --host={{ include "common.names.fullname" . }} \ + --state=inuse \ + --timeout=120 \ + {{ .Values.service.ports.api | int64 }}; + echo "Minio is available"; + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - >- + set -e; + echo "Start Minio provisioning"; + + function attachPolicy() { + local tmp=$(mc admin $1 info {{ $minioAlias }} $2 | sed -n -e 's/^Policy.*: \(.*\)$/\1/p'); + IFS=',' read -r -a CURRENT_POLICIES <<< "$tmp"; + if [[ ! "${CURRENT_POLICIES[*]}" =~ "$3" ]]; then + mc admin policy attach {{ $minioAlias }} $3 --$1=$2; + fi; + }; + + function detachDanglingPolicies() { + local tmp=$(mc admin $1 info {{ $minioAlias }} $2 | sed -n -e 's/^Policy.*: \(.*\)$/\1/p'); + IFS=',' read -r -a CURRENT_POLICIES <<< "$tmp"; + IFS=',' read -r -a DESIRED_POLICIES <<< "$3"; + for current in "${CURRENT_POLICIES[@]}"; do + if [[ ! "${DESIRED_POLICIES[*]}" =~ "${current}" ]]; then + mc admin policy detach {{ $minioAlias }} $current --$1=$2; + fi; + done; + } + + function addUsersFromFile() { + local username=$(grep -oP '^username=\K.+' $1); + local password=$(grep -oP '^password=\K.+' $1); + local disabled=$(grep -oP '^disabled=\K.+' $1); + local policies_list=$(grep -oP '^policies=\K.+' $1); + local set_policies=$(grep -oP '^setPolicies=\K.+' $1); + + mc admin user add {{ $minioAlias }} "${username}" "${password}"; + + IFS=',' read -r -a POLICIES <<< "${policies_list}"; + for policy in "${POLICIES[@]}"; do + attachPolicy user "${username}" "${policy}"; + done; + if [ "${set_policies}" == "true" ]; then + detachDanglingPolicies user "${username}" "${policies_list}"; + fi; + + local user_status="enable"; + if [[ "${disabled}" != "" && "${disabled,,}" == "true" ]]; then + user_status="disable"; + fi; + + mc admin user "${user_status}" {{ $minioAlias }} "${username}"; + }; + + {{- $minioUrl := printf "$MINIO_SCHEME://%s:%d" (include "common.names.fullname" .) 
(.Values.service.ports.api | int) }} + {{- $minioRootUser := ternary ("$(<$MINIO_ROOT_USER_FILE)") ("$MINIO_ROOT_USER") (.Values.auth.useCredentialsFiles) }} + {{- $minioRootPassword := ternary ("$(<$MINIO_ROOT_PASSWORD_FILE)") ("$MINIO_ROOT_PASSWORD") (.Values.auth.useCredentialsFiles) }} + mc alias set {{ $minioAlias }} {{ $minioUrl }} {{ $minioRootUser }} {{ $minioRootPassword }}; + + {{- range $config := .Values.provisioning.config }} + {{- $options := list }} + {{- range $name, $value := $config.options }} + {{- $options = (printf "%s=%s" $name $value) | append $options }} + {{- end }} + {{- $options := join " " $options }} + mc admin config set {{ $minioAlias }} {{ $config.name }} {{ $options }}; + {{- end }} + + mc admin service restart {{ $minioAlias }}; + + {{- range $policy := .Values.provisioning.policies }} + mc admin policy create {{ $minioAlias }} {{ $policy.name }} /etc/ilm/policy-{{ $policy.name }}.json; + {{- end }} + + {{- range $user := .Values.provisioning.users }} + mc admin user add {{ $minioAlias }} {{ $user.username }} {{ $user.password }}; + {{- range $policy := $user.policies }} + attachPolicy user {{ $user.username }} {{ $policy }}; + {{- end }} + {{- if $user.setPolicies }} + detachDanglingPolicies user {{ $user.username }} "{{ join "," $user.policies }}"; + {{- end }} + {{- $userStatus := ternary ("disable") ("enable") (and (not (empty $user.disabled)) $user.disabled) }} + mc admin user {{ $userStatus }} {{ $minioAlias }} {{ $user.username }}; + {{- end }} + {{- if gt (len .Values.provisioning.usersExistingSecrets) 0 }} + while read -d '' configFile; do + addUsersFromFile "${configFile}"; + done < <(find "/opt/bitnami/minio/users/" -type l -not -name '..data' -print0); + {{- end }} + + {{- range $group := .Values.provisioning.groups }} + mc admin group add {{ $minioAlias }} {{ $group.name }} {{ join " " $group.members }}; + {{- range $policy := $group.policies }} + attachPolicy group {{ $group.name }} {{ $policy }}; + {{- end }} + {{- if $group.setPolicies }} + detachDanglingPolicies group {{ $group.name }} "{{ join "," $group.policies }}"; + {{- end }} + {{- $groupStatus := ternary ("disable") ("enable") (and (not (empty $group.disabled)) $group.disabled) }} + mc admin group {{ $groupStatus }} {{ $minioAlias }} {{ $group.name }}; + {{- end }} + + {{- $isDistributedMode := (eq .Values.mode "distributed") }} + {{- range $bucket := .Values.provisioning.buckets }} + {{- $target := printf "%s/%s" $minioAlias $bucket.name }} + {{- $region := ternary (printf "--region=%s" $bucket.region) ("") (not (empty $bucket.region)) }} + {{- $withLock := ternary ("--with-lock") ("") (and (not (empty $bucket.withLock)) $bucket.withLock) }} + mc mb {{ $target }} --ignore-existing {{ $region }} {{ $withLock }}; + + {{- if $bucket.lifecycle }} + mc ilm import {{ $minioAlias }}/{{ $bucket.name }} < /etc/ilm/bucket-{{ $bucket.name }}.json; + {{- end }} + + {{- with $bucket.quota }} + {{- if eq .type "hard" }} + mc quota set {{ $minioAlias }}/{{ $bucket.name }} {{ if .size }}--size {{ .size }}{{ end }}; + {{- else }} + mc quota {{ .type }} {{ $minioAlias }}/{{ $bucket.name }} {{ if .size }}--size {{ .size }}{{ end }}; + {{- end }} + {{- end }} + + {{- if $isDistributedMode }} + {{- if (or ((empty $bucket.withLock)) (not $bucket.withLock)) }} + {{- $versioning := ternary ("enable") ("suspend") (and (not (empty $bucket.versioning)) $bucket.versioning) }} + mc version {{ $versioning }} {{ $minioAlias }}/{{ $bucket.name }}; + {{- end }} + {{- end }} + + {{- if $bucket.tags }} + {{- $target 
:= printf "%s/%s" $minioAlias $bucket.name }} + {{- $tags := list }} + {{- range $name, $value := $bucket.tags }} + {{- $tags = (printf "%s=%s" $name $value) | append $tags }} + {{- end }} + {{- $tags := join "&" $tags | quote }} + mc tag set {{ $target }} {{ $tags }}; + {{- end }} + {{- end }} + + {{- if .Values.provisioning.extraCommands }} + {{ join ";" .Values.provisioning.extraCommands | nindent 14 }}; + {{- end }} + + echo "End Minio provisioning"; + {{- end }} + {{- if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MINIO_SCHEME + value: {{ ternary "https" "http" .Values.tls.enabled | quote }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_USER_FILE + value: "/opt/bitnami/minio/secrets/root-user" + {{- else }} + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: root-user + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_PASSWORD_FILE + value: "/opt/bitnami/minio/secrets/root-password" + {{- else }} + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: root-password + {{- end }} + {{- if .Values.tls.mountPath }} + - name: MINIO_CERTS_DIR + value: {{ .Values.tls.mountPath | quote }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.provisioning.enabled }} + - name: minio-provisioning + mountPath: /etc/ilm + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + mountPath: {{ default "/certs" .Values.tls.mountPath }} + - name: minio-client-certs + mountPath: /.mc/certs + {{- end }} + {{- range $idx, $_ := .Values.provisioning.usersExistingSecrets }} + - name: {{ printf "users-secret-%d" $idx }} + mountPath: /opt/bitnami/minio/users/{{ $idx }}/ + {{- end }} + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.provisioning.enabled }} + - name: minio-provisioning + configMap: + name: {{ $fullname }} + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + {{- range $idx, $userSecret := .Values.provisioning.usersExistingSecrets }} + - name: {{ printf "users-secret-%d" $idx }} + secret: + secretName: {{ $userSecret }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + secret: + secretName: {{ include "minio.tlsSecretName" . 
}} + items: + - key: tls.crt + path: public.crt + - key: tls.key + path: private.key + - key: ca.crt + path: CAs/public.crt + - name: minio-client-certs + secret: + secretName: {{ include "minio.tlsSecretName" . }} + items: + - key: ca.crt + path: CAs/public.crt + {{- end }} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/pvc.yaml b/manifest/helm-charts/infra/minio/templates/pvc.yaml new file mode 100644 index 000000000..9974dd18c --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/pvc.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "minio.createPVC" .) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.persistence.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.persistence.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 2 }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/secrets.yaml b/manifest/helm-charts/infra/minio/templates/secrets.yaml new file mode 100644 index 000000000..3c3241da0 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/secrets.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "minio.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + root-user: {{ include "minio.secret.userValue" . | b64enc | quote }} + root-password: {{ include "minio.secret.passwordValue" . | b64enc | quote }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/service.yaml b/manifest/helm-charts/infra/minio/templates/service.yaml new file mode 100644 index 000000000..10381545c --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/service.yaml @@ -0,0 +1,51 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{ end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: minio-api + port: {{ .Values.service.ports.api }} + targetPort: minio-api + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.api)) }} + nodePort: {{ .Values.service.nodePorts.api }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: minio-console + port: {{ .Values.service.ports.console }} + targetPort: minio-console + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.console)) }} + nodePort: {{ .Values.service.nodePorts.console }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} diff --git a/manifest/helm-charts/infra/minio/templates/serviceaccount.yaml b/manifest/helm-charts/infra/minio/templates/serviceaccount.yaml new file mode 100644 index 000000000..b85d5eb1d --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "minio.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +secrets: + - name: {{ include "common.names.fullname" . 
}} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/servicemonitor.yaml b/manifest/helm-charts/infra/minio/templates/servicemonitor.yaml new file mode 100644 index 000000000..95f48d74d --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/servicemonitor.yaml @@ -0,0 +1,53 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.serviceMonitor.enabled }} +apiVersion: {{ default "monitoring.coreos.com/v1" .Values.metrics.serviceMonitor.apiVersion }} +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: minio-api + path: {{ .Values.metrics.serviceMonitor.path }} + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + scheme: https + {{- end }} + {{- if .Values.metrics.serviceMonitor.tlsConfig }} + tlsConfig: {{- toYaml .Values.metrics.serviceMonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/standalone/deployment.yaml b/manifest/helm-charts/infra/minio/templates/standalone/deployment.yaml new file mode 100644 index 000000000..76052143e --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/standalone/deployment.yaml @@ -0,0 +1,255 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (eq .Values.mode "standalone") }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "common.names.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + {{- if .Values.deployment.updateStrategy }} + strategy: {{- toYaml .Values.deployment.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "minio.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- 
if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "minio.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.persistence.mountPath }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MINIO_SCHEME + value: {{ ternary "https" "http" .Values.tls.enabled | quote }} + - name: MINIO_FORCE_NEW_KEYS + value: {{ ternary "yes" "no" .Values.auth.forceNewKeys | quote }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_USER_FILE + value: "/opt/bitnami/minio/secrets/root-user" + {{- else }} + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: root-user + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_PASSWORD_FILE + value: "/opt/bitnami/minio/secrets/root-password" + {{- else }} + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}} + key: root-password + {{- end }} + {{- if .Values.defaultBuckets }} + - name: MINIO_DEFAULT_BUCKETS + value: {{ .Values.defaultBuckets }} + {{- end }} + - name: MINIO_BROWSER + value: {{ ternary "off" "on" .Values.disableWebUI | quote }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: {{ .Values.metrics.prometheusAuthType | quote }} + - name: MINIO_CONSOLE_PORT_NUMBER + value: {{ .Values.containerPorts.console | quote }} + {{- if .Values.tls.mountPath }} + - name: MINIO_CERTS_DIR + value: {{ .Values.tls.mountPath | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: minio-api + containerPort: {{ .Values.containerPorts.api }} + protocol: TCP + - name: minio-console + containerPort: {{ .Values.containerPorts.console }} + protocol: TCP + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /minio/health/live + port: minio-api + scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled | quote }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: minio-api + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: + tcpSocket: + port: minio-console + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + {{- end }} + - name: data + mountPath: {{ 
.Values.persistence.mountPath }} + {{- if .Values.tls.enabled }} + - name: minio-certs + mountPath: {{ default "/certs" .Values.tls.mountPath }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "minio.claimName" . }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + secret: + secretName: {{ include "minio.tlsSecretName" . }} + items: + - key: tls.crt + path: public.crt + - key: tls.key + path: private.key + - key: ca.crt + path: CAs/public.crt + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/templates/tls-secrets.yaml b/manifest/helm-charts/infra/minio/templates/tls-secrets.yaml new file mode 100644 index 000000000..6af1762b5 --- /dev/null +++ b/manifest/helm-charts/infra/minio/templates/tls-secrets.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} +{{- $ca := genCA "minio-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} +{{- if (include "minio.createTlsSecret" .) }} +{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) 
}} +{{- $ca := genCA "minio-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} diff --git a/manifest/helm-charts/infra/minio/values.yaml b/manifest/helm-charts/infra/minio/values.yaml new file mode 100644 index 000000000..e46e02f59 --- /dev/null +++ b/manifest/helm-charts/infra/minio/values.yaml @@ -0,0 +1,1092 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## e.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## @section MinIO® parameters + +## Bitnami MinIO® image version +## ref: https://hub.docker.com/r/bitnami/minio/tags/ +## @param image.registry MinIO® image registry +## @param image.repository MinIO® image repository +## @param image.tag MinIO® image tag (immutable tags are recommended) +## @param image.digest MinIO® image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy Image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled +## +image: + registry: docker.io + repository: bitnami/minio + tag: 2023.9.20-debian-11-r0 + digest: "" + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Bitnami MinIO® Client image version +## ref: https://hub.docker.com/r/bitnami/minio-client/tags/ +## @param clientImage.registry MinIO® Client image registry +## @param clientImage.repository MinIO® Client image repository +## @param clientImage.tag MinIO® Client image tag (immutable tags are recommended) +## @param clientImage.digest MinIO® Client image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag +## +clientImage: + registry: docker.io + repository: bitnami/minio-client + tag: 2023.9.20-debian-11-r0 + digest: "" +## @param mode MinIO® server mode (`standalone` or `distributed`) +## ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +mode: standalone +## MinIO® authentication parameters +## +auth: + ## @param auth.rootUser MinIO® root username + ## + rootUser: admin + ## @param auth.rootPassword Password for MinIO® root user + ## + rootPassword: "" + ## @param auth.existingSecret Use existing secret for credentials details (`auth.rootUser` and `auth.rootPassword` will be ignored and picked up from this secret).
The secret has to contain the keys `root-user` and `root-password`. + ## + existingSecret: "" + ## @param auth.forcePassword Force users to specify required passwords + ## + forcePassword: false + ## @param auth.useCredentialsFiles Mount credentials as files instead of using environment variables + ## + useCredentialsFiles: false + ## @param auth.forceNewKeys Force root credentials (user and password) to be reconfigured every time they change in the secrets + ## + forceNewKeys: false +## @param defaultBuckets Comma, semi-colon or space separated list of buckets to create at initialization (only in standalone mode) +## e.g: +## defaultBuckets: "my-bucket, my-second-bucket" +## +defaultBuckets: "" +## @param disableWebUI Disable MinIO® Web UI +## ref: https://github.com/minio/minio/tree/master/docs/config/#browser +## +disableWebUI: false +## Enable tls in front of MinIO® containers. +## +tls: + ## @param tls.enabled Enable tls in front of the container + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.existingSecret Name of an existing secret holding the certificate information + ## + existingSecret: "" + ## @param tls.mountPath The mount path where the secret will be located + ## Custom mount path where the certificates will be located, if empty will default to /certs + mountPath: "" +## @param extraEnvVars Extra environment variables to be set on MinIO® container +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables +## +extraEnvVarsSecret: "" +## @param command Default container command (useful when using custom images). Use array form +## +command: [] +## @param args Default container args (useful when using custom images). Use array form +## +args: [] + +## @section MinIO® deployment/statefulset parameters + +## @param schedulerName Specifies the schedulerName, if it's nil uses kube-scheduler +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param terminationGracePeriodSeconds Time (in seconds) given to the MinIO® pod to terminate gracefully +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +terminationGracePeriodSeconds: "" +## MinIO® deployment parameters +## Only when 'mode' is 'standalone' +## +deployment: + ## @param deployment.updateStrategy.type Deployment strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## e.g: + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: Recreate +## MinIO® statefulset parameters +## Only when mode is 'distributed' +## +statefulset: + ## @param statefulset.updateStrategy.type StatefulSet strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## e.g: + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: RollingUpdate + ## @param statefulset.podManagementPolicy StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees.
There are two valid pod management policies: OrderedReady and Parallel + ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy + ## + podManagementPolicy: Parallel + ## @param statefulset.replicaCount Number of pods per zone (only for MinIO® distributed mode). Should be even and `>= 4` + ## + replicaCount: 4 + ## @param statefulset.zones Number of zones (only for MinIO® distributed mode) + ## + zones: 1 + ## @param statefulset.drivesPerNode Number of drives attached to every node (only for MinIO® distributed mode) + ## + drivesPerNode: 1 + +## MinIO® provisioning +## +provisioning: + ## @param provisioning.enabled Enable MinIO® provisioning Job + ## + enabled: false + ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for MinIO® provisioning + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param provisioning.podLabels Extra labels for provisioning pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param provisioning.podAnnotations Provisioning Pod annotations. + ## + podAnnotations: {} + ## @param provisioning.command Default provisioning container command (useful when using custom images). Use array form + ## + command: [] + ## @param provisioning.args Default provisioning container args (useful when using custom images). Use array form + ## + args: [] + ## @param provisioning.extraCommands Optionally specify extra list of additional commands for MinIO® provisioning pod + ## + extraCommands: [] + ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for MinIO® provisioning pod + ## + extraVolumes: [] + ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for MinIO® provisioning container + ## + extraVolumeMounts: [] + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param provisioning.resources.limits The resources limits for the container + ## @param provisioning.resources.requests The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 64Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 200m + ## memory: 128Mi + requests: {} + ## @param provisioning.policies MinIO® policies provisioning + ## https://docs.min.io/docs/minio-admin-complete-guide.html#policy + ## e.g. + ## policies: + ## - name: custom-bucket-specific-policy + ## statements: + ## - resources: + ## - "arn:aws:s3:::my-bucket" + ## actions: + ## - "s3:GetBucketLocation" + ## - "s3:ListBucket" + ## - "s3:ListBucketMultipartUploads" + ## - resources: + ## - "arn:aws:s3:::my-bucket/*" + ## # Allowed values: "Allow" | "Deny" + ## # Defaults to "Deny" if not specified + ## effect: "Allow" + ## actions: + ## - "s3:AbortMultipartUpload" + ## - "s3:DeleteObject" + ## - "s3:GetObject" + ## - "s3:ListMultipartUploadParts" + ## - "s3:PutObject" + policies: [] + ## @param provisioning.users MinIO® users provisioning. Can be used in addition to provisioning.usersExistingSecrets. + ## https://docs.min.io/docs/minio-admin-complete-guide.html#user + ## e.g. 
+ ## users: + ## - username: test-username + ## password: test-password + ## disabled: false + ## policies: + ## - readwrite + ## - consoleAdmin + ## - diagnostics + ## # When set to true, it will replace all policies with the specified ones. + ## # When false, the policies will be added to the existing ones. + ## setPolicies: false + users: [] + ## @param provisioning.usersExistingSecrets Array of existing secrets containing MinIO® users to be provisioned. Can be used in addition to provisioning.users. + ## https://docs.min.io/docs/minio-admin-complete-guide.html#user + ## + ## Instead of configuring users inside values.yaml, referring to existing Kubernetes secrets containing user + ## configurations is possible. + ## e.g. + ## usersExistingSecrets: + ## - centralized-minio-users + ## + ## All provided Kubernetes secrets require a specific data structure. The same data from the provisioning.users example above + ## can be defined via secrets with the following data structure. The secret keys have no meaning to the provisioning job except that + ## they are used as filenames. + ## ## apiVersion: v1 + ## ## kind: Secret + ## ## metadata: + ## ## name: centralized-minio-users + ## ## type: Opaque + ## ## stringData: + ## ## username1: | + ## ## username=test-username + ## ## password=test-password + ## ## disabled=false + ## ## policies=readwrite,consoleAdmin,diagnostics + ## ## setPolicies=false + usersExistingSecrets: [] + ## @param provisioning.groups MinIO® groups provisioning + ## https://docs.min.io/docs/minio-admin-complete-guide.html#group + ## e.g. + ## groups: + ## - name: test-group + ## disabled: false + ## members: + ## - test-username + ## policies: + ## - readwrite + ## # When set to true, it will replace all policies with the specified ones. + ## # When false, the policies will be added to the existing ones. + ## setPolicies: false + groups: [] + ## @param provisioning.buckets MinIO® buckets, versioning, lifecycle, quota and tags provisioning + ## Buckets https://docs.min.io/docs/minio-client-complete-guide.html#mb + ## Lifecycle https://docs.min.io/docs/minio-client-complete-guide.html#ilm + ## Quotas https://docs.min.io/docs/minio-admin-complete-guide.html#bucket + ## Tags https://docs.min.io/docs/minio-client-complete-guide.html#tag + ## Versioning https://docs.min.io/docs/minio-client-complete-guide.html#version + ## e.g. + ## buckets: + ## - name: test-bucket + ## region: us-east-1 + ## # Only when mode is 'distributed' + ## # ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide + ## versioning: false + ## # Versioning is automatically enabled if withLock is true + ## # ref: https://docs.min.io/docs/minio-bucket-versioning-guide.html + ## withLock: true + ## # Only when mode is 'distributed' + ## # ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide + ## lifecycle: + ## - id: TestPrefix7dRetention + ## prefix: test-prefix + ## disabled: false + ## expiry: + ## days: 7 + ## # Days !OR! date + ## # date: "2021-11-11T00:00:00Z" + ## nonconcurrentDays: 3 + ## # Only when mode is 'distributed' + ## # ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide + ## quota: + ## # set (hard still works as an alias but is deprecated) or clear(+ omit size) + ## type: set + ## size: 10GiB + ## tags: + ## key1: value1 + buckets: [] + ## @param provisioning.config MinIO® config provisioning + ## https://docs.min.io/docs/minio-server-configuration-guide.html + ## e.g.
+ ## config: + ## - name: region + ## options: + ## name: us-east-1 + config: [] + ## MinIO® pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param provisioning.podSecurityContext.enabled Enable pod Security Context + ## @param provisioning.podSecurityContext.fsGroup Group ID for the container + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MinIO® container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param provisioning.containerSecurityContext.enabled Enable container Security Context + ## @param provisioning.containerSecurityContext.runAsUser User ID for the container + ## @param provisioning.containerSecurityContext.runAsNonRoot Avoid running as root User + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## Automatic Cleanup for Finished Jobs + ## @param provisioning.cleanupAfterFinished.enabled Enables Cleanup for Finished Jobs + ## @param provisioning.cleanupAfterFinished.seconds Sets the value of ttlSecondsAfterFinished + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/ttlafterfinished/ + ## + cleanupAfterFinished: + enabled: false + seconds: 600 +## @param hostAliases MinIO® pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param containerPorts.api MinIO® container port to open for MinIO® API +## @param containerPorts.console MinIO® container port to open for MinIO® Console +## +containerPorts: + api: 9000 + console: 9001 +## MinIO® pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable pod Security Context +## @param podSecurityContext.fsGroup Group ID for the container +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## MinIO® container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable container Security Context +## @param containerSecurityContext.runAsUser User ID for the container +## @param containerSecurityContext.runAsNonRoot Avoid running as root User +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true +## @param podLabels Extra labels for MinIO® pods +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Annotations for MinIO® pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when `affinity` is set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for MinIO® pod assignment, spread across your cluster among failure-domains +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## @param priorityClassName MinIO® pods' priorityClassName +## +priorityClassName: "" +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
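+## For instance, a minimal sketch of explicit sizing (the values below are illustrative assumptions, not tuned recommendations): +## resources: +## requests: +## cpu: 250m +## memory: 256Mi +## limits: +## cpu: 500m +## memory: 512Mi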
+## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} +## Configure extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 60 +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} +## @param customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} +## @param customStartupProbe Override default startup probe +## +customStartupProbe: {} +## @param lifecycleHooks for the MinIO® container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param extraVolumes Optionally specify extra list of additional volumes for MinIO® pods +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for MinIO® container(s) +## +extraVolumeMounts: [] +## @param initContainers Add additional init containers to the MinIO® pods +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add additional sidecar
containers to the MinIO® pods +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## @section Traffic exposure parameters + +## MinIO® Service properties +## +service: + ## @param service.type MinIO® service type + ## + type: ClusterIP + ## @param service.ports.api MinIO® API service port + ## @param service.ports.console MinIO® Console service port + ## + ports: + api: 9000 + console: 9001 + ## @param service.nodePorts.api Specify the MinIO® API nodePort value for the LoadBalancer and NodePort service types + ## @param service.nodePorts.console Specify the MinIO® Console nodePort value for the LoadBalancer and NodePort service types + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + api: "" + console: "" + ## @param service.clusterIP Service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer` (optional, cloud specific) + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.annotations Annotations for MinIO® service + ## This can be used to set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Headless service properties + ## + headless: + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} +## Configure the ingress resource that allows you to access the +## MinIO® Console. Set up the URL +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress controller resource for MinIO Console + ## + enabled: false + ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: minio.local + ## @param ingress.path The Path to MinIO®. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.servicePort Service port to be used + ## Default is `minio-console`; the alternative is `minio-api`.
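+ ## e.g., assuming the chart's named service ports, to route this ingress to the S3 API instead of the console: + ## servicePort: minio-api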
+ ## + servicePort: minio-console + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: minio.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths Any additional paths that may need to be added to the ingress under the main host + ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - minio.local + ## secretName: minio.local-tls + ## + extraTls: [] + ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate are expected in PEM format + ## name should line up with a secretName set further up + ## + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## + ## Example + ## secrets: + ## - name: minio.local-tls + ## key: "" + ## certificate: "" + ## + secrets: [] + ## @param ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + +## Configure the ingress resource that allows you to access the +## MinIO® API. 
Set up the URL +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +apiIngress: + ## @param apiIngress.enabled Enable ingress controller resource for MinIO API + ## + enabled: false + ## @param apiIngress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param apiIngress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param apiIngress.hostname Default host for the ingress resource + ## + hostname: minio.local + ## @param apiIngress.path The Path to MinIO®. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + ## @param apiIngress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param apiIngress.servicePort Service port to be used + ## Default is `minio-api`; the alternative is `minio-console`. + ## + servicePort: minio-api + ## @param apiIngress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param apiIngress.tls Enable TLS configuration for the hostname defined at `apiIngress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.apiIngress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param apiIngress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param apiIngress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: minio.local + ## path: / + ## + extraHosts: [] + ## @param apiIngress.extraPaths Any additional paths that may need to be added to the ingress under the main host + ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param apiIngress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
+ ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - minio.local + ## secretName: minio.local-tls + ## + extraTls: [] + ## @param apiIngress.secrets If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate are expected in PEM format + ## name should line up with a secretName set further up + ## + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## + ## Example + ## secrets: + ## - name: minio.local-tls + ## key: "" + ## certificate: "" + ## + secrets: [] + ## @param apiIngress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] +## NetworkPolicy parameters +## +networkPolicy: + ## @param networkPolicy.enabled Enable the default NetworkPolicy policy + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port MinIO® is + ## listening on. When true, MinIO® will accept connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.extraFromClauses Allows you to add extra 'from' clauses to the NetworkPolicy + extraFromClauses: [] + ## Example + ## extraFromClauses: + ## - podSelector: + ## matchLabels: + ## a: b + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable MinIO® data persistence using PVC. If false, use emptyDir + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for MinIO® data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.mountPath Data volume mount path + ## + mountPath: /bitnami/minio/data + ## @param persistence.accessModes PVC Access Modes for MinIO® data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for MinIO® data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.existingClaim Name of an existing PVC to use (only in `standalone` mode) + ## + existingClaim: "" + +## @section Volume Permissions parameters + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
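+## e.g., a minimal sketch for volumes that were provisioned with root-owned data (only needed when the MinIO® container cannot write to its persistent volume): +## volumePermissions: +## enabled: true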
+## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r72 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 500m + ## memory: 1Gi + limits: {} + requests: {} + ## Init container's Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section RBAC parameters + +## Specifies whether a ServiceAccount should be created +## +serviceAccount: + ## @param serviceAccount.create Enable the creation of a ServiceAccount for MinIO® pods + ## + create: true + ## @param serviceAccount.name Name of the created ServiceAccount + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Enable/disable auto mounting of the service account token + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Custom annotations for MinIO® ServiceAccount + ## + annotations: {} + +## @section Other parameters + +## MinIO® Pod Disruption Budget configuration in distributed mode.
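+## e.g., a sketch that tolerates losing at most one pod at a time during voluntary disruptions (size this against `statefulset.replicaCount`): +## pdb: +## create: true +## maxUnavailable: 1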
+## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable after the eviction + ## + maxUnavailable: "" + +## @section Metrics parameters + +metrics: + ## @param metrics.prometheusAuthType Authentication mode for Prometheus (`jwt` or `public`) + ## To allow public access to the Prometheus metrics without authentication, set this to `public`. + ## + prometheusAuthType: public + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled If the operator is installed in your cluster, set to true to create a Service Monitor Entry + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## + namespace: "" + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus + ## + jobLabel: "" + ## @param metrics.serviceMonitor.path HTTP path to scrape for metrics + ## + path: /minio/v2/metrics/cluster + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + scrapeTimeout: "" + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.relabelings Metrics relabelings to add to the scrape endpoint, applied before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels parameter for the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.apiVersion ApiVersion for the serviceMonitor Resource (defaults to "monitoring.coreos.com/v1") + apiVersion: "" + ## @param metrics.serviceMonitor.tlsConfig Additional TLS configuration for metrics endpoint with "https" scheme + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.TLSConfig + tlsConfig: {} + + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Prometheus Rule definitions + # - alert: minio cluster nodes offline + # annotations: + # summary: "minio cluster nodes offline" + # description: "minio cluster nodes offline, pod {{`{{`}} $labels.pod {{`}}`}} service
{{`{{`}} $labels.job {{`}}`}} offline" + # for: 10m + # expr: minio_cluster_nodes_offline_total > 0 + # labels: + # severity: critical + # group: PaaS + ## + rules: []
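+## As a closing illustration, a minimal override sketch combining parameters documented in this file (all keys exist above; the secret name and sizes are assumptions for illustration only): +## e.g., a hypothetical values-override.yaml: +## mode: distributed +## statefulset: +## replicaCount: 4 +## auth: +## existingSecret: minio-root-credentials +## persistence: +## size: 100Gi +## metrics: +## serviceMonitor: +## enabled: true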