networks:
  openim:
    driver: bridge

services:
  mongodb:
    image: "${MONGO_IMAGE}"
    ports:
      - "37017:27017"
    container_name: mongo
    command: >
      bash -c '
        docker-entrypoint.sh mongod --wiredTigerCacheSizeGB $$wiredTigerCacheSizeGB --auth &
        until mongosh -u $$MONGO_INITDB_ROOT_USERNAME -p $$MONGO_INITDB_ROOT_PASSWORD --authenticationDatabase admin --eval "db.runCommand({ ping: 1 })" &>/dev/null; do
          echo "Waiting for MongoDB to start..."
          sleep 1
        done &&
        mongosh -u $$MONGO_INITDB_ROOT_USERNAME -p $$MONGO_INITDB_ROOT_PASSWORD --authenticationDatabase admin --eval "
          db = db.getSiblingDB(\"$$MONGO_INITDB_DATABASE\");
          if (!db.getUser(\"$$MONGO_OPENIM_USERNAME\")) {
            db.createUser({
              user: \"$$MONGO_OPENIM_USERNAME\",
              pwd: \"$$MONGO_OPENIM_PASSWORD\",
              roles: [{role: \"readWrite\", db: \"$$MONGO_INITDB_DATABASE\"}]
            });
            print(\"User created successfully: \");
            print(\"Username: $$MONGO_OPENIM_USERNAME\");
            print(\"Password: $$MONGO_OPENIM_PASSWORD\");
            print(\"Database: $$MONGO_INITDB_DATABASE\");
          } else {
            print(\"User already exists in database: $$MONGO_INITDB_DATABASE, Username: $$MONGO_OPENIM_USERNAME\");
          }
        " &&
        tail -f /dev/null
      '
    volumes:
      - "${DATA_DIR}/components/mongodb/data/db:/data/db"
      - "${DATA_DIR}/components/mongodb/data/logs:/data/logs"
      - "${DATA_DIR}/components/mongodb/data/conf:/etc/mongo"
      - "${MONGO_BACKUP_DIR}:/data/backup"
    environment:
      - TZ=Asia/Shanghai
      - wiredTigerCacheSizeGB=1
      - MONGO_INITDB_ROOT_USERNAME=root
      - MONGO_INITDB_ROOT_PASSWORD=openIM123
      - MONGO_INITDB_DATABASE=openim_v3
      - MONGO_OPENIM_USERNAME=openIM
      - MONGO_OPENIM_PASSWORD=openIM123
    restart: always
    networks:
      - openim

  redis:
    image: "${REDIS_IMAGE}"
    container_name: redis
    ports:
      - "16379:6379"
    volumes:
      - "${DATA_DIR}/components/redis/data:/data"
      - "${DATA_DIR}/components/redis/config/redis.conf:/usr/local/redis/config/redis.conf"
    environment:
      TZ: Asia/Shanghai
    restart: always
    sysctls:
      net.core.somaxconn: 1024
    command: >
      redis-server
      --requirepass openIM123
      --appendonly yes
      --aof-use-rdb-preamble yes
      --save ""
    networks:
      - openim

  etcd:
    image: "${ETCD_IMAGE}"
    container_name: etcd
    ports:
      - "12379:2379"
      - "12380:2380"
    environment:
      - ETCD_NAME=s1
      - ETCD_DATA_DIR=/etcd-data
      - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
      - ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379
      - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
      - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://0.0.0.0:2380
      - ETCD_INITIAL_CLUSTER=s1=http://0.0.0.0:2380
      - ETCD_INITIAL_CLUSTER_TOKEN=tkn
      - ETCD_INITIAL_CLUSTER_STATE=new
    restart: always
    networks:
      - openim

  kafka:
    image: "${KAFKA_IMAGE}"
    container_name: kafka
    user: root
    restart: always
    ports:
      - "19094:9094"
    volumes:
      - "${DATA_DIR}/components/kafka:/bitnami/kafka"
    environment:
      # KAFKA_HEAP_OPTS: "-Xms128m -Xmx256m"
      TZ: Asia/Shanghai
      # Unique identifier for the Kafka node (required in controller mode)
      KAFKA_CFG_NODE_ID: 0
      # Defines the roles this Kafka node plays: broker, controller, or both
      KAFKA_CFG_PROCESS_ROLES: controller,broker
      # Specifies which nodes are controller nodes for quorum voting.
      # The syntax follows the KRaft mode (no ZooKeeper): node.id@host:port
      # The controller listener endpoint here is kafka:9093
      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@kafka:9093
      # Specifies which listener is used for controller-to-controller communication
      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
      # Default number of partitions for new topics
      KAFKA_NUM_PARTITIONS: 8
      # Whether to enable automatic topic creation
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true"
      # Kafka internal listeners; Kafka supports multiple ports with different protocols.
      # Each port is used for a specific purpose: INTERNAL for internal broker communication,
      # CONTROLLER for controller communication, EXTERNAL for external client connections.
      # These logical listener names are mapped to actual protocols via KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP.
      # In short, Kafka listens on three logical ports: 9092 for internal communication,
      # 9093 for controller traffic, and 9094 for external access.
      KAFKA_CFG_LISTENERS: "INTERNAL://:9092,CONTROLLER://:9093,EXTERNAL://:9094"
      # Addresses advertised to clients. INTERNAL://kafka:9092 uses the internal Docker service name 'kafka',
      # so other containers can access Kafka via kafka:9092.
      # EXTERNAL://localhost:19094 is the address external clients (e.g., in the LAN) should use to connect.
      # If Kafka is deployed on a different machine than IM, replace 'localhost' with the LAN IP.
      KAFKA_CFG_ADVERTISED_LISTENERS: "INTERNAL://kafka:9092,EXTERNAL://localhost:19094"
      # Maps logical listener names to actual protocols.
      # Supported protocols include: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL
      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT"
      # Defines which listener is used for inter-broker communication within the Kafka cluster
      KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL"
      # Authentication configuration variables: uncomment both to enable SASL auth; leave commented to disable it
      # KAFKA_USERNAME: "openIM"
      # KAFKA_PASSWORD: "openIM123"
    command: >
      /bin/sh -c '
        if [ -n "$${KAFKA_USERNAME}" ] && [ -n "$${KAFKA_PASSWORD}" ]; then
          echo "=== Kafka SASL Authentication ENABLED ==="
          echo "Username: $${KAFKA_USERNAME}"
          # Set environment variables for SASL authentication
          export KAFKA_CFG_LISTENERS="SASL_PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094"
          export KAFKA_CFG_ADVERTISED_LISTENERS="SASL_PLAINTEXT://kafka:9092,EXTERNAL://localhost:19094"
          export KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP="CONTROLLER:PLAINTEXT,EXTERNAL:SASL_PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT"
          export KAFKA_CFG_SASL_ENABLED_MECHANISMS="PLAIN"
          export KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL="PLAIN"
          export KAFKA_CFG_INTER_BROKER_LISTENER_NAME="SASL_PLAINTEXT"
          export KAFKA_CLIENT_USERS="$${KAFKA_USERNAME}"
          export KAFKA_CLIENT_PASSWORDS="$${KAFKA_PASSWORD}"
        fi
        # Start Kafka with the configured environment
        exec /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh
      '
    networks:
      - openim

  minio:
    image: "${MINIO_IMAGE}"
    ports:
      - "10005:9000"
      - "19090:9090"
    container_name: minio
    volumes:
      - "${DATA_DIR}/components/mnt/data:/data"
      - "${DATA_DIR}/components/mnt/config:/root/.minio"
    environment:
      TZ: Asia/Shanghai
      MINIO_ROOT_USER: root
      MINIO_ROOT_PASSWORD: openIM123
    restart: always
    command: minio server /data --console-address ':9090'
    networks:
      - openim

  openim-web-front:
    image: ${OPENIM_WEB_FRONT_IMAGE}
    container_name: openim-web-front
    restart: always
    ports:
      - "11001:80"
    networks:
      - openim

  openim-admin-front:
    image: ${OPENIM_ADMIN_FRONT_IMAGE}
    container_name: openim-admin-front
    restart: always
    ports:
      - "11002:80"
    networks:
      - openim

  prometheus:
    image: ${PROMETHEUS_IMAGE}
    container_name: prometheus
    restart: always
    user: root
    profiles:
      - m
    volumes:
      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
      - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
      - ${DATA_DIR}/components/prometheus/data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.listen-address=:${PROMETHEUS_PORT}'
    network_mode: host

  alertmanager:
    image: ${ALERTMANAGER_IMAGE}
    container_name: alertmanager
    restart: always
    profiles:
      - m
    volumes:
      - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
      - ./config/email.tmpl:/etc/alertmanager/email.tmpl
    command:
      - '--config.file=/etc/alertmanager/alertmanager.yml'
      - '--web.listen-address=:${ALERTMANAGER_PORT}'
    network_mode: host

  grafana:
    image: ${GRAFANA_IMAGE}
    container_name: grafana
    user: root
    restart: always
    profiles:
      - m
    environment:
      - GF_SECURITY_ALLOW_EMBEDDING=true
      - GF_SESSION_COOKIE_SAMESITE=none
      - GF_SESSION_COOKIE_SECURE=true
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
      - GF_SERVER_HTTP_PORT=${GRAFANA_PORT}
    volumes:
      - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
    network_mode: host

  node-exporter:
    image: ${NODE_EXPORTER_IMAGE}
    container_name: node-exporter
    restart: always
    profiles:
      - m
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--path.rootfs=/rootfs'
      - '--web.listen-address=:${NODE_EXPORTER_PORT}'
    network_mode: host
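
# Quick connectivity checks (informal sketch, not part of the deployment itself).
# The commands assume the default credentials and host ports defined above
# (openIM123, 37017, 16379, 12379, 19094, 10005) and that the corresponding CLI
# tools are available; adjust them if you changed the values in .env.
#
#   # MongoDB: authenticate as the application user created by the init script
#   mongosh "mongodb://openIM:openIM123@localhost:37017/openim_v3?authSource=openim_v3" --eval "db.runCommand({ ping: 1 })"
#
#   # Redis
#   redis-cli -p 16379 -a openIM123 ping
#
#   # etcd
#   etcdctl --endpoints=http://localhost:12379 endpoint health
#
#   # Kafka (with SASL left disabled, i.e. KAFKA_USERNAME/KAFKA_PASSWORD still commented)
#   docker exec kafka /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:9092 --list
#
#   # MinIO (requires the MinIO client `mc`)
#   mc alias set openim http://localhost:10005 root openIM123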
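
# If KAFKA_USERNAME/KAFKA_PASSWORD are uncommented in the kafka service above,
# external clients connecting through localhost:19094 must use SASL PLAIN.
# A minimal client.properties sketch using standard Kafka client settings and
# the credentials shown above (assumption: PLAIN over plaintext, matching what
# the startup script exports):
#
#   security.protocol=SASL_PLAINTEXT
#   sasl.mechanism=PLAIN
#   sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="openIM" password="openIM123";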
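
# None of the services above declare a healthcheck, so other compose files
# cannot gate on readiness via `depends_on` with `condition: service_healthy`.
# A minimal sketch for Redis, reusing the password configured above
# (hypothetical addition, not part of the upstream file); MongoDB could be
# gated the same way with a mongosh ping:
#
#   healthcheck:
#     test: ["CMD", "redis-cli", "-a", "openIM123", "ping"]
#     interval: 10s
#     timeout: 3s
#     retries: 5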