From bb4463349e5ae3759cb96c3ac80b16597f1eae3a Mon Sep 17 00:00:00 2001
From: Xinwei Xiong <3293172751@qq.com>
Date: Tue, 12 Dec 2023 16:44:02 +0800
Subject: [PATCH] feat: add openim docker compose and env (#1550)

* feat: add openim docker compose

Signed-off-by: Xinwei Xiong(cubxxw) <3293172751nss@gmail.com>

* feat: add openim env

Signed-off-by: Xinwei Xiong(cubxxw) <3293172751nss@gmail.com>

* feat: add openim env

Signed-off-by: Xinwei Xiong(cubxxw) <3293172751nss@gmail.com>

* feat: add openim fix env

Signed-off-by: Xinwei Xiong(cubxxw) <3293172751nss@gmail.com>

---------

Signed-off-by: Xinwei Xiong(cubxxw) <3293172751nss@gmail.com>
---
 config/templates/README.md                 |  36 ++
 config/templates/alertmanager.yml.template |  33 ++
 config/templates/config.yaml.template      | 506 +++++++++++++++++++++
 config/templates/env.template              | 249 ++++++++++
 config/templates/prometheus.yml.template   |  85 ++++
 deployments/templates/env-template.yaml    |  23 +-
 docker-compose.yml                         | 118 +++--
 docs/contrib/util-makefile.md              |   2 +-
 scripts/init-config.sh                     | 167 +++++--
 9 files changed, 1093 insertions(+), 126 deletions(-)
 create mode 100644 config/templates/README.md
 create mode 100644 config/templates/alertmanager.yml.template
 create mode 100644 config/templates/config.yaml.template
 create mode 100644 config/templates/env.template
 create mode 100644 config/templates/prometheus.yml.template

diff --git a/config/templates/README.md b/config/templates/README.md
new file mode 100644
index 000000000..6a979c6bb
--- /dev/null
+++ b/config/templates/README.md
@@ -0,0 +1,36 @@
+# Examples Directory
+
+Welcome to the `config/templates` directory of our project! This directory contains a collection of example files that demonstrate various configurations and setups for our software. These examples are designed to provide templates you can use as a starting point for your own configurations.
+
+## Overview
+
+In this directory, you'll find examples for a variety of use cases. Each file is a template with default values and configurations that illustrate best practices and typical scenarios. Whether you're just getting started or looking to implement a complex setup, these examples should help you get on the right track.
+
+## Structure
+
+Here's a quick overview of what you'll find in this directory:
+
++ `env.template`: Demonstrates how to set up environment variables.
++ `config.yaml.template`: A sample configuration file for the OpenIM server.
++ `prometheus.yml.template`: An example Prometheus configuration for monitoring.
++ `alertmanager.yml.template`: A template for setting up Alertmanager configurations.
+
+## How to Use These Examples
+
+To use these examples, copy the relevant file to your working directory and rename it as needed (e.g., removing the `.template` suffix). Then, modify the file according to your requirements.
+
+### Tips for Using Example Files:
+
+1. **Read the Comments**: Each file contains comments that explain the various sections and settings. Make sure to read these comments to understand how to customize the file.
+2. **Check for Required Changes**: Some examples require changes (such as setting specific environment variables) before they can be used effectively.
+3. **Version Compatibility**: Ensure that the example files are compatible with the version of the software you are using.
+
+## Contributing
+
+If you have a configuration that you believe would be beneficial to others, please feel free to contribute by opening a pull request with your proposed changes. 
We appreciate contributions that expand our examples with new scenarios and use cases. + +## Support + +If you encounter any issues or have questions regarding the example files, please open an issue on our repository. Our community is here to help you navigate through any challenges you might face. + +Thank you for exploring our examples, and we hope they will be helpful in setting up and configuring your environment! diff --git a/config/templates/alertmanager.yml.template b/config/templates/alertmanager.yml.template new file mode 100644 index 000000000..da5f99b19 --- /dev/null +++ b/config/templates/alertmanager.yml.template @@ -0,0 +1,33 @@ +###################### AlertManager Configuration ###################### +# AlertManager configuration using environment variables +# +# Resolve timeout +# SMTP configuration for sending alerts +# Templates for email notifications +# Routing configurations for alerts +# Receiver configurations +global: + resolve_timeout: 5m + smtp_from: alert@openim.io + smtp_smarthost: smtp.163.com:465 + smtp_auth_username: alert@openim.io + smtp_auth_password: YOURAUTHPASSWORD + smtp_require_tls: false + smtp_hello: xxx监控告警 + +templates: + - /etc/alertmanager/email.tmpl + +route: + group_by: ['alertname'] + group_wait: 5s + group_interval: 5s + repeat_interval: 5m + receiver: email +receivers: + - name: email + email_configs: + - to: 'alert@example.com' + html: '{{ template "email.to.html" . }}' + headers: { Subject: "[OPENIM-SERVER]Alarm" } + send_resolved: true diff --git a/config/templates/config.yaml.template b/config/templates/config.yaml.template new file mode 100644 index 000000000..a35d06c8c --- /dev/null +++ b/config/templates/config.yaml.template @@ -0,0 +1,506 @@ +# Copyright © 2023 OpenIM. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the License); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# ----------------------------------------------------------------- +# TODO: This config file is the template file +# --| source: deployments/templates/openim.yaml +# --| env: scripts/install/environment +# --| target: config/config.yaml +# ----------------------------------------------------------------- + +envs: + discovery: zookeeper + +###################### Zookeeper ###################### +# Zookeeper configuration +# It's not recommended to modify the schema +# +# Zookeeper address +# Zookeeper username +# Zookeeper password +zookeeper: + schema: openim + address: [ 172.28.0.1:12181 ] + username: '' + password: '' + +###################### Mongo ###################### +# MongoDB configuration +# If uri is not empty, it will be used directly +# +# MongoDB address for standalone setup, Mongos address for sharded cluster setup +# Default MongoDB database name +# Maximum connection pool size +mongo: + uri: '' + address: [ 172.28.0.1:37017 ] + database: openIM_v3 + username: root + password: openIM123 + maxPoolSize: 100 + +###################### Redis configuration information ###################### +# Redis configuration +# +# Username is required only for Redis version 6.0+ +redis: + address: [ 172.28.0.1:16379 ] + username: '' + password: openIM123 + +###################### Kafka configuration information ###################### +# Kafka configuration +# +# Kafka username +# Kafka password +# It's not recommended to modify this topic name +# Consumer group ID, it's not recommended to modify +kafka: + username: '' + password: '' + addr: [ 172.28.0.1:19094 ] + latestMsgToRedis: + topic: "latestMsgToRedis" + offlineMsgToMongo: + topic: "offlineMsgToMongoMysql" + msgToPush: + topic: "msgToPush" + consumerGroupID: + msgToRedis: redis + msgToMongo: mongo + msgToMySql: mysql + msgToPush: push + +###################### RPC configuration information ###################### +# RPC configuration +# +# IP address to register with zookeeper when starting RPC, the IP and corresponding rpcPort should be accessible by api/gateway +# Default listen IP is 0.0.0.0 +rpc: + registerIP: '' + listenIP: 0.0.0.0 + +###################### API configuration information ###################### +# API configuration +# +# API service port +# Default listen IP is 0.0.0.0 +api: + openImApiPort: [ 10002 ] + listenIP: 0.0.0.0 + +###################### Object configuration information ###################### +# Object storage configuration +# +# Use minio for object storage +# API URL should be accessible by the app +# It's not recommended to modify the bucket name +# Endpoint should be accessible by the app +# Session token +# Configuration for Tencent COS +# Configuration for Aliyun OSS +# apiURL is the address of the api, the access address of the app, use s3 must be configured +# minio.endpoint can be configured as an intranet address, +# minio.signEndpoint is minio public network address +object: + enable: "minio" + apiURL: "http://172.28.0.1:10002" + minio: + bucket: "openim" + endpoint: "http://172.28.0.1:10005" + accessKeyID: "root" + secretAccessKey: "openIM123" + sessionToken: '' + signEndpoint: "http://172.28.0.1:10005" + publicRead: false + cos: + bucketURL: https://temp-1252357374.cos.ap-chengdu.myqcloud.com + secretID: '' + secretKey: '' + sessionToken: '' + publicRead: false + oss: + endpoint: "https://oss-cn-chengdu.aliyuncs.com" + bucket: "demo-9999999" + bucketURL: "https://demo-9999999.oss-cn-chengdu.aliyuncs.com" + accessKeyID: '' + accessKeySecret: '' + sessionToken: '' + publicRead: false + kodo: + 
endpoint: "http://s3.cn-east-1.qiniucs.com" + bucket: "demo-9999999" + bucketURL: "http://your.domain.com" + accessKeyID: '' + accessKeySecret: '' + sessionToken: '' + publicRead: false + +###################### RPC Port Configuration ###################### +# RPC service ports +# These ports are passed into the program by the script and are not recommended to modify +# For launching multiple programs, just fill in multiple ports separated by commas +# For example, [10110, 10111] +rpcPort: + openImUserPort: [ 10110 ] + openImFriendPort: [ 10120 ] + openImMessagePort: [ 10130 ] + openImGroupPort: [ 10150 ] + openImAuthPort: [ 10160 ] + openImPushPort: [ 10170 ] + openImConversationPort: [ 10180 ] + openImThirdPort: [ 10190 ] + +###################### RPC Register Name Configuration ###################### +# RPC service names for registration, it's not recommended to modify these +rpcRegisterName: + openImUserName: User + openImFriendName: Friend + openImMsgName: Msg + openImPushName: Push + openImMessageGatewayName: MessageGateway + openImGroupName: Group + openImAuthName: Auth + openImConversationName: Conversation + openImThirdName: Third + +###################### Log Configuration ###################### +# Log configuration +# +# Storage directory +# Log rotation time +# Maximum number of logs to retain +# Log level, 6 means all levels +# Whether to output to stdout +# Whether to output in json format +# Whether to include stack trace in logs +log: + storageLocation: ../logs/ + rotationTime: 24 + remainRotationCount: 2 + remainLogLevel: 6 + isStdout: false + isJson: false + withStack: false + +###################### Variables definition ###################### +# Long connection server configuration +# +# Websocket port for msg_gateway +# Maximum number of websocket connections +# Maximum length of websocket request package +# Websocket connection handshake timeout +longConnSvr: + openImWsPort: [ 10001 ] + websocketMaxConnNum: 100000 + openImMessageGatewayPort: [ 10140 ] + websocketMaxMsgLen: 4096 + websocketTimeout: 10 + +# Push notification service configuration +# +# Use GeTui for push notifications +# GeTui offline push configuration +# FCM offline push configuration +# Account file, place it in the config directory +# JPush configuration, modify these after applying in JPush backend +push: + enable: getui + geTui: + pushUrl: "https://restapi.getui.com/v2/$appId" + masterSecret: '' + appKey: '' + intent: '' + channelID: '' + channelName: '' + fcm: + serviceAccount: "x.json" + jpns: + appKey: '' + masterSecret: '' + pushUrl: '' + pushIntent: '' + +# App manager configuration +# +# Built-in app manager user IDs +# Built-in app manager nicknames +manager: + userID: [ "openIM123456", "openIM654321", "openIMAdmin" ] + nickname: [ "system1", "system2", "system3" ] + +# Multi-platform login policy +# For each platform(Android, iOS, Windows, Mac, web), only one can be online at a time +multiLoginPolicy: 1 + +# Whether to store messages in MySQL, messages in MySQL are only used for management background +chatPersistenceMysql: true + +# Message cache timeout in seconds, it's not recommended to modify +msgCacheTimeout: 86400 + +# Whether to enable read receipts for group chat +groupMessageHasReadReceiptEnable: true + +# Whether to enable read receipts for single chat +singleMessageHasReadReceiptEnable: true + +# MongoDB offline message retention period in days +retainChatRecords: 365 + +# Schedule to clear expired messages(older than retainChatRecords days) in MongoDB every Wednesday at 2am +# 
This deletion is just for cleaning up disk usage according to previous configuration retainChatRecords, no notification will be sent +chatRecordsClearTime: "0 2 * * 3" + +# Schedule to auto delete messages every day at 2am +# This deletion is for messages that have been retained for more than msg_destruct_time (seconds) in the conversation field +msgDestructTime: "0 2 * * *" + +# Secret key +secret: openIM123 + +# Token policy +# +# Token expiration period in days +tokenPolicy: + expire: 90 + +# Message verification policy +# +# Whether to verify friendship when sending messages +messageVerify: + friendVerify: false + +# iOS push notification configuration +# +# iOS push notification sound +# Whether to count badge +# Whether it's production environment +iosPush: + pushSound: "xxx" + badgeCount: true + production: false + +###################### Third-party service configuration ###################### +# Callback configuration +# +# Callback URL +# Whether to enable this callback event +# Timeout in seconds +# Whether to continue execution if callback fails +callback: + url: "" + beforeSendSingleMsg: + enable: false + timeout: 5 + failedContinue: true + afterSendSingleMsg: + enable: false + timeout: 5 + failedContinue: true + beforeSendGroupMsg: + enable: false + timeout: 5 + failedContinue: true + afterSendGroupMsg: + enable: false + timeout: 5 + failedContinue: true + msgModify: + enable: false + timeout: 5 + failedContinue: true + userOnline: + enable: false + timeout: 5 + failedContinue: true + userOffline: + enable: false + timeout: 5 + failedContinue: true + userKickOff: + enable: false + timeout: 5 + failedContinue: true + offlinePush: + enable: false + timeout: 5 + failedContinue: true + onlinePush: + enable: false + timeout: 5 + failedContinue: true + superGroupOnlinePush: + enable: false + timeout: 5 + failedContinue: true + beforeAddFriend: + enable: false + timeout: 5 + failedContinue: true + beforeUpdateUserInfo: + enable: false + timeout: 5 + failedContinue: true + beforeCreateGroup: + enable: false + timeout: 5 + failedContinue: true + afterCreateGroup: + enable: false + timeout: 5 + failedContinue: true + beforeMemberJoinGroup: + enable: false + timeout: 5 + failedContinue: true + beforeSetGroupMemberInfo: + enable: false + timeout: 5 + failedContinue: true + afterSetGroupMemberInfo: + enable: false + timeout: 5 + failedContinue: true + setMessageReactionExtensions: + enable: false + timeout: 5 + failedContinue: true + quitGroup: + enable: false + timeout: 5 + failedContinue: true + killGroupMember: + enable: false + timeout: 5 + failedContinue: true + dismissGroup: + enable: false + timeout: 5 + failedContinue: true + joinGroup: + enable: false + timeout: 5 + failedContinue: true + groupMsgRead: + enable: false + timeout: 5 + failedContinue: true + singleMsgRead: + enable: false + timeout: 5 + failedContinue: true + updateUserInfo: + enable: false + timeout: 5 + failedContinue: true + beforeUserRegister: + enable: false + timeout: 5 + failedContinue: true + afterUserRegister: + enable: false + timeout: 5 + failedContinue: true + transferGroupOwner: + enable: false + timeout: 5 + failedContinue: true + beforeSetFriendRemark: + enable: false + timeout: 5 + failedContinue: true + afterSetFriendRemark: + enable: false + timeout: 5 + failedContinue: true + afterGroupMsgRead: + enable: false + timeout: 5 + failedContinue: true + afterGroupMsgRevoke: + enable: false + timeout: 5 + failedContinue: true + afterJoinGroup: + enable: false + timeout: 5 + failedContinue: true + 
beforeInviteUserToGroup: + enable: false + timeout: 5 + failedContinue: true + joinGroupAfter: + enable: false + timeout: 5 + failedContinue: true + setGroupInfoAfter: + enable: false + timeout: 5 + failedContinue: true + setGroupInfoBefore: + enable: false + timeout: 5 + failedContinue: true + revokeMsgAfter: + enable: false + timeout: 5 + failedContinue: true + addBlackBefore: + enable: false + timeout: 5 + failedContinue: true + addFriendAfter: + enable: false + timeout: 5 + failedContinue: true + addFriendAgreeBefore: + enable: false + timeout: 5 + failedContinue: true + deleteFriendAfter: + enable: false + timeout: 5 + failedContinue: true + importFriendsBefore: + enable: false + timeout: 5 + failedContinue: true + importFriendsAfter: + enable: false + timeout: 5 + failedContinue: true + removeBlackAfter: + enable: false + timeout: 5 + failedContinue: true +###################### Prometheus ###################### +# Prometheus configuration for various services +# The number of Prometheus ports per service needs to correspond to rpcPort +# The number of ports needs to be consistent with msg_transfer_service_num in script/path_info.sh +prometheus: + enable: false + prometheusUrl: 172.28.0.1:13000 + apiPrometheusPort: [20100] + userPrometheusPort: [ 20110 ] + friendPrometheusPort: [ 20120 ] + messagePrometheusPort: [ 20130 ] + messageGatewayPrometheusPort: [ 20140 ] + groupPrometheusPort: [ 20150 ] + authPrometheusPort: [ 20160 ] + pushPrometheusPort: [ 20170 ] + conversationPrometheusPort: [ 20230 ] + rtcPrometheusPort: [ 21300 ] + thirdPrometheusPort: [ 21301 ] + messageTransferPrometheusPort: [ 21400, 21401, 21402, 21403 ] # List of ports diff --git a/config/templates/env.template b/config/templates/env.template new file mode 100644 index 000000000..e47a9c073 --- /dev/null +++ b/config/templates/env.template @@ -0,0 +1,249 @@ +# ====================================== +# ========= Basic Configuration ======== +# ====================================== + +# The user for authentication or system operations. +# Default: OPENIM_USER=root +USER=root + +# Password associated with the specified user for authentication. +# Default: PASSWORD=openIM123 +PASSWORD=openIM123 + +# Endpoint for the MinIO object storage service. +# Default: MINIO_ENDPOINT=http://172.28.0.1:10005 +MINIO_ENDPOINT=http://172.28.0.1:10005 + +# Base URL for the application programming interface (API). +# Default: API_URL=http://172.28.0.1:10002 +API_URL=http://172.28.0.1:10002 + +# Directory path for storing data files or related information. +# Default: DATA_DIR=./ +DATA_DIR=./ + +# Choose the appropriate image address, the default is GITHUB image, +# you can choose docker hub, for Chinese users can choose Ali Cloud +# export IMAGE_REGISTRY="ghcr.io/openimsdk" +# export IMAGE_REGISTRY="openim" +# export IMAGE_REGISTRY="registry.cn-hangzhou.aliyuncs.com/openimsdk" +IMAGE_REGISTRY=ghcr.io/openimsdk + +# ====================================== +# ========= Network Configuration ====== +# ====================================== + +# Subnet for the Docker network. +# Default: DOCKER_BRIDGE_SUBNET=172.28.0.0/16 +DOCKER_BRIDGE_SUBNET=172.28.0.0/16 + +# Gateway for the Docker network. 
+# Default: DOCKER_BRIDGE_GATEWAY=172.28.0.1 +DOCKER_BRIDGE_GATEWAY=172.28.0.1 + +MONGO_NETWORK_ADDRESS=172.28.0.2 +REDIS_NETWORK_ADDRESS=172.28.0.3 +KAFKA_NETWORK_ADDRESS=172.28.0.4 +ZOOKEEPER_NETWORK_ADDRESS=172.28.0.5 +MINIO_NETWORK_ADDRESS=172.28.0.6 +OPENIM_WEB_NETWORK_ADDRESS=172.28.0.7 +OPENIM_SERVER_NETWORK_ADDRESS=172.28.0.8 +OPENIM_CHAT_NETWORK_ADDRESS=172.28.0.9 +PROMETHEUS_NETWORK_ADDRESS=172.28.0.10 +GRAFANA_NETWORK_ADDRESS=172.28.0.11 +NODE_EXPORTER_NETWORK_ADDRESS=172.28.0.12 +OPENIM_ADMIN_FRONT_NETWORK_ADDRESS=172.28.0.13 +ALERT_MANAGER_NETWORK_ADDRESS=172.28.0.14 + +# =============================================== +# = Component Extension Configuration = +# =============================================== + +# ============ Component Extension Configuration ========== +# ----- ZooKeeper Configuration ----- +# Address or hostname for the ZooKeeper service. +# Default: ZOOKEEPER_ADDRESS=172.28.0.1 +ZOOKEEPER_ADDRESS=172.28.0.5 + +# Port for ZooKeeper service. +# Default: ZOOKEEPER_PORT=12181 +ZOOKEEPER_PORT=12181 + +# ----- MongoDB Configuration ----- +# Address or hostname for the MongoDB service. +# Default: MONGO_ADDRESS=172.28.0.1 +MONGO_ADDRESS=172.28.0.2 + +# Port on which MongoDB service is running. +# Default: MONGO_PORT=37017 +# MONGO_PORT=37017 + +# Username to authenticate with the MongoDB service. +# Default: MONGO_USERNAME=root +# MONGO_USERNAME=root + +# Password to authenticate with the MongoDB service. +# Default: MONGO_PASSWORD=openIM123 +MONGO_PASSWORD=openIM123 + +# Name of the database in MongoDB to be used. +# Default: MONGO_DATABASE=openIM_v3 +MONGO_DATABASE=openIM_v3 + +# ----- Redis Configuration ----- +# Address or hostname for the Redis service. +# Default: REDIS_ADDRESS=172.28.0.1 +REDIS_ADDRESS=172.28.0.3 + +# Port on which Redis in-memory data structure store is running. +# Default: REDIS_PORT=16379 +REDIS_PORT=16379 + +# Password to authenticate with the Redis service. +# Default: REDIS_PASSWORD=openIM123 +REDIS_PASSWORD=openIM123 + +# ----- Kafka Configuration ----- +# Address or hostname for the Kafka service. +# Default: KAFKA_ADDRESS=172.28.0.1 +KAFKA_ADDRESS=172.28.0.4 + +# Kakfa username to authenticate with the Kafka service. +# KAFKA_USERNAME='' + +# Port on which Kafka distributed streaming platform is running. +# Default: KAFKA_PORT=19092 +KAFKA_PORT=19094 + +# Topic in Kafka for storing the latest messages in Redis. +# Default: KAFKA_LATESTMSG_REDIS_TOPIC=latestMsgToRedis +KAFKA_LATESTMSG_REDIS_TOPIC=latestMsgToRedis + +# Topic in Kafka for pushing messages (e.g. notifications or updates). +# Default: KAFKA_MSG_PUSH_TOPIC=msgToPush +KAFKA_MSG_PUSH_TOPIC=msgToPush + +# Topic in Kafka for storing offline messages in MongoDB. +# Default: KAFKA_OFFLINEMSG_MONGO_TOPIC=offlineMsgToMongoMysql +KAFKA_OFFLINEMSG_MONGO_TOPIC=offlineMsgToMongoMysql + +# ----- MinIO Configuration ---- +# Address or hostname for the MinIO object storage service. +# Default: MINIO_ADDRESS=172.28.0.1 +MINIO_ADDRESS=172.28.0.6 + +# Port on which MinIO object storage service is running. +# Default: MINIO_PORT=10005 +MINIO_PORT=10005 + +# Access key to authenticate with the MinIO service. +# Default: MINIO_ACCESS_KEY=root +# MINIO_ACCESS_KEY=root + +# Secret key corresponding to the access key for MinIO authentication. +# Default: MINIO_SECRET_KEY=openIM123 +MINIO_SECRET_KEY=openIM123 + +# ----- Prometheus Configuration ----- +# Address or hostname for the Prometheus service. 
+# Default: PROMETHEUS_ADDRESS=172.28.0.1 +PROMETHEUS_ADDRESS=172.28.0.10 + +# Port on which Prometheus service is running. +# Default: PROMETHEUS_PORT=19090 +PROMETHEUS_PORT=19090 + +# ----- Grafana Configuration ----- +# Address or hostname for the Grafana service. +# Default: GRAFANA_ADDRESS=172.28.0.1 +GRAFANA_ADDRESS=172.28.0.11 + +# Port on which Grafana service is running. +# Default: GRAFANA_PORT=13000 +GRAFANA_PORT=13000 + +# ====================================== +# ============ OpenIM Web =============== +# ====================================== + +# Path to the OpenIM web distribution. +# Default: OPENIM_WEB_DIST_PATH=/app/dist +OPENIM_WEB_DIST_PATH=/app/dist + +# Port on which OpenIM web service is running. +# Default: OPENIM_WEB_PORT=11001 +OPENIM_WEB_PORT=11001 + +# Address or hostname for the OpenIM web service. +# Default: OPENIM_WEB_ADDRESS=172.28.0.1 +OPENIM_WEB_ADDRESS=172.28.0.7 + +# ====================================== +# ========= OpenIM Server ============== +# ====================================== + +# Address or hostname for the OpenIM server. +# Default: OPENIM_SERVER_ADDRESS=172.28.0.1 +OPENIM_SERVER_ADDRESS=172.28.0.8 + +# Port for the OpenIM WebSockets. +# Default: OPENIM_WS_PORT=10001 +OPENIM_WS_PORT=10001 + +# Port for the OpenIM API. +# Default: API_OPENIM_PORT=10002 +API_OPENIM_PORT=10002 + + +# ====================================== +# ========== OpenIM Chat =============== +# ====================================== + +# Branch name for OpenIM chat. +# Default: CHAT_BRANCH=main +CHAT_BRANCH=main + +# Address or hostname for the OpenIM chat service. +# Default: OPENIM_CHAT_ADDRESS=172.28.0.1 +OPENIM_CHAT_ADDRESS=172.28.0.9 + +# Port for the OpenIM chat API. +# Default: OPENIM_CHAT_API_PORT=10008 +OPENIM_CHAT_API_PORT=10008 + +# Directory path for storing data files or related information for OpenIM chat. +# Default: OPENIM_CHAT_DATA_DIR=./openim-chat/main +OPENIM_CHAT_DATA_DIR=./openim-chat/main + + +# ====================================== +# ========== OpenIM Admin ============== +# ====================================== + +# Branch name for OpenIM server. +# Default: SERVER_BRANCH=main +SERVER_BRANCH=main + +# Port for the OpenIM admin API. +# Default: OPENIM_ADMIN_API_PORT=10009 +OPENIM_ADMIN_API_PORT=10009 + +# Port for the node exporter. +# Default: NODE_EXPORTER_PORT=19100 +NODE_EXPORTER_PORT=19100 + +# Port for the prometheus. +# Default: PROMETHEUS_PORT=19090 +PROMETHEUS_PORT=19090 + +# Port for the grafana. +# Default: GRAFANA_PORT=13000 +GRAFANA_PORT=13000 + +# Port for the admin front. +# Default: OPENIM_ADMIN_FRONT_PORT=11002 +OPENIM_ADMIN_FRONT_PORT=11002 + +# Port for the alertmanager. +# Default: ALERT_MANAGER_PORT=19093 +ALERT_MANAGER_PORT=19093 diff --git a/config/templates/prometheus.yml.template b/config/templates/prometheus.yml.template new file mode 100644 index 000000000..7950c5d33 --- /dev/null +++ b/config/templates/prometheus.yml.template @@ -0,0 +1,85 @@ +# my global config +global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + +# Alertmanager configuration +alerting: + alertmanagers: + - static_configs: + - targets: ['172.28.0.1:19093'] + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 
+rule_files: + - "instance-down-rules.yml" +# - "first_rules.yml" +# - "second_rules.yml" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label "job='job_name'"" to any timeseries scraped from this config. + # Monitored information captured by prometheus + - job_name: 'node-exporter' + static_configs: + - targets: [ '172.28.0.1:19100' ] + labels: + namespace: 'default' + + # prometheus fetches application services + - job_name: 'openimserver-openim-api' + static_configs: + - targets: [ '172.28.0.1:20100' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-msggateway' + static_configs: + - targets: [ '172.28.0.1:20140' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-msgtransfer' + static_configs: + - targets: [ 172.28.0.1:21400, 172.28.0.1:21401, 172.28.0.1:21402, 172.28.0.1:21403 ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-push' + static_configs: + - targets: [ '172.28.0.1:20170' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-rpc-auth' + static_configs: + - targets: [ '172.28.0.1:20160' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-rpc-conversation' + static_configs: + - targets: [ '172.28.0.1:20230' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-rpc-friend' + static_configs: + - targets: [ '172.28.0.1:20120' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-rpc-group' + static_configs: + - targets: [ '172.28.0.1:20150' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-rpc-msg' + static_configs: + - targets: [ '172.28.0.1:20130' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-rpc-third' + static_configs: + - targets: [ '172.28.0.1:21301' ] + labels: + namespace: 'default' + - job_name: 'openimserver-openim-rpc-user' + static_configs: + - targets: [ '172.28.0.1:20110' ] + labels: + namespace: 'default' diff --git a/deployments/templates/env-template.yaml b/deployments/templates/env-template.yaml index 4db838c8e..b62543aba 100644 --- a/deployments/templates/env-template.yaml +++ b/deployments/templates/env-template.yaml @@ -1,17 +1,3 @@ -# Copyright © 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - # ====================================== # ========= Basic Configuration ======== # ====================================== @@ -90,11 +76,11 @@ MONGO_ADDRESS=${MONGO_NETWORK_ADDRESS} # Port on which MongoDB service is running. # Default: MONGO_PORT=37017 -MONGO_PORT=${MONGO_PORT} +# MONGO_PORT=${MONGO_PORT} # Username to authenticate with the MongoDB service. # Default: MONGO_USERNAME=root -MONGO_USERNAME=${MONGO_USERNAME} +# MONGO_USERNAME=${MONGO_USERNAME} # Password to authenticate with the MongoDB service. 
# Default: MONGO_PASSWORD=openIM123 @@ -122,6 +108,9 @@ REDIS_PASSWORD=${REDIS_PASSWORD} # Default: KAFKA_ADDRESS=172.28.0.1 KAFKA_ADDRESS=${KAFKA_NETWORK_ADDRESS} +# Kakfa username to authenticate with the Kafka service. +# KAFKA_USERNAME=${KAFKA_USERNAME} + # Port on which Kafka distributed streaming platform is running. # Default: KAFKA_PORT=19092 KAFKA_PORT=${KAFKA_PORT} @@ -149,7 +138,7 @@ MINIO_PORT=${MINIO_PORT} # Access key to authenticate with the MinIO service. # Default: MINIO_ACCESS_KEY=root -MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY} +# MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY} # Secret key corresponding to the access key for MinIO authentication. # Default: MINIO_SECRET_KEY=openIM123 diff --git a/docker-compose.yml b/docker-compose.yml index 233c8d492..22f57d329 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -7,55 +7,57 @@ networks: ipam: driver: default config: - - subnet: '${DOCKER_BRIDGE_SUBNET}' - gateway: '${DOCKER_BRIDGE_GATEWAY}' + - subnet: '${DOCKER_BRIDGE_SUBNET:-172.28.0.0/16}' + gateway: '${DOCKER_BRIDGE_GATEWAY:-172.28.0.1}' services: mongodb: - image: mongo:6.0.2 + image: mongo:${MONGODB_IMAGE_VERSION-6.0.2} ports: - - "${MONGO_PORT}:27017" + - "${MONGO_PORT:-37017}:27017" container_name: mongo command: --wiredTigerCacheSizeGB 1 --auth volumes: - - "${DATA_DIR}/components/mongodb/data/db:/data/db" - - "${DATA_DIR}/components/mongodb/data/logs:/data/logs" - - "${DATA_DIR}/components/mongodb/data/conf:/etc/mongo" - - ./scripts/mongo-init.sh:/docker-entrypoint-initdb.d/mongo-init.sh:ro" + - "${DATA_DIR:-./}/components/mongodb/data/db:/data/db" + - "${DATA_DIR:-./}/components/mongodb/data/logs:/data/logs" + - "${DATA_DIR:-./}/components/mongodb/data/conf:/etc/mongo" + - ./scripts/mongo-init.sh:/docker-entrypoint-initdb.d/mongo-init.sh:ro environment: - TZ=Asia/Shanghai - wiredTigerCacheSizeGB=1 - - MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME} - - MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD} - - MONGO_INITDB_DATABASE=${MONGO_DATABASE} + - MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME:-root} + - MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD:-openIM123} + - MONGO_INITDB_DATABASE=${MONGO_DATABASE:-openIM_v3} restart: always networks: server: - ipv4_address: ${MONGO_NETWORK_ADDRESS} + ipv4_address: ${MONGO_NETWORK_ADDRESS:-172.28.0.2} redis: - image: redis:7.0.0 + # image: redis:7.0.0 + image: redis:${REDIS_IMAGE_VERSION:-7.0.0} container_name: redis ports: - - "${REDIS_PORT}:6379" + - "${REDIS_PORT:-16379}:6379" volumes: - - "${DATA_DIR}/components/redis/data:/data" - - "${DATA_DIR}/components/redis/config/redis.conf:/usr/local/redis/config/redis.conf" + - "${DATA_DIR:-./}/components/redis/data:/data" + - "${DATA_DIR:-./}/components/redis/config/redis.conf:/usr/local/redis/config/redis.conf" environment: TZ: Asia/Shanghai restart: always sysctls: net.core.somaxconn: 1024 - command: redis-server --requirepass ${REDIS_PASSWORD} --appendonly yes + command: redis-server --requirepass ${REDIS_PASSWORD:-openIM123} --appendonly yes networks: server: - ipv4_address: ${REDIS_NETWORK_ADDRESS} + ipv4_address: ${REDIS_NETWORK_ADDRESS:-172.28.0.3} zookeeper: - image: bitnami/zookeeper:3.8 + # image: bitnami/zookeeper:3.8 + image: bitnami/zookeeper:${ZOOKEEPER_IMAGE_VERSION:-3.8} container_name: zookeeper ports: - - "${ZOOKEEPER_PORT}:2181" + - "${ZOOKEEPER_PORT:-12181}:2181" volumes: - "/etc/localtime:/etc/localtime" environment: @@ -64,81 +66,77 @@ services: restart: always networks: server: - ipv4_address: ${ZOOKEEPER_NETWORK_ADDRESS} + ipv4_address: 
${ZOOKEEPER_NETWORK_ADDRESS:-172.28.0.5} kafka: - image: 'bitnami/kafka:3.5.1' + # image: 'bitnami/kafka:3.5.1' + image: 'bitnami/kafka:${KAFKA_IMAGE_VERSION:-3.5.1}' container_name: kafka - user: root restart: always + user: ${KAFKA_USER:-root} ports: - - "${KAFKA_PORT}:9094" + - "${KAFKA_PORT:-19094}:9094" volumes: - ./scripts/create-topic.sh:/opt/bitnami/kafka/create-topic.sh - - ${DATA_DIR}/components/kafka:/bitnami/kafka + - "${DATA_DIR:-./}/components/kafka:/bitnami/kafka" command: > - bash -c " - /opt/bitnami/scripts/kafka/run.sh & sleep 5; /opt/bitnami/kafka/create-topic.sh; wait - " + bash -c "/opt/bitnami/scripts/kafka/run.sh & sleep 5; /opt/bitnami/kafka/create-topic.sh; wait" environment: - TZ=Asia/Shanghai - KAFKA_CFG_NODE_ID=0 - KAFKA_CFG_PROCESS_ROLES=controller,broker - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@:9093 - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094 - - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://${DOCKER_BRIDGE_GATEWAY}:${KAFKA_PORT} + - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://${DOCKER_BRIDGE_GATEWAY:-172.28.0.1}:${KAFKA_PORT:-19094} - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER networks: server: - ipv4_address: ${KAFKA_NETWORK_ADDRESS} + ipv4_address: ${KAFKA_NETWORK_ADDRESS:-172.28.0.4} minio: - image: minio/minio + # image: minio/minio + image: minio/minio:${MINIO_IMAGE_VERSION:-latest} ports: - - "${MINIO_PORT}:9000" + - "${MINIO_PORT:-10005}:9000" - "9090:9090" container_name: minio volumes: - - "${DATA_DIR}/components/mnt/data:/data" - - "${DATA_DIR}/components/mnt/config:/root/.minio" + - "${DATA_DIR:-./}/components/mnt/data:/data" + - "${DATA_DIR:-./}/components/mnt/config:/root/.minio" environment: - MINIO_ROOT_USER: "${MINIO_ACCESS_KEY}" - MINIO_ROOT_PASSWORD: "${MINIO_SECRET_KEY}" + MINIO_ROOT_USER: "${MINIO_ACCESS_KEY:-root}" + MINIO_ROOT_PASSWORD: "${MINIO_SECRET_KEY:-openIM123}" restart: always command: minio server /data --console-address ':9090' networks: server: - ipv4_address: ${MINIO_NETWORK_ADDRESS} + ipv4_address: ${MINIO_NETWORK_ADDRESS:-172.28.0.6} openim-web: - # image: ghcr.io/openimsdk/openim-web:latest - # image: registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-web:latest - # image: openim/openim-web:latest - image: ${IMAGE_REGISTRY}/openim-web:latest + # image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-web:latest + image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-web:${OPENIM_WEB_IMAGE_VERSION:-latest} container_name: openim-web environment: - - OPENIM_WEB_DIST_PATH=${OPENIM_WEB_DIST_PATH} - - OPENIM_WEB_PORT=${OPENIM_WEB_PORT} + - OPENIM_WEB_DIST_PATH=${OPENIM_WEB_DIST_PATH:-/app/dist} + - OPENIM_WEB_PORT=${OPENIM_WEB_PORT:-11001} restart: always ports: - - "${OPENIM_WEB_PORT}:11001" + - "${OPENIM_WEB_PORT:-11001}:11001" networks: server: - ipv4_address: ${OPENIM_WEB_NETWORK_ADDRESS} + ipv4_address: ${OPENIM_WEB_NETWORK_ADDRESS:-172.28.0.7} + # Uncomment and configure the following services as needed # openim-admin: - # image: ${IMAGE_REGISTRY}/openim-admin-front:v3.4.0 - # # image: ghcr.io/openimsdk/openim-admin-front:v3.4.0 - # # image: registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-admin-front:v3.4.0 - # # image: openim/openim-admin-front:v3.4.0 + # image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-admin-front:v3.4.0 # container_name: openim-admin # restart: always # ports: - # - "${OPENIM_ADMIN_FRONT_PORT}:80" + # - 
"${OPENIM_ADMIN_FRONT_PORT:-11002}:80" # networks: # server: - # ipv4_address: ${OPENIM_ADMIN_FRONT_NETWORK_ADDRESS} + # ipv4_address: ${OPENIM_ADMIN_FRONT_NETWORK_ADDRESS:-172.28.0.13} # prometheus: # image: prom/prometheus @@ -149,10 +147,10 @@ services: # - ./config/prometheus.yml:/etc/prometheus/prometheus.yml # - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml # ports: - # - "${PROMETHEUS_PORT}:9090" + # - "${PROMETHEUS_PORT:-19090}:9090" # networks: # server: - # ipv4_address: ${PROMETHEUS_NETWORK_ADDRESS} + # ipv4_address: ${PROMETHEUS_NETWORK_ADDRESS:-172.28.0.10} # alertmanager: # image: prom/alertmanager @@ -163,10 +161,10 @@ services: # - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml # - ./config/email.tmpl:/etc/alertmanager/email.tmpl # ports: - # - "${ALERT_MANAGER_PORT}:9093" + # - "${ALERT_MANAGER_PORT:-19093}:9093" # networks: # server: - # ipv4_address: ${ALERT_MANAGER_NETWORK_ADDRESS} + # ipv4_address: ${ALERT_MANAGER_NETWORK_ADDRESS:-172.28.0.14} # grafana: # image: grafana/grafana @@ -175,12 +173,12 @@ services: # user: root # restart: always # ports: - # - "${GRAFANA_PORT}:3000" + # - "${GRAFANA_PORT:-13000}:3000" # volumes: - # - ${DATA_DIR}/components/grafana:/var/lib/grafana + # - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana # networks: # server: - # ipv4_address: ${GRAFANA_NETWORK_ADDRESS} + # ipv4_address: ${GRAFANA_NETWORK_ADDRESS:-172.28.0.11} # node-exporter: # image: quay.io/prometheus/node-exporter @@ -188,7 +186,7 @@ services: # hostname: node-exporter # restart: always # ports: - # - "${NODE_EXPORTER_PORT}:9100" + # - "${NODE_EXPORTER_PORT:-19100}:9100" # networks: # server: - # ipv4_address: ${NODE_EXPORTER_NETWORK_ADDRESS} + # ipv4_address: ${NODE_EXPORTER_NETWORK_ADDRESS:-172.28.0.12} diff --git a/docs/contrib/util-makefile.md b/docs/contrib/util-makefile.md index e0331f50e..8bde02874 100644 --- a/docs/contrib/util-makefile.md +++ b/docs/contrib/util-makefile.md @@ -30,7 +30,7 @@ Executing `make tools` ensures verification and installation of the default tool - go-junit-report - go-gitlint -The installation path is situated at `/root/workspaces/openim/Open-IM-Server/_output/tools/`. +The installation path is situated at `./_output/tools/`. ## Toolset Categories diff --git a/scripts/init-config.sh b/scripts/init-config.sh index 380239d50..631bd68d7 100755 --- a/scripts/init-config.sh +++ b/scripts/init-config.sh @@ -2,7 +2,7 @@ # Copyright © 2023 OpenIM. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. +# You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 @@ -13,19 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This script automatically initializes the various configuration files -# Read: https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/init-config.md +# This script automatically initializes various configuration files and can generate example files. set -o errexit set -o nounset set -o pipefail +# Root directory of the OpenIM project OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
+# Source initialization script source "${OPENIM_ROOT}/scripts/lib/init.sh" +# Default environment file readonly ENV_FILE=${ENV_FILE:-"${OPENIM_ROOT}/scripts/install/environment.sh"} +# Templates for configuration files declare -A TEMPLATES=( ["${OPENIM_ROOT}/deployments/templates/env-template.yaml"]="${OPENIM_ROOT}/.env" ["${OPENIM_ROOT}/deployments/templates/openim.yaml"]="${OPENIM_ROOT}/config/config.yaml" @@ -33,22 +36,107 @@ declare -A TEMPLATES=( ["${OPENIM_ROOT}/deployments/templates/alertmanager.yml"]="${OPENIM_ROOT}/config/alertmanager.yml" ) -openim::log::info "Read more configuration information: https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md" +# Templates for example files +declare -A EXAMPLES=( + ["${OPENIM_ROOT}/deployments/templates/env-template.yaml"]="${OPENIM_ROOT}/config/templates/env.template" + ["${OPENIM_ROOT}/deployments/templates/openim.yaml"]="${OPENIM_ROOT}/config/templates/config.yaml.template" + ["${OPENIM_ROOT}/deployments/templates/prometheus.yml"]="${OPENIM_ROOT}/config/templates/prometheus.yml.template" + ["${OPENIM_ROOT}/deployments/templates/alertmanager.yml"]="${OPENIM_ROOT}/config/templates/alertmanager.yml.template" +) -# New variables for argument handling +# Command-line options FORCE_OVERWRITE=false SKIP_EXISTING=false +GENERATE_EXAMPLES=false +CLEAN_ENV_EXAMPLES=false -# Function to display help +# Function to display help information show_help() { echo "Usage: $(basename "$0") [options]" echo "Options:" - echo " -h, --help Show this help message" - echo " --force Overwrite existing files without prompt" - echo " --skip Skip generation if file exists" + echo " -h, --help Show this help message" + echo " --force Overwrite existing files without prompt" + echo " --skip Skip generation if file exists" + echo " --examples Generate example files" + echo " --clean-env-examples Generate example files in a clean environment" +} + +# Function to generate configuration files +generate_config_files() { + # Loop through each template in TEMPLATES + for template in "${!TEMPLATES[@]}"; do + # Read the corresponding output files for the template + IFS=';' read -ra OUTPUT_FILES <<< "${TEMPLATES[$template]}" + for output_file in "${OUTPUT_FILES[@]}"; do + # Check if the output file already exists + if [[ -f "${output_file}" ]]; then + # Handle existing file based on command-line options + if [[ "${FORCE_OVERWRITE}" == true ]]; then + openim::log::info "Force overwriting ${output_file}." + elif [[ "${SKIP_EXISTING}" == true ]]; then + openim::log::info "Skipping generation of ${output_file} as it already exists." + continue + else + # Ask user for confirmation to overwrite + echo -n "File ${output_file} already exists. Overwrite? (Y/N): " + read -r -n 1 REPLY + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + openim::log::info "Skipping generation of ${output_file}." + continue + fi + fi + fi + + # Process the template file to generate the output file + openim::log::info "⌚ Working with template file: ${template} to generate ${output_file}..." + if [[ ! -f "${OPENIM_ROOT}/scripts/genconfig.sh" ]]; then + openim::log::error "genconfig.sh script not found" + exit 1 + fi + "${OPENIM_ROOT}/scripts/genconfig.sh" "${ENV_FILE}" "${template}" > "${output_file}" || { + openim::log::error "Error processing template file ${template}" + exit 1 + } + sleep 0.5 + done + done +} + +# Function to generate example files +generate_example_files() { + for template in "${!EXAMPLES[@]}"; do + local example_file="${EXAMPLES[$template]}" + if [[ ! 
-f "${example_file}" ]]; then + openim::log::info "Generating example file: ${example_file} from ${template}..." + cp "${template}" "${example_file}" + fi + done +} + +declare -A env_vars=( + ["OPENIM_IP"]="172.28.0.1" + ["DATA_DIR"]="./" + ["LOG_STORAGE_LOCATION"]="../logs/" +) + +generate_clean_environment_examples() { + env_cmd="env -i" + for var in "${!env_vars[@]}"; do + env_cmd+=" $var='${env_vars[$var]}'" + done + + for template in "${!EXAMPLES[@]}"; do + local example_file="${EXAMPLES[$template]}" + openim::log::info "Generating example file: ${example_file} from ${template}..." + + eval "$env_cmd ${OPENIM_ROOT}/scripts/genconfig.sh '${ENV_FILE}' '${template}' > '${example_file}'" || { + openim::log::error "Error processing template file ${template}" + exit 1 + } + done } -# Parse command-line options while [[ $# -gt 0 ]]; do case "$1" in -h|--help) @@ -63,6 +151,14 @@ while [[ $# -gt 0 ]]; do SKIP_EXISTING=true shift ;; + --examples) + GENERATE_EXAMPLES=true + shift + ;; + --clean-env-examples) + CLEAN_ENV_EXAMPLES=true + shift + ;; *) echo "Unknown option: $1" show_help @@ -71,44 +167,19 @@ while [[ $# -gt 0 ]]; do esac done -for template in "${!TEMPLATES[@]}"; do - if [[ ! -f "${template}" ]]; then - openim::log::error_exit "Template file ${template} does not exist..." - exit 1 - fi -done +# Generate configuration files if requested +if [[ "${FORCE_OVERWRITE}" == true || "${SKIP_EXISTING}" == false ]]; then + generate_config_files +fi -for template in "${!TEMPLATES[@]}"; do - IFS=';' read -ra OUTPUT_FILES <<< "${TEMPLATES[$template]}" - for output_file in "${OUTPUT_FILES[@]}"; do - if [[ -f "${output_file}" ]]; then - if [[ "${FORCE_OVERWRITE}" == true ]]; then - openim::log::info "Force overwriting ${output_file}." - elif [[ "${SKIP_EXISTING}" == true ]]; then - openim::log::info "Skipping generation of ${output_file} as it already exists." - continue - else - echo -n "File ${output_file} already exists. Overwrite? (Y/N): " - read -r -n 1 REPLY - echo - if [[ ! $REPLY =~ ^[Yy]$ ]]; then - openim::log::info "Skipping generation of ${output_file}." - continue - fi - fi - fi +# Generate example files if --examples option is provided +if [[ "${GENERATE_EXAMPLES}" == true ]]; then + generate_example_files +fi - openim::log::info "⌚ Working with template file: ${template} to ${output_file}..." - if [[ ! -f "${OPENIM_ROOT}/scripts/genconfig.sh" ]]; then - openim::log::error "genconfig.sh script not found" - exit 1 - fi - "${OPENIM_ROOT}/scripts/genconfig.sh" "${ENV_FILE}" "${template}" > "${output_file}" || { - openim::log::error "Error processing template file ${template}" - exit 1 - } - sleep 0.5 - done -done +# Generate example files in a clean environment if --clean-env-examples option is provided +if [[ "${CLEAN_ENV_EXAMPLES}" == true ]]; then + generate_clean_environment_examples +fi -openim::log::success "✨ All configuration files have been successfully generated!" +openim::log::success "Configuration and example files generation complete!" \ No newline at end of file