Merge remote-tracking branch 'origin/release-v3.5' into release-v3.5

pull/1823/head
withchao 2 years ago
commit ababd785ef

@@ -10,15 +10,13 @@ networks:
- subnet: '${DOCKER_BRIDGE_SUBNET:-172.28.0.0/16}'
gateway: '${DOCKER_BRIDGE_GATEWAY:-172.28.0.1}'
services:
mongodb:
image: mongo:${MONGODB_IMAGE_VERSION-6.0.2}
ports:
- "${MONGO_PORT:-37017}:27017"
container_name: mongo
-command: --wiredTigerCacheSizeGB 1 --auth
+command: ["/bin/bash", "-c", "/docker-entrypoint-initdb.d/mongo-init.sh || true; docker-entrypoint.sh mongod --wiredTigerCacheSizeGB 1 --auth"]
volumes:
- "${DATA_DIR:-./}/components/mongodb/data/db:/data/db"
- "${DATA_DIR:-./}/components/mongodb/data/logs:/data/logs"
@@ -96,7 +94,7 @@ services:
ipv4_address: ${KAFKA_NETWORK_ADDRESS:-172.28.0.4}
minio:
-image: minio/minio:${MINIO_IMAGE_VERSION:-latest}
+image: minio/minio:${MINIO_IMAGE_VERSION:-RELEASE.2024-01-11T07-46-16Z}
ports:
- "${MINIO_PORT:-10005}:9000"
- "9090:9090"
@@ -114,21 +112,104 @@ services:
ipv4_address: ${MINIO_NETWORK_ADDRESS:-172.28.0.6}
openim-web:
-image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-web:${OPENIM_WEB_IMAGE_VERSION:-latest}
+image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-web:${OPENIM_WEB_IMAGE_VERSION:-v3.5.0-docker}
container_name: openim-web
-environment:
-  - OPENIM_WEB_DIST_PATH=${OPENIM_WEB_DIST_PATH:-/app/dist}
-  - OPENIM_WEB_PORT=${OPENIM_WEB_PORT:-11001}
restart: always
ports:
-- "${OPENIM_WEB_PORT:-11001}:11001"
+- "${OPENIM_WEB_PORT:-11001}:80"
networks:
server:
ipv4_address: ${OPENIM_WEB_NETWORK_ADDRESS:-172.28.0.7}
+### TODO: Uncomment, or deploy using openim docker: https://github.com/openimsdk/openim-docker
# Uncomment and configure the following services as needed
# openim-server:
# image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-server:${SERVER_IMAGE_VERSION:-main}
# container_name: openim-server
# ports:
# - "${OPENIM_WS_PORT:-10001}:${OPENIM_WS_PORT:-10001}"
# - "${API_OPENIM_PORT:-10002}:${API_OPENIM_PORT:-10002}"
# - "${API_PROM_PORT:-20100}:${API_PROM_PORT:-20100}"
# - "${USER_PROM_PORT:-20110}:${USER_PROM_PORT:-20110}"
# - "${FRIEND_PROM_PORT:-20120}:${FRIEND_PROM_PORT:-20120}"
# - "${MESSAGE_PROM_PORT:-20130}:${MESSAGE_PROM_PORT:-20130}"
# - "${MSG_GATEWAY_PROM_PORT:-20140}:${MSG_GATEWAY_PROM_PORT:-20140}"
# - "${GROUP_PROM_PORT:-20150}:${GROUP_PROM_PORT:-20150}"
# - "${AUTH_PROM_PORT:-20160}:${AUTH_PROM_PORT:-20160}"
# - "${PUSH_PROM_PORT:-20170}:${PUSH_PROM_PORT:-20170}"
# - "${CONVERSATION_PROM_PORT:-20230}:${CONVERSATION_PROM_PORT:-20230}"
# - "${RTC_PROM_PORT:-21300}:${RTC_PROM_PORT:-21300}"
# - "${THIRD_PROM_PORT:-21301}:${THIRD_PROM_PORT:-21301}"
# - "21400-21403:21400-21403"
# healthcheck:
# test: ["CMD", "/openim/openim-server/scripts/check-all.sh"]
# interval: 120s
# timeout: 30s
# retries: 5
# env_file:
# - .env
# environment:
# - OPENIM_IP=${OPENIM_IP:-127.0.0.1}
# volumes:
# - "${DATA_DIR:-./}/openim-server/logs:/openim/openim-server/logs"
# - "${DATA_DIR:-./}/openim-server/_output/logs:/openim/openim-server/_output/logs"
# - "${DATA_DIR:-./}/openim-server/config:/openim/openim-server/config"
# restart: always
# depends_on:
# - kafka
# - mysql
# - mongodb
# - redis
# - minio
# logging:
# driver: json-file
# options:
# max-size: "1g"
# max-file: "2"
# networks:
# server:
# ipv4_address: ${OPENIM_SERVER_NETWORK_ADDRESS:-172.28.0.8}
# openim-chat:
# image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-chat:${CHAT_IMAGE_VERSION:-main}
# container_name: openim-chat
# healthcheck:
# test: ["CMD", "/openim/openim-chat/scripts/check_all.sh"]
# interval: 60s
# timeout: 30s
# retries: 5
# env_file:
# - .env
# environment:
# - ZOOKEEPER_ADDRESS=${DOCKER_BRIDGE_GATEWAY:-172.28.0.1}
# - ZOOKEEPER_PORT=${ZOOKEEPER_PORT:-12181}
# - OPENIM_SERVER_ADDRESS=http://${OPENIM_SERVER_ADDRESS:-172.28.0.1}
# - API_OPENIM_PORT=${API_OPENIM_PORT:-10002}
# - MYSQL_ADDRESS=${DOCKER_BRIDGE_GATEWAY:-172.28.0.1}
# - MYSQL_PORT=${MYSQL_PORT:-13306}
# - REDIS_ADDRESS=${DOCKER_BRIDGE_GATEWAY:-172.28.0.1}
# - REDIS_PORT=${REDIS_PORT:-16379}
# ports:
# - "${OPENIM_CHAT_API_PORT:-10008}:10008"
# - "${OPENIM_ADMIN_API_PORT:-10009}:10009"
# volumes:
# - "${DATA_DIR:-./}/components/openim-chat/logs:/openim/openim-chat/logs"
# - "${DATA_DIR:-./}/components/openim-chat/config:/openim/openim-chat/config"
# restart: always
# # user: root:root
# logging:
# driver: json-file
# options:
# max-size: "1g"
# max-file: "2"
# networks:
# server:
# ipv4_address: ${OPENIM_CHAT_NETWORK_ADDRESS:-172.28.0.9}
# openim-admin:
-# image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-admin-front:v3.4.0
+# # https://github.com/openimsdk/open-im-server/issues/1662
+# image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-admin:${ADMIN_FRONT_VERSION:-toc-base-open-docker.35}
# container_name: openim-admin
# restart: always
# ports:
@@ -143,8 +224,8 @@ services:
# hostname: prometheus
# restart: always
# volumes:
-# - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
-# - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
+# - "${DATA_DIR:-./}/config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml"
+# - "${DATA_DIR:-./}/config/prometheus.yml:/etc/prometheus/prometheus.yml"
# ports:
# - "${PROMETHEUS_PORT:-19090}:9090"
# networks:
@@ -157,8 +238,8 @@ services:
# hostname: alertmanager
# restart: always
# volumes:
-# - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
-# - ./config/email.tmpl:/etc/alertmanager/email.tmpl
+# - ${DATA_DIR:-./}/config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
+# - ${DATA_DIR:-./}/config/email.tmpl:/etc/alertmanager/email.tmpl
# ports:
# - "${ALERT_MANAGER_PORT:-19093}:9093"
# networks:
@@ -174,7 +255,7 @@ services:
# ports:
# - "${GRAFANA_PORT:-13000}:3000"
# volumes:
-# - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
+# - "${DATA_DIR:-./}/components/grafana:/var/lib/grafana"
# networks:
# server:
# ipv4_address: ${GRAFANA_NETWORK_ADDRESS:-172.28.0.11}

@@ -124,6 +124,16 @@ func (m *msgServer) MarkMsgsAsRead(
return
}
}
+req_callback := &cbapi.CallbackSingleMsgReadReq{
+UserID: req.UserID,
+ConversationID: req.ConversationID,
+ContentType: conversation.ConversationType,
+Seqs: req.Seqs,
+}
+if err = CallbackSingleMsgRead(ctx, req_callback); err != nil {
+return nil, err
+}
if err = m.sendMarkAsReadNotification(ctx, req.ConversationID, conversation.ConversationType, req.UserID,
m.conversationAndGetRecvID(conversation, req.UserID), req.Seqs, hasReadSeq); err != nil {
return

@@ -70,7 +70,7 @@ func GetContent(msg *sdkws.MsgData) string {
}
func callbackBeforeSendSingleMsg(ctx context.Context, msg *pbchat.SendMsgReq) error {
-if !config.Config.Callback.CallbackBeforeSendSingleMsg.Enable {
+if !config.Config.Callback.CallbackBeforeSendSingleMsg.Enable || msg.MsgData.ContentType == constant.Typing {
return nil
}
req := &cbapi.CallbackBeforeSendSingleMsgReq{
@@ -85,7 +85,7 @@ func callbackBeforeSendSingleMsg(ctx context.Context, msg *pbchat.SendMsgReq) er
}
func callbackAfterSendSingleMsg(ctx context.Context, msg *pbchat.SendMsgReq) error {
-if !config.Config.Callback.CallbackAfterSendSingleMsg.Enable {
+if !config.Config.Callback.CallbackAfterSendSingleMsg.Enable || msg.MsgData.ContentType == constant.Typing {
return nil
}
req := &cbapi.CallbackAfterSendSingleMsgReq{
@@ -100,7 +100,7 @@ func callbackAfterSendSingleMsg(ctx context.Context, msg *pbchat.SendMsgReq) err
}
func callbackBeforeSendGroupMsg(ctx context.Context, msg *pbchat.SendMsgReq) error {
-if !config.Config.Callback.CallbackBeforeSendGroupMsg.Enable {
+if !config.Config.Callback.CallbackBeforeSendGroupMsg.Enable || msg.MsgData.ContentType == constant.Typing {
return nil
}
req := &cbapi.CallbackBeforeSendGroupMsgReq{
@@ -115,7 +115,7 @@ func callbackBeforeSendGroupMsg(ctx context.Context, msg *pbchat.SendMsgReq) err
}
func callbackAfterSendGroupMsg(ctx context.Context, msg *pbchat.SendMsgReq) error {
-if !config.Config.Callback.CallbackAfterSendGroupMsg.Enable {
+if !config.Config.Callback.CallbackAfterSendGroupMsg.Enable || msg.MsgData.ContentType == constant.Typing {
return nil
}
req := &cbapi.CallbackAfterSendGroupMsgReq{
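
All four hunks above add the same short-circuit: the webhook is skipped when the callback is disabled in config or when the message is a typing notification. A minimal sketch of that shared condition, using stand-in types because the real pbchat and constant identifiers are not reproduced in this diff:

package callbacksketch

// typingContentType stands in for constant.Typing; the concrete value used
// by open-im-server may differ.
const typingContentType = 113

// msgData mirrors only the field this guard needs from sdkws.MsgData.
type msgData struct {
	ContentType int32
}

// skipCallback reports whether a send-message callback should be skipped:
// either the callback is disabled, or the message is a transient typing
// notification that is not worth a webhook round trip.
func skipCallback(enabled bool, msg *msgData) bool {
	return !enabled || msg.ContentType == typingContentType
}
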

@@ -234,7 +234,7 @@ func (s *userServer) AccountCheck(ctx context.Context, req *pbuser.AccountCheckR
}
func (s *userServer) GetPaginationUsers(ctx context.Context, req *pbuser.GetPaginationUsersReq) (resp *pbuser.GetPaginationUsersResp, err error) {
-total, users, err := s.PageFindUser(ctx, constant.IMOrdinaryUser, req.Pagination)
+total, users, err := s.PageFindUser(ctx, constant.IMOrdinaryUser, constant.AppOrdinaryUsers, req.Pagination)
if err != nil {
return nil, err
}

@@ -94,9 +94,10 @@ type CallbackGroupMsgReadResp struct {
type CallbackSingleMsgReadReq struct {
CallbackCommand `json:"callbackCommand"`
-SendID string `json:"sendID"`
-ReceiveID string `json:"receiveID"`
-ContentType int64 `json:"contentType"`
+UserID string `json:"userID"`
+ConversationID string `json:"conversationID"`
+ContentType int32 `json:"contentType"`
+Seqs []int64 `json:"seqs"`
}
type CallbackSingleMsgReadResp struct {
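
For reference, a self-contained sketch of what the reworked read-receipt payload serializes to. The struct below mirrors the new fields from this hunk, with CallbackCommand simplified to a plain string rather than the real embedded type, and all example values made up:

package main

import (
	"encoding/json"
	"fmt"
)

// callbackSingleMsgReadReq mirrors the fields added in this diff.
type callbackSingleMsgReadReq struct {
	CallbackCommand string  `json:"callbackCommand"`
	UserID          string  `json:"userID"`
	ConversationID  string  `json:"conversationID"`
	ContentType     int32   `json:"contentType"`
	Seqs            []int64 `json:"seqs"`
}

func main() {
	req := callbackSingleMsgReadReq{
		CallbackCommand: "callbackSingleMsgRead", // assumed command name for illustration
		UserID:          "user-1",
		ConversationID:  "conversation-1",
		ContentType:     1, // conversation type copied from the conversation, per the diff
		Seqs:            []int64{101, 102, 103},
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// {"callbackCommand":"callbackSingleMsgRead","userID":"user-1","conversationID":"conversation-1","contentType":1,"seqs":[101,102,103]}
}
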

@@ -49,7 +49,7 @@ type UserDatabase interface {
// UpdateByMap update (zero value) external guarantee userID exists
UpdateByMap(ctx context.Context, userID string, args map[string]any) (err error)
// FindUser
-PageFindUser(ctx context.Context, level int64, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error)
+PageFindUser(ctx context.Context, level1 int64, level2 int64, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error)
// Page If not found, no error is returned
Page(ctx context.Context, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error)
// IsExist true as long as one exists
@@ -184,8 +184,8 @@ func (u *userDatabase) Page(ctx context.Context, pagination pagination.Paginatio
return u.userDB.Page(ctx, pagination)
}
-func (u *userDatabase) PageFindUser(ctx context.Context, level int64, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error) {
+func (u *userDatabase) PageFindUser(ctx context.Context, level1 int64, level2 int64, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error) {
-return u.userDB.PageFindUser(ctx, level, pagination)
+return u.userDB.PageFindUser(ctx, level1, level2, pagination)
}
// IsExist Does userIDs exist? As long as there is one, it will be true.

@@ -77,8 +77,15 @@ func (u *UserMgo) Page(ctx context.Context, pagination pagination.Pagination) (c
return mgoutil.FindPage[*relation.UserModel](ctx, u.coll, bson.M{}, pagination)
}
-func (u *UserMgo) PageFindUser(ctx context.Context, level int64, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error) {
+func (u *UserMgo) PageFindUser(ctx context.Context, level1 int64, level2 int64, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error) {
-return mgoutil.FindPage[*relation.UserModel](ctx, u.coll, bson.M{"app_manger_level": level}, pagination)
+query := bson.M{
+"$or": []bson.M{
+{"app_manger_level": level1},
+{"app_manger_level": level2},
+},
+}
+return mgoutil.FindPage[*relation.UserModel](ctx, u.coll, query, pagination)
}
func (u *UserMgo) GetAllUserID(ctx context.Context, pagination pagination.Pagination) (int64, []string, error) {
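
The new filter matches users whose level equals either of the two values. As a side note, the same selection can be expressed with $in; a small sketch using the mongo-driver bson package, with the field name copied verbatim from the diff (including its spelling) and example level values:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	level1, level2 := int64(1), int64(3) // example values, not taken from the diff

	// Filter as built in the patch.
	orFilter := bson.M{
		"$or": []bson.M{
			{"app_manger_level": level1},
			{"app_manger_level": level2},
		},
	}

	// Equivalent filter: one field tested against a set of values.
	inFilter := bson.M{
		"app_manger_level": bson.M{"$in": []int64{level1, level2}},
	}

	fmt.Println(orFilter, inFilter)
}
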

@@ -56,7 +56,7 @@ type UserModelInterface interface {
TakeNotification(ctx context.Context, level int64) (user []*UserModel, err error)
TakeByNickname(ctx context.Context, nickname string) (user []*UserModel, err error)
Page(ctx context.Context, pagination pagination.Pagination) (count int64, users []*UserModel, err error)
-PageFindUser(ctx context.Context, level int64, pagination pagination.Pagination) (count int64, users []*UserModel, err error)
+PageFindUser(ctx context.Context, level1 int64, level2 int64, pagination pagination.Pagination) (count int64, users []*UserModel, err error)
Exist(ctx context.Context, userID string) (exist bool, err error)
GetAllUserID(ctx context.Context, pagination pagination.Pagination) (count int64, userIDs []string, err error)
GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error)

@@ -30,6 +30,7 @@ OPENIM_VERBOSE=4
openim::log::info "\n# Begin to check all openim service"
+openim::log::status "Check all dependent service ports"
# Elegant printing function
# Elegant printing function
print_services_and_ports() {
@@ -60,7 +61,7 @@ print_services_and_ports "${OPENIM_DEPENDENCY_TARGETS[@]}" "${OPENIM_DEPENDENCY_
# OpenIM check
echo "++ The port being checked: ${OPENIM_SERVER_PORT_LISTARIES[@]}"
openim::log::info "\n## Check all dependent service ports"
-echo "+++ The port being checked: ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]}"
+echo "++ The port being checked: ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]}"
set +e

@@ -103,8 +103,9 @@ function openim::tools::start_service() {
printf "Specifying prometheus port: %s\n" "${prometheus_port}"
cmd="${cmd} --prometheus_port ${prometheus_port}"
fi
-openim::log::info "Starting ${binary_name}..."
+openim::log::status "Starting ${binary_name}..."
-${cmd}
+# Later, after discarding Docker, the Docker keyword is unreliable, and Kubepods is used
+${cmd} | tee -a "${LOG_FILE}"
}
function openim::tools::start() {

@@ -22,7 +22,6 @@
# example: ./coscli cp/sync -r /home/off-line/docker-off-line/ cos://openim-1306374445/openim/image/amd/off-line/off-line/ -e cos.ap-guangzhou.myqcloud.com
# https://cloud.tencent.com/document/product/436/71763
-# Tencent cos configuration
readonly BUCKET="openim-1306374445"
readonly REGION="ap-guangzhou"
readonly COS_RELEASE_DIR="openim-release"
@@ -36,8 +35,8 @@ readonly RELEASE_TARS="${LOCAL_OUTPUT_ROOT}/release-tars"
readonly RELEASE_IMAGES="${LOCAL_OUTPUT_ROOT}/release-images"
# OpenIM github account info
-readonly OPENIM_GITHUB_ORG=OpenIMSDK
+readonly OPENIM_GITHUB_ORG=openimsdk
-readonly OPENIM_GITHUB_REPO=Open-IM-Server
+readonly OPENIM_GITHUB_REPO=open-im-server
readonly CHAT_GITHUB_REPO=chat
readonly ARTIFACT=openim.tar.gz
@@ -46,6 +45,14 @@ readonly CHECKSUM=${ARTIFACT}.sha1sum
OPENIM_BUILD_CONFORMANCE=${OPENIM_BUILD_CONFORMANCE:-y}
OPENIM_BUILD_PULL_LATEST_IMAGES=${OPENIM_BUILD_PULL_LATEST_IMAGES:-y}
+if [ -z "${OPENIM_ROOT}" ]; then
+OPENIM_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
+fi
+if [ -z "${TOOLS_DIR}" ]; then
+TOOLS_DIR="${OPENIM_ROOT}/_output/tools"
+fi
# Validate a ci version
#
# Globals:
@@ -113,13 +120,14 @@ function openim::release::package_tarballs() {
openim::util::wait-for-jobs || { openim::log::error "previous tarball phase failed"; return 1; }
}
-function openim::release::updload_tarballs() {
+function openim::release::upload_tarballs() {
openim::log::info "upload ${RELEASE_TARS}/* to cos bucket ${BUCKET}."
for file in $(ls ${RELEASE_TARS}/*)
do
if [ "${COSTOOL}" == "coscli" ];then
-coscli cp "${file}" "cos://${BUCKET}/${COS_RELEASE_DIR}/${OPENIM_GIT_VERSION}/${file##*/}"
-coscli cp "${file}" "cos://${BUCKET}/${COS_RELEASE_DIR}/latest/${file##*/}"
+echo "++++ ${TOOLS_DIR}/coscli cp ${file} cos://${BUCKET}/${COS_RELEASE_DIR}/${OPENIM_GIT_VERSION}/${file##*/}"
+${TOOLS_DIR}/coscli cp "${file}" "cos://${BUCKET}/${COS_RELEASE_DIR}/${OPENIM_GIT_VERSION}/${file##*/}"
+${TOOLS_DIR}/coscli cp "${file}" "cos://${BUCKET}/${COS_RELEASE_DIR}/latest/${file##*/}"
else
coscmd upload "${file}" "${COS_RELEASE_DIR}/${OPENIM_GIT_VERSION}/"
coscmd upload "${file}" "${COS_RELEASE_DIR}/latest/"
@@ -139,6 +147,8 @@ function openim::release::package_src_tarball() {
\( -path "${OPENIM_ROOT}"/_\* -o \
-path "${OPENIM_ROOT}"/.git\* -o \
-path "${OPENIM_ROOT}"/.github\* -o \
+-path "${OPENIM_ROOT}"/components\* -o \
+-path "${OPENIM_ROOT}"/logs\* -o \
-path "${OPENIM_ROOT}"/.gitignore\* -o \
-path "${OPENIM_ROOT}"/.gsemver.yml\* -o \
-path "${OPENIM_ROOT}"/.config\* -o \
@@ -158,6 +168,7 @@ function openim::release::package_src_tarball() {
function openim::release::package_server_tarballs() {
# Find all of the built client binaries
local long_platforms=("${LOCAL_OUTPUT_BINPATH}"/*/*)
if [[ -n ${OPENIM_BUILD_PLATFORMS-} ]]; then
read -ra long_platforms <<< "${OPENIM_BUILD_PLATFORMS}"
fi
@@ -167,19 +178,23 @@ function openim::release::package_server_tarballs() {
local platform_tag
platform=${platform_long##${LOCAL_OUTPUT_BINPATH}/} # Strip LOCAL_OUTPUT_BINPATH
platform_tag=${platform/\//-} # Replace a "/" for a "-"
openim::log::status "Starting tarball: server $platform_tag"
(
local release_stage="${RELEASE_STAGE}/server/${platform_tag}/openim"
+openim::log::info "release_stage: ${release_stage}"
rm -rf "${release_stage}"
mkdir -p "${release_stage}/server/bin"
local server_bins=("${OPENIM_SERVER_BINARIES[@]}")
-# This fancy expression will expand to prepend a path
-# (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
-# server_bins array.
-cp "${server_bins[@]/bin/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+openim::log::info " Copy client binaries: ${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}"
+openim::log::info " Copy client binaries to: ${release_stage}/server/bin"
+# Copy server binaries
+cp "${server_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
"${release_stage}/server/bin/"
openim::release::clean_cruft
@@ -188,38 +203,48 @@ function openim::release::package_server_tarballs() {
openim::release::create_tarball "${package_name}" "${release_stage}/.."
) &
done
openim::log::status "Waiting on tarballs"
openim::util::wait-for-jobs || { openim::log::error "server tarball creation failed"; exit 1; }
}
-# Package up all of the cross compiled clients. Over time this should grow into
-# a full SDK
# Package up all of the cross compiled clients. Over time this should grow into
# a full SDK
function openim::release::package_client_tarballs() {
# Find all of the built client binaries
-local long_platforms=("${LOCAL_OUTPUT_BINPATH}"/*/*)
+local long_platforms=("${LOCAL_OUTPUT_BINTOOLSPATH}"/*/*)
if [[ -n ${OPENIM_BUILD_PLATFORMS-} ]]; then
read -ra long_platforms <<< "${OPENIM_BUILD_PLATFORMS}"
fi
+# echo "++++ LOCAL_OUTPUT_BINTOOLSPATH: ${LOCAL_OUTPUT_BINTOOLSPATH}"
+# LOCAL_OUTPUT_BINTOOLSPATH: /data/workspaces/open-im-server/_output/bin/tools
+# echo "++++ long_platforms: ${long_platforms[@]}"
+# long_platforms: /data/workspaces/open-im-server/_output/bin/tools/darwin/amd64 /data/workspaces/open-im-server/_output/bin/tools/darwin/arm64 /data/workspaces/open-im-server/_output/bin/tools/linux/amd64 /data/workspaces/open-im-server/_output/bin/tools/linux/arm64 /data/workspaces/open-im-server/_output/bin/tools/linux/mips64 /data/workspaces/open-im-server/_output/bin/tools/linux/mips64le /data/workspaces/open-im-server/_output/bin/tools/linux/ppc64le /data/workspaces/open-im-server/_output/bin/tools/linux/s390x /data/workspaces/open-im-server/_output/bin/tools/windows/amd64
for platform_long in "${long_platforms[@]}"; do
local platform
local platform_tag
-platform=${platform_long##${LOCAL_OUTPUT_BINPATH}/} # Strip LOCAL_OUTPUT_BINPATH
+platform=${platform_long##${LOCAL_OUTPUT_BINTOOLSPATH}/} # Strip LOCAL_OUTPUT_BINTOOLSPATH
platform_tag=${platform/\//-} # Replace a "/" for a "-"
-openim::log::status "Starting tarball: client $platform_tag"
+openim::log::status "Starting tarball: client $platform_tag" # darwin-amd64
(
local release_stage="${RELEASE_STAGE}/client/${platform_tag}/openim"
+openim::log::info "release_stage: ${release_stage}"
+# ++++ release_stage: /data/workspaces/open-im-server/_output/release-stage/client/darwin-amd64/openim
rm -rf "${release_stage}"
mkdir -p "${release_stage}/client/bin"
local client_bins=("${OPENIM_CLIENT_BINARIES[@]}")
-# This fancy expression will expand to prepend a path
-# (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
-# client_bins array.
-cp "${client_bins[@]/bin/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+# client_bins: changelog component conversion-msg conversion-mysql formitychecker imctl infra ncpu openim-web up35 versionchecker yamlfmt
+# Copy client binclient_bins:aries
+openim::log::info " Copy client binaries: ${client_bins[@]/#/${LOCAL_OUTPUT_BINTOOLSPATH}/${platform}/}"
+openim::log::info " Copy client binaries to: ${release_stage}/client/bin"
+cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINTOOLSPATH}/${platform}/}" \
"${release_stage}/client/bin/"
openim::release::clean_cruft
@@ -228,7 +253,6 @@ function openim::release::package_client_tarballs() {
openim::release::create_tarball "${package_name}" "${release_stage}/.."
) &
done
openim::log::status "Waiting on tarballs"
openim::util::wait-for-jobs || { openim::log::error "client tarball creation failed"; exit 1; }
}
@@ -354,7 +378,7 @@ function openim::release::create_docker_images_for_server() {
rm -rf "${docker_build_path}"
mkdir -p "${docker_build_path}"
ln "${binary_file_path}" "${docker_build_path}/${binary_name}"
-ln ""${OPENIM_ROOT}"/build/nsswitch.conf" "${docker_build_path}/nsswitch.conf"
+ln "${OPENIM_ROOT}/build/nsswitch.conf" "${docker_build_path}/nsswitch.conf"
chmod 0644 "${docker_build_path}/nsswitch.conf"
cat <<EOF > "${docker_file_path}"
FROM ${base_image}
@@ -399,7 +423,7 @@ EOF
function openim::release::package_openim_manifests_tarball() {
openim::log::status "Building tarball: manifests"
-local src_dir=""${OPENIM_ROOT}"/deployments"
+local src_dir="${OPENIM_ROOT}/deployments"
local release_stage="${RELEASE_STAGE}/manifests/openim"
rm -rf "${release_stage}"
@@ -420,7 +444,7 @@ function openim::release::package_openim_manifests_tarball() {
#cp "${src_dir}/openim-rpc-msg.yaml" "${dst_dir}"
#cp "${src_dir}/openim-rpc-third.yaml" "${dst_dir}"
#cp "${src_dir}/openim-rpc-user.yaml" "${dst_dir}"
-#cp ""${OPENIM_ROOT}"/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
+#cp "${OPENIM_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
openim::release::clean_cruft
@@ -442,6 +466,7 @@ function openim::release::package_final_tarball() {
# This isn't a "full" tarball anymore, but the release lib still expects
# artifacts under "full/openim/"
local release_stage="${RELEASE_STAGE}/full/openim"
+openim::log::info "release_stage(final): ${release_stage}"
rm -rf "${release_stage}"
mkdir -p "${release_stage}"
@@ -454,7 +479,8 @@ EOF
# We want everything in /scripts.
mkdir -p "${release_stage}/release"
-cp -R ""${OPENIM_ROOT}"/scripts/release" "${release_stage}/"
+mkdir -p "${OPENIM_ROOT}/scripts/release"
+cp -R "${OPENIM_ROOT}/scripts/release" "${release_stage}/"
cat <<EOF > "${release_stage}/release/get-openim-binaries.sh"
#!/usr/bin/env bash
# This file download openim client and server binaries from tencent cos bucket.
@@ -471,11 +497,11 @@ Server binary tarballs are no longer included in the OpenIM final tarball.
Run release/get-openim-binaries.sh to download client and server binaries.
EOF
-# Include hack/lib as a dependency for the cluster/ scripts
+# Include scripts/lib as a dependency for the cluster/ scripts
#mkdir -p "${release_stage}/hack"
-#cp -R ""${OPENIM_ROOT}"/hack/lib" "${release_stage}/hack/"
+#cp -R "${OPENIM_ROOT}/scripts/lib" "${release_stage}/scripts/"
-cp -R "${OPENIM_ROOT}"/{docs,configs,scripts,deployments,init,README.md,LICENSE} "${release_stage}/"
+cp -R "${OPENIM_ROOT}"/{docs,config,scripts,deployments,README.md,LICENSE} "${release_stage}/"
echo "${OPENIM_GIT_VERSION}" > "${release_stage}/version"
@@ -507,7 +533,7 @@ function openim::release::install_github_release(){
# - git-chglog
# - coscmd or coscli
function openim::release::verify_prereqs(){
-if [ -z "$(which github-release 2>/dev/null)" ]; then
+if [ -z "$(which ${TOOLS_DIR}/github-release 2>/dev/null)" ]; then
openim::log::info "'github-release' tool not installed, try to install it."
if ! openim::release::install_github_release; then
@@ -516,7 +542,7 @@ function openim::release::verify_prereqs(){
fi
fi
-if [ -z "$(which git-chglog 2>/dev/null)" ]; then
+if [ -z "$(which ${TOOLS_DIR}/git-chglog 2>/dev/null)" ]; then
openim::log::info "'git-chglog' tool not installed, try to install it."
if ! go install github.com/git-chglog/git-chglog/cmd/git-chglog@latest &>/dev/null; then
@@ -525,7 +551,7 @@ function openim::release::verify_prereqs(){
fi
fi
-if [ -z "$(which gsemver 2>/dev/null)" ]; then
+if [ -z "$(which ${TOOLS_DIR}/gsemver 2>/dev/null)" ]; then
openim::log::info "'gsemver' tool not installed, try to install it."
if ! go install github.com/arnaud-deprez/gsemver@latest &>/dev/null; then
@@ -534,8 +560,7 @@ function openim::release::verify_prereqs(){
fi
fi
-if [ -z "$(which ${COSTOOL} 2>/dev/null)" ]; then
+if [ -z "$(which ${TOOLS_DIR}/${COSTOOL} 2>/dev/null)" ]; then
openim::log::info "${COSTOOL} tool not installed, try to install it."
if ! make -C "${OPENIM_ROOT}" tools.install.${COSTOOL}; then
@@ -545,6 +570,7 @@ function openim::release::verify_prereqs(){
fi
if [ -z "${TENCENT_SECRET_ID}" -o -z "${TENCENT_SECRET_KEY}" ];then
+openim::log::info "You need set env: TENCENT_SECRET_ID(cos secretid) and TENCENT_SECRET_KEY(cos secretkey)"
openim::log::error "can not find env: TENCENT_SECRET_ID and TENCENT_SECRET_KEY"
return 1
fi
@@ -584,39 +610,57 @@ EOF
# https://github.com/github-release/github-release
function openim::release::github_release() {
# create a github release
+if [ -z "${GITHUB_TOKEN}" ];then
+openim::log::error "can not find env: GITHUB_TOKEN"
+return 1
+fi
openim::log::info "create a new github release with tag ${OPENIM_GIT_VERSION}"
-github-release release \
+${TOOLS_DIR}/github-release release \
--user ${OPENIM_GITHUB_ORG} \
--repo ${OPENIM_GITHUB_REPO} \
--tag ${OPENIM_GIT_VERSION} \
--description "" \
---pre-release
+--pre-release \
+--draft
# update openim tarballs
openim::log::info "upload ${ARTIFACT} to release ${OPENIM_GIT_VERSION}"
-github-release upload \
+${TOOLS_DIR}/github-release upload \
--user ${OPENIM_GITHUB_ORG} \
--repo ${OPENIM_GITHUB_REPO} \
--tag ${OPENIM_GIT_VERSION} \
--name ${ARTIFACT} \
+--label "openim-${OPENIM_GIT_VERSION}" \
--file ${RELEASE_TARS}/${ARTIFACT}
-openim::log::info "upload openim-src.tar.gz to release ${OPENIM_GIT_VERSION}"
-github-release upload \
+for file in ${RELEASE_TARS}/*.tar.gz; do
+if [[ -f "$file" ]]; then
+filename=$(basename "$file")
+openim::log::info "Update file ${filename} to release vertion ${OPENIM_GIT_VERSION}"
+${TOOLS_DIR}/github-release upload \
--user ${OPENIM_GITHUB_ORG} \
--repo ${OPENIM_GITHUB_REPO} \
--tag ${OPENIM_GIT_VERSION} \
---name "openim-src.tar.gz" \
+--name "${filename}" \
---file ${RELEASE_TARS}/openim-src.tar.gz
+--file "${file}"
+fi
+done
}
function openim::release::generate_changelog() {
openim::log::info "generate CHANGELOG-${OPENIM_GIT_VERSION#v}.md and commit it"
-git-chglog ${OPENIM_GIT_VERSION} > "${OPENIM_ROOT}"/CHANGELOG/CHANGELOG-${OPENIM_GIT_VERSION#v}.md
+local major_version=$(echo ${OPENIM_GIT_VERSION} | cut -d '+' -f 1)
+${TOOLS_DIR}/git-chglog --config ${OPENIM_ROOT}/CHANGELOG/.chglog/config.yml ${OPENIM_GIT_VERSION} > ${OPENIM_ROOT}/CHANGELOG/CHANGELOG-${major_version#v}.md
set +o errexit
-git add "${OPENIM_ROOT}"/CHANGELOG/CHANGELOG-${OPENIM_GIT_VERSION#v}.md
+git add "${OPENIM_ROOT}"/CHANGELOG/CHANGELOG-${major_version#v}.md
-git commit -a -m "docs(changelog): add CHANGELOG-${OPENIM_GIT_VERSION#v}.md"
+git commit -a -m "docs(changelog): add CHANGELOG-${major_version#v}.md"
-git push -f origin main # finally, push the CHANGELOG as well
+echo ""
+echo "##########################################################################"
+echo "git commit -a -m \"docs(changelog): add CHANGELOG-${major_version#v}.md\""
+openim::log::info "You need git push CHANGELOG-${major_version#v}.md to remote"
+echo "##########################################################################"
+echo ""
}

File diff suppressed because it is too large.

@@ -13,27 +13,138 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# This script automates the process of building and releasing OpenIM,
# including tasks like setting up the environment, verifying prerequisites,
# building commands, packaging tarballs, uploading tarballs, creating GitHub
# releases, and generating changelogs.
#
# Usage:
# ./scripts/release.sh [options]
# Options include:
# -h, --help : Show help message
# -se, --setup-env : Execute setup environment
# -vp, --verify-prereqs : Execute prerequisites verification
# -bc, --build-command : Execute build command
# -bi, --build-image : Execute build image (default: not executed)
# -pt, --package-tarballs : Execute package tarballs
# -ut, --upload-tarballs : Execute upload tarballs
# -gr, --github-release : Execute GitHub release
# -gc, --generate-changelog: Execute generate changelog
#
# This script can also be executed via the 'make release' command as an alternative.
#
# Dependencies:
# This script depends on external scripts found in the 'scripts' directory and
# assumes the presence of necessary tools and permissions for building and
# releasing software.
#
# Note:
# The script uses standard bash script practices with error handling,
# and it defaults to executing all steps if no specific option is provided.
#
# Build a OpenIM release. This will build the binaries, create the Docker
# images and other build artifacts.
+# Build a OpenIM release. This script supports various flags for flexible execution control.
set -o errexit
set -o nounset
set -o pipefail
OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${OPENIM_ROOT}/scripts/common.sh"
source "${OPENIM_ROOT}/scripts/lib/release.sh"
OPENIM_RELEASE_RUN_TESTS=${OPENIM_RELEASE_RUN_TESTS-y}
-openim::golang::setup_env
-openim::build::verify_prereqs
-openim::release::verify_prereqs
-#openim::build::build_image
-openim::build::build_command
-openim::release::package_tarballs
-openim::release::updload_tarballs
-git push origin ${VERSION}
-#openim::release::github_release
-#openim::release::generate_changelog
+# Function to show help message
+show_help() {
+echo "Usage: $(basename $0) [options]"
+echo "Options:"
+echo " -h, --help Show this help message"
+echo " -se, --setup-env Execute setup environment"
+echo " -vp, --verify-prereqs Execute prerequisites verification"
+echo " -bc, --build-command Execute build command"
+echo " -bi, --build-image Execute build image (default: not executed)"
+echo " -pt, --package-tarballs Execute package tarballs"
echo " -ut, --upload-tarballs Execute upload tarballs"
echo " -gr, --github-release Execute GitHub release"
echo " -gc, --generate-changelog Execute generate changelog"
}
# Initialize all actions to false
perform_setup_env=false
perform_verify_prereqs=false
perform_build_command=false
perform_build_image=false # New flag for build image
perform_package_tarballs=false
perform_upload_tarballs=false
perform_github_release=false
perform_generate_changelog=false
# Process command-line arguments
while getopts "hsevpbciptutgrgc-" opt; do
case "${opt}" in
h) show_help; exit 0 ;;
se) perform_setup_env=true ;;
vp) perform_verify_prereqs=true ;;
bc) perform_build_command=true ;;
bi) perform_build_image=true ;; # Handling new option
pt) perform_package_tarballs=true ;;
ut) perform_upload_tarballs=true ;;
gr) perform_github_release=true ;;
gc) perform_generate_changelog=true ;;
--) case "${OPTARG}" in
help) show_help; exit 0 ;;
setup-env) perform_setup_env=true ;;
verify-prereqs) perform_verify_prereqs=true ;;
build-command) perform_build_command=true ;;
build-image) perform_build_image=true ;; # Handling new long option
package-tarballs) perform_package_tarballs=true ;;
upload-tarballs) perform_upload_tarballs=true ;;
github-release) perform_github_release=true ;;
generate-changelog) perform_generate_changelog=true ;;
*) echo "Invalid option: --${OPTARG}"; show_help; exit 1 ;;
esac ;;
*) show_help; exit 1 ;;
esac
done
# Enable all actions by default if no options are provided
if [ "$#" -eq 0 ]; then
perform_setup_env=true
perform_verify_prereqs=true
perform_build_command=true
perform_package_tarballs=true
perform_upload_tarballs=true
perform_github_release=true
perform_generate_changelog=true
# TODO: Not enabling build_image by default
# perform_build_image=true
fi
# Function to perform actions
perform_action() {
local flag=$1
local message=$2
local command=$3
if [ "$flag" == true ]; then
openim::log::info "## $message..."
if ! $command; then
openim::log::errexit "Error in $message"
fi
fi
}
echo "Starting script execution..."
perform_action $perform_setup_env "Setting up environment" "openim::golang::setup_env"
perform_action $perform_verify_prereqs "Verifying prerequisites" "openim::build::verify_prereqs && openim::release::verify_prereqs"
perform_action $perform_build_command "Executing build command" "openim::build::build_command"
perform_action $perform_build_image "Building image" "openim::build::build_image"
perform_action $perform_package_tarballs "Packaging tarballs" "openim::release::package_tarballs"
perform_action $perform_upload_tarballs "Uploading tarballs" "openim::release::upload_tarballs"
perform_action $perform_github_release "Creating GitHub release" "openim::release::github_release"
perform_action $perform_generate_changelog "Generating changelog" "openim::release::generate_changelog"
openim::log::success "OpenIM Relase Script Execution Completed."

@@ -33,8 +33,8 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7"
-"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/redis/go-redis/v9"
"gopkg.in/yaml.v3"
)
@@ -43,9 +43,10 @@ const (
// defaultCfgPath is the default path of the configuration file.
defaultCfgPath = "../../../../../config/config.yaml"
minioHealthCheckDuration = 1
-maxRetry = 100
+maxRetry = 300
componentStartErrCode = 6000
configErrCode = 6001
+mongoConnTimeout = 30 * time.Second
)
const (
@@ -56,7 +57,6 @@
var (
cfgPath = flag.String("c", defaultCfgPath, "Path to the configuration file")
ErrComponentStart = errs.NewCodeError(componentStartErrCode, "ComponentStartErr")
ErrConfig = errs.NewCodeError(configErrCode, "Config file is incorrect")
)
@@ -95,7 +95,7 @@ func main() {
for i := 0; i < maxRetry; i++ {
if i != 0 {
-time.Sleep(3 * time.Second)
+time.Sleep(1 * time.Second)
}
fmt.Printf("Checking components Round %v...\n", i+1)
@@ -141,19 +141,25 @@ func getEnv(key, fallback string) string {
return fallback
}
-// checkMongo checks the MongoDB connection
+// checkMongo checks the MongoDB connection without retries
func checkMongo() (string, error) {
-// Use environment variables or fallback to config
uri := getEnv("MONGO_URI", buildMongoURI())
-client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(uri))
+ctx, cancel := context.WithTimeout(context.Background(), mongoConnTimeout)
+defer cancel()
str := "ths addr is:" + strings.Join(config.Config.Mongo.Address, ",")
+client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
if err != nil {
return "", errs.Wrap(errStr(err, str))
}
-defer client.Disconnect(context.TODO())
+defer client.Disconnect(context.Background())
+ctx, cancel = context.WithTimeout(context.Background(), mongoConnTimeout)
+defer cancel()
-if err = client.Ping(context.TODO(), nil); err != nil {
+if err = client.Ping(ctx, nil); err != nil {
return "", errs.Wrap(errStr(err, str))
}
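
Putting the hunk back together: the connection check now bounds both the dial and the ping with mongoConnTimeout. A stripped-down, self-contained sketch of that pattern (plain errors instead of the project's errs wrapper, and the URI hard-coded as an example):

package main

import (
	"context"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

const mongoConnTimeout = 30 * time.Second

func checkMongo(uri string) error {
	// Bound the initial connection.
	ctx, cancel := context.WithTimeout(context.Background(), mongoConnTimeout)
	defer cancel()
	client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
	if err != nil {
		return err
	}
	defer client.Disconnect(context.Background())

	// Bound the ping separately, as the patch does.
	ctx, cancel = context.WithTimeout(context.Background(), mongoConnTimeout)
	defer cancel()
	return client.Ping(ctx, nil)
}

func main() {
	// Example URI only; the real tool builds it from config or MONGO_URI.
	if err := checkMongo("mongodb://127.0.0.1:37017"); err != nil {
		fmt.Println("mongo check failed:", err)
		return
	}
	fmt.Println("mongo ok")
}
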
@@ -222,8 +228,8 @@ func checkMinio() (string, error) {
defer cancel()
if minioClient.IsOffline() {
-// str := fmt.Sprintf("Minio server is offline;%s", str)
-// return "", ErrComponentStart.Wrap(str)
+str := fmt.Sprintf("Minio server is offline;%s", str)
+return "", ErrComponentStart.Wrap(str)
}
// Check for localhost in API URL and Minio SignEndpoint
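
The change above turns the previously commented-out offline check into a hard failure. For context, a minimal sketch of how a minio-go v7 client exposes that check; the endpoint and credentials are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("127.0.0.1:10005", &minio.Options{
		Creds:  credentials.NewStaticV4("access-key", "secret-key", ""),
		Secure: false,
	})
	if err != nil {
		log.Fatal(err)
	}
	// IsOffline reports whether the client currently considers the endpoint
	// unreachable; the patched code returns ErrComponentStart in that case
	// instead of silently continuing.
	if client.IsOffline() {
		fmt.Println("minio server is offline")
		return
	}
	fmt.Println("minio ok")
}
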
