AndrewZuo01 2 years ago
commit e600876aff

@ -201,7 +201,6 @@ jobs:
- name: Build, Start, Check Services and Print Logs for Ubuntu
if: runner.os == 'Linux'
run: |
sudo make init && \
sudo make build && \
sudo make start && \
sudo make check || \

@ -3,12 +3,36 @@
before:
hooks:
- make clean
# You may remove this if you don't use go modules.
- make tidy
- make copyright.add
# you may remove this if you don't need go generate
- go generate ./...
git:
# What should be used to sort tags when gathering the current and previous
# tags if there are more than one tag in the same commit.
#
# Default: '-version:refname'
tag_sort: -version:creatordate
# What should be used to specify prerelease suffix while sorting tags when gathering
# the current and previous tags if there are more than one tag in the same commit.
#
# Since: v1.17
prerelease_suffix: "-"
# Tags to be ignored by GoReleaser.
# This means that GoReleaser will not pick up tags that match any of the
# provided values as either previous or current tags.
#
# Templates: allowed.
# Since: v1.21.
ignore_tags:
- nightly
# - "{{.Env.IGNORE_TAG}}"
snapshot:
name_template: "{{ incpatch .Version }}-next"
@ -495,4 +519,4 @@ checksum:
algorithm: sha256
release:
prerelease: auto
prerelease: auto

@ -506,7 +506,7 @@ callback:
# The number of ports needs to be consistent with msg_transfer_service_num in script/path_info.sh
prometheus:
enable: false
prometheusUrl: 172.28.0.1:13000
grafanaUrl: 172.28.0.1:13000
apiPrometheusPort: [20100]
userPrometheusPort: [ 20110 ]
friendPrometheusPort: [ 20120 ]

@ -1,42 +1,23 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
# ========= Basic Configuration ========
# ======================================
# The user for authentication or system operations.
# Default: OPENIM_USER=root
USER=${OPENIM_USER}
# Password associated with the specified user for authentication.
# Default: PASSWORD=openIM123
PASSWORD=${PASSWORD}
# Base URL for the application programming interface (API).
# Default: API_URL=http://172.28.0.1:10002
API_URL=${API_URL}
# Directory path for storing data files or related information.
# Default: DATA_DIR=./
# -----------------------------------------------------------------------------
# General Configuration
# This section contains general configuration options for the entire environment.
# These options can be set via environment variables. If both environment variables
# and settings in this .env file exist, the environment variables take precedence.
# -----------------------------------------------------------------------------
# ==========================
# General Configuration
# ==========================
# These settings apply to the overall environment.
# Data storage directory for persistent data.
# Example: DATA_DIR=/path/to/data
DATA_DIR=${DATA_DIR}
# Choose the appropriate image registry; the default is the GitHub registry.
# Docker Hub is also available, and Chinese users can use Alibaba Cloud.
# export IMAGE_REGISTRY="ghcr.io/openimsdk"
# export IMAGE_REGISTRY="openim"
# export IMAGE_REGISTRY="registry.cn-hangzhou.aliyuncs.com/openimsdk"
# Docker image registry. Uncomment the preferred one.
# Options: ghcr.io/openimsdk, openim, registry.cn-hangzhou.aliyuncs.com/openimsdk
# IMAGE_REGISTRY="ghcr.io/openimsdk"
# IMAGE_REGISTRY="openim"
# IMAGE_REGISTRY="registry.cn-hangzhou.aliyuncs.com/openimsdk"
IMAGE_REGISTRY=${IMAGE_REGISTRY}
# ======================================
@ -47,10 +28,9 @@ IMAGE_REGISTRY=${IMAGE_REGISTRY}
# Default: DOCKER_BRIDGE_SUBNET=172.28.0.0/16
DOCKER_BRIDGE_SUBNET=${DOCKER_BRIDGE_SUBNET}
# Gateway for the Docker network.
# Default: DOCKER_BRIDGE_GATEWAY=172.28.0.1
# Specify the IP addresses of certain containers. In general, you do not need
# to modify these settings; they exist mainly to facilitate debugging.
DOCKER_BRIDGE_GATEWAY=${DOCKER_BRIDGE_GATEWAY}
MONGO_NETWORK_ADDRESS=${MONGO_NETWORK_ADDRESS}
REDIS_NETWORK_ADDRESS=${REDIS_NETWORK_ADDRESS}
KAFKA_NETWORK_ADDRESS=${KAFKA_NETWORK_ADDRESS}
@ -65,25 +45,45 @@ NODE_EXPORTER_NETWORK_ADDRESS=${NODE_EXPORTER_NETWORK_ADDRESS}
OPENIM_ADMIN_FRONT_NETWORK_ADDRESS=${OPENIM_ADMIN_FRONT_NETWORK_ADDRESS}
ALERT_MANAGER_NETWORK_ADDRESS=${ALERT_MANAGER_NETWORK_ADDRESS}
# ===============================================
# = Component Extension Configuration =
# ===============================================
# ==============================================================================
# Configuration Update Instructions
# ==============================================================================
# This header outlines the methods to update common variables in config.yaml and .env files.
# These instructions are vital for maintaining the OpenIM environment's configuration.
#
# METHOD 1: Regenerate All Configurations
# ----------------------------------------
# Use this method to regenerate all configurations.
# Steps:
# 1. Delete existing config files:
# - openim-server/config/config.yaml
# - openim-chat/config/config.yaml
# 2. Modify the .env file as required.
# 3. Run 'docker compose up -d'. This will regenerate:
# - config/config.yaml
#
# METHOD 2: Modify Individual Configuration Files
# -----------------------------------------------
# Use this method to update specific configuration files.
# Steps:
# 1. Modify the .env file as necessary.
# 2. Update the corresponding entries in:
# - config/config.yaml
# 3. Restart the services with 'docker compose up -d'.
# 4. Special Note: If you modify OPENIM_IP, API_OPENIM_PORT, or MINIO_PORT in .env,
# ensure to update the corresponding services and configurations accordingly.
#
# It is essential to follow these methods to ensure consistent and correct application behavior.
# ==============================================================================
# Local IP address of the service. Modify if necessary.
# Example: OPENIM_IP=172.28.0.1
OPENIM_IP=${OPENIM_IP}
# ============ Component Extension Configuration ==========
# ----- ZooKeeper Configuration -----
# Address or hostname for the ZooKeeper service.
# Default: ZOOKEEPER_ADDRESS=172.28.0.1
ZOOKEEPER_ADDRESS=${ZOOKEEPER_NETWORK_ADDRESS}
# Port for ZooKeeper service.
# Default: ZOOKEEPER_PORT=12181
ZOOKEEPER_PORT=${ZOOKEEPER_PORT}
# ----- MongoDB Configuration -----
# Address or hostname for the MongoDB service.
# Default: MONGO_ADDRESS=172.28.0.1
MONGO_ADDRESS=${MONGO_NETWORK_ADDRESS}
# Port on which MongoDB service is running.
# Default: MONGO_PORT=37017
# MONGO_PORT=${MONGO_PORT}
@ -101,9 +101,6 @@ MONGO_PASSWORD=${MONGO_PASSWORD}
MONGO_DATABASE=${MONGO_DATABASE}
# ----- Redis Configuration -----
# Address or hostname for the Redis service.
# Default: REDIS_ADDRESS=172.28.0.1
REDIS_ADDRESS=${REDIS_NETWORK_ADDRESS}
# Port on which Redis in-memory data structure store is running.
# Default: REDIS_PORT=16379
@ -113,11 +110,6 @@ REDIS_PORT=${REDIS_PORT}
# Default: REDIS_PASSWORD=openIM123
REDIS_PASSWORD=${REDIS_PASSWORD}
# ----- Kafka Configuration -----
# Address or hostname for the Kafka service.
# Default: KAFKA_ADDRESS=172.28.0.1
KAFKA_ADDRESS=${KAFKA_NETWORK_ADDRESS}
# Kafka username to authenticate with the Kafka service.
# KAFKA_USERNAME=${KAFKA_USERNAME}
@ -129,20 +121,13 @@ KAFKA_PORT=${KAFKA_PORT}
# Default: KAFKA_LATESTMSG_REDIS_TOPIC=latestMsgToRedis
KAFKA_LATESTMSG_REDIS_TOPIC=${KAFKA_LATESTMSG_REDIS_TOPIC}
# Topic in Kafka for pushing messages (e.g. notifications or updates).
# Default: KAFKA_MSG_PUSH_TOPIC=msgToPush
KAFKA_MSG_PUSH_TOPIC=${KAFKA_MSG_PUSH_TOPIC}
# Topic in Kafka for storing offline messages in MongoDB.
# Default: KAFKA_OFFLINEMSG_MONGO_TOPIC=offlineMsgToMongoMysql
KAFKA_OFFLINEMSG_MONGO_TOPIC=${KAFKA_OFFLINEMSG_MONGO_TOPIC}
# ----- MinIO Configuration -----
# Address or hostname for the MinIO object storage service.
# Default: MINIO_ADDRESS=172.28.0.1
MINIO_ADDRESS=${MINIO_NETWORK_ADDRESS}
# Port on which MinIO object storage service is running.
# MINIO_PORT
# ----------
# MINIO_PORT sets the port for the MinIO object storage service.
# Upon changing this port, the MinIO endpoint URLs in the `config/config.yaml` file must be updated
# to reflect this change. The endpoints include both the 'endpoint' and 'signEndpoint'
# under the MinIO configuration.
#
# Default: MINIO_PORT=10005
MINIO_PORT=${MINIO_PORT}
@ -155,19 +140,11 @@ MINIO_PORT=${MINIO_PORT}
MINIO_SECRET_KEY=${MINIO_SECRET_KEY}
# ----- Prometheus Configuration -----
# Address or hostname for the Prometheus service.
# Default: PROMETHEUS_ADDRESS=172.28.0.1
PROMETHEUS_ADDRESS=${PROMETHEUS_NETWORK_ADDRESS}
# Port on which Prometheus service is running.
# Default: PROMETHEUS_PORT=19090
PROMETHEUS_PORT=${PROMETHEUS_PORT}
# ----- Grafana Configuration -----
# Address or hostname for the Grafana service.
# Default: GRAFANA_ADDRESS=172.28.0.1
GRAFANA_ADDRESS=${GRAFANA_NETWORK_ADDRESS}
# Port on which Grafana service is running.
# Default: GRAFANA_PORT=13000
GRAFANA_PORT=${GRAFANA_PORT}
@ -184,27 +161,22 @@ OPENIM_WEB_DIST_PATH=${OPENIM_WEB_DIST_PATH}
# Default: OPENIM_WEB_PORT=11001
OPENIM_WEB_PORT=${OPENIM_WEB_PORT}
# Address or hostname for the OpenIM web service.
# Default: OPENIM_WEB_ADDRESS=172.28.0.1
OPENIM_WEB_ADDRESS=${OPENIM_WEB_NETWORK_ADDRESS}
# ======================================
# ========= OpenIM Server ==============
# ======================================
# Address or hostname for the OpenIM server.
# Default: OPENIM_SERVER_ADDRESS=172.28.0.1
OPENIM_SERVER_ADDRESS=${OPENIM_SERVER_NETWORK_ADDRESS}
# Port for the OpenIM WebSockets.
# Default: OPENIM_WS_PORT=10001
OPENIM_WS_PORT=${OPENIM_WS_PORT}
# Port for the OpenIM API.
# API_OPENIM_PORT
# ---------------
# This variable defines the port on which the OpenIM API service will listen.
# When changing this port, it's essential to update the apiURL in the config.yaml file
# to ensure the API service is accessible at the new port.
#
# Default: API_OPENIM_PORT=10002
API_OPENIM_PORT=${API_OPENIM_PORT}
# ======================================
# ========== OpenIM Chat ===============
# ======================================
@ -213,19 +185,20 @@ API_OPENIM_PORT=${API_OPENIM_PORT}
# Default: CHAT_IMAGE_VERSION=main
CHAT_IMAGE_VERSION=${CHAT_IMAGE_VERSION}
# Address or hostname for the OpenIM chat service.
# Default: OPENIM_CHAT_ADDRESS=172.28.0.1
OPENIM_CHAT_ADDRESS=${OPENIM_CHAT_NETWORK_ADDRESS}
# Port for the OpenIM chat API.
# Default: OPENIM_CHAT_API_PORT=10008
# !!! TODO: Do not change the chat port https://github.com/openimsdk/chat/issues/365
OPENIM_CHAT_API_PORT=${OPENIM_CHAT_API_PORT}
# Port for the OpenIM admin API.
# Default: OPENIM_ADMIN_API_PORT=10009
# !!! TODO: Do not change the chat port https://github.com/openimsdk/chat/issues/365
OPENIM_ADMIN_API_PORT=${OPENIM_ADMIN_API_PORT}
# Directory path for storing data files or related information for OpenIM chat.
# Default: OPENIM_CHAT_DATA_DIR=./openim-chat/main
OPENIM_CHAT_DATA_DIR=${OPENIM_CHAT_DATA_DIR}
# ======================================
# ========== OpenIM Admin ==============
# ======================================
@ -233,10 +206,6 @@ OPENIM_CHAT_DATA_DIR=${OPENIM_CHAT_DATA_DIR}
# Branch name for OpenIM server.
# Default: SERVER_IMAGE_VERSION=main
SERVER_IMAGE_VERSION=${SERVER_IMAGE_VERSION}
# Port for the OpenIM admin API.
# Default: OPENIM_ADMIN_API_PORT=10009
OPENIM_ADMIN_API_PORT=${OPENIM_ADMIN_API_PORT}
# Port for the node exporter.
# Default: NODE_EXPORTER_PORT=19100
@ -256,4 +225,4 @@ OPENIM_ADMIN_FRONT_PORT=${OPENIM_ADMIN_FRONT_PORT}
# Port for the alertmanager.
# Default: ALERT_MANAGER_PORT=19093
ALERT_MANAGER_PORT=${ALERT_MANAGER_PORT}
ALERT_MANAGER_PORT=${ALERT_MANAGER_PORT}

@ -506,7 +506,7 @@ callback:
# The number of ports needs to be consistent with msg_transfer_service_num in script/path_info.sh
prometheus:
enable: ${PROMETHEUS_ENABLE}
prometheusUrl: ${PROMETHEUS_URL}
grafanaUrl: ${GRAFANA_URL}
apiPrometheusPort: [${API_PROM_PORT}]
userPrometheusPort: [ ${USER_PROM_PORT} ]
friendPrometheusPort: [ ${FRIEND_PROM_PORT} ]

@ -122,9 +122,9 @@ services:
server:
ipv4_address: ${OPENIM_WEB_NETWORK_ADDRESS:-172.28.0.7}
# Uncomment and configure the following services as needed
## Uncomment and configure the following services as needed
# openim-admin:
# image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-admin-front:v3.4.0
# image: ${IMAGE_REGISTRY:-ghcr.io/openimsdk}/openim-admin:toc-base-open-docker.35
# container_name: openim-admin
# restart: always
# ports:
@ -167,6 +167,12 @@ services:
# hostname: grafana
# user: root
# restart: always
# environment:
# - GF_SECURITY_ALLOW_EMBEDDING=true
# - GF_SESSION_COOKIE_SAMESITE=none
# - GF_SESSION_COOKIE_SECURE=true
# - GF_AUTH_ANONYMOUS_ENABLED=true
# - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
# ports:
# - "${GRAFANA_PORT:-13000}:3000"
# volumes:

@ -4,7 +4,7 @@ go 1.19
require (
firebase.google.com/go v3.13.0+incompatible
github.com/OpenIMSDK/protocol v0.0.41
github.com/OpenIMSDK/protocol v0.0.43
github.com/OpenIMSDK/tools v0.0.21
github.com/bwmarrin/snowflake v0.3.0 // indirect
github.com/dtm-labs/rockscache v0.1.1

@ -20,8 +20,8 @@ github.com/AndrewZuo01/protocol v0.0.0-20231219031520-648989b91fca/go.mod h1:F25
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/IBM/sarama v1.41.3 h1:MWBEJ12vHC8coMjdEXFq/6ftO6DUZnQlFYcxtOJFa7c=
github.com/IBM/sarama v1.41.3/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ=
github.com/OpenIMSDK/protocol v0.0.42 h1:vIWXqZJZZ1ddleJA25fxhjZ1GyEHATpYM3wVWh4/+PY=
github.com/OpenIMSDK/protocol v0.0.42/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
github.com/OpenIMSDK/protocol v0.0.43 h1:8B921vEyO7r0AfQfZd7kCycYja+hJ2vuIZsKge/WRhU=
github.com/OpenIMSDK/protocol v0.0.43/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
github.com/OpenIMSDK/tools v0.0.21 h1:iTapc2mIEVH/xl5Nd6jfwPub11Pgp44tVcE1rjB3a48=
github.com/OpenIMSDK/tools v0.0.21/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI=
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=

@ -130,5 +130,5 @@ func (o *ThirdApi) SearchLogs(c *gin.Context) {
}
func GetPrometheus(c *gin.Context) {
c.Redirect(http.StatusFound, config2.Config.Prometheus.PrometheusUrl)
c.Redirect(http.StatusFound, config2.Config.Prometheus.GrafanaUrl)
}

@ -26,6 +26,7 @@ const (
Compression = "compression"
GzipCompressionProtocol = "gzip"
BackgroundStatus = "isBackground"
MsgResp = "isMsgResp"
)
const (

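The new MsgResp constant ("isMsgResp") is read from the websocket query string by ParseWSArgs later in this commit. A minimal sketch of how a client could opt in when dialing the gateway; the query keys "token", "sendID", and "platformID" and the gateway address are assumptions (only "isMsgResp", "compression", and "gzip" are confirmed by this hunk).

// Sketch: building a dial URL that asks the gateway to report argument/auth
// errors as a JSON message frame instead of an HTTP error response.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("token", "<jwt>")      // assumed key for the Token constant
	q.Set("sendID", "user-1")    // assumed key for WsUserID
	q.Set("platformID", "1")     // assumed key for PlatformID
	q.Set("compression", "gzip") // Compression + GzipCompressionProtocol above
	q.Set("isMsgResp", "true")   // the new MsgResp flag
	u := url.URL{Scheme: "ws", Host: "127.0.0.1:10001", RawQuery: q.Encode()}
	fmt.Println(u.String())
}
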
@ -16,7 +16,10 @@ package msggateway
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/OpenIMSDK/tools/apiresp"
"net/http"
"os"
"os/signal"
@ -342,11 +345,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
if !clientOK {
return
}
isDeleteUser := ws.clients.deleteClients(newClient.UserID, oldClients)
if isDeleteUser {
ws.onlineUserNum.Add(-1)
}
ws.clients.deleteClients(newClient.UserID, oldClients)
for _, c := range oldClients {
err := c.KickOnlineMessage()
if err != nil {
@ -422,84 +421,102 @@ func (ws *WsServer) unregisterClient(client *Client) {
)
}
func (ws *WsServer) wsHandler(w http.ResponseWriter, r *http.Request) {
connContext := newContext(w, r)
func (ws *WsServer) ParseWSArgs(r *http.Request) (args *WSArgs, err error) {
var v WSArgs
defer func() {
args = &v
}()
query := r.URL.Query()
v.MsgResp, _ = strconv.ParseBool(query.Get(MsgResp))
if ws.onlineUserConnNum.Load() >= ws.wsMaxConnNum {
httpError(connContext, errs.ErrConnOverMaxNumLimit)
return
return nil, errs.ErrConnOverMaxNumLimit.Wrap("over max conn num limit")
}
var (
token string
userID string
platformIDStr string
exists bool
compression bool
)
token, exists = connContext.Query(Token)
if !exists {
httpError(connContext, errs.ErrConnArgsErr)
return
if v.Token = query.Get(Token); v.Token == "" {
return nil, errs.ErrConnArgsErr.Wrap("token is empty")
}
userID, exists = connContext.Query(WsUserID)
if !exists {
httpError(connContext, errs.ErrConnArgsErr)
return
if v.UserID = query.Get(WsUserID); v.UserID == "" {
return nil, errs.ErrConnArgsErr.Wrap("sendID is empty")
}
platformIDStr, exists = connContext.Query(PlatformID)
if !exists {
httpError(connContext, errs.ErrConnArgsErr)
return
platformIDStr := query.Get(PlatformID)
if platformIDStr == "" {
return nil, errs.ErrConnArgsErr.Wrap("platformID is empty")
}
platformID, err := strconv.Atoi(platformIDStr)
if err != nil {
httpError(connContext, errs.ErrConnArgsErr)
return
return nil, errs.ErrConnArgsErr.Wrap("platformID is not int")
}
if err = authverify.WsVerifyToken(token, userID, platformID); err != nil {
httpError(connContext, err)
return
v.PlatformID = platformID
if err = authverify.WsVerifyToken(v.Token, v.UserID, platformID); err != nil {
return nil, err
}
m, err := ws.cache.GetTokensWithoutError(context.Background(), userID, platformID)
if query.Get(Compression) == GzipCompressionProtocol {
v.Compression = true
}
if r.Header.Get(Compression) == GzipCompressionProtocol {
v.Compression = true
}
m, err := ws.cache.GetTokensWithoutError(context.Background(), v.UserID, platformID)
if err != nil {
httpError(connContext, err)
return
return nil, err
}
if v, ok := m[token]; ok {
if v, ok := m[v.Token]; ok {
switch v {
case constant.NormalToken:
case constant.KickedToken:
httpError(connContext, errs.ErrTokenKicked.Wrap())
return
return nil, errs.ErrTokenKicked.Wrap()
default:
httpError(connContext, errs.ErrTokenUnknown.Wrap())
return
return nil, errs.ErrTokenUnknown.Wrap(fmt.Sprintf("token status is %d", v))
}
} else {
httpError(connContext, errs.ErrTokenNotExist.Wrap())
return
return nil, errs.ErrTokenNotExist.Wrap()
}
return &v, nil
}
wsLongConn := newGWebSocket(WebSocket, ws.handshakeTimeout, ws.writeBufferSize)
err = wsLongConn.GenerateLongConn(w, r)
if err != nil {
httpError(connContext, err)
return
}
compressProtoc, exists := connContext.Query(Compression)
if exists {
if compressProtoc == GzipCompressionProtocol {
compression = true
type WSArgs struct {
Token string
UserID string
PlatformID int
Compression bool
MsgResp bool
}
func (ws *WsServer) wsHandler(w http.ResponseWriter, r *http.Request) {
connContext := newContext(w, r)
args, pErr := ws.ParseWSArgs(r)
var wsLongConn *GWebSocket
if args.MsgResp {
wsLongConn = newGWebSocket(WebSocket, ws.handshakeTimeout, ws.writeBufferSize)
if err := wsLongConn.GenerateLongConn(w, r); err != nil {
httpError(connContext, err)
return
}
}
compressProtoc, exists = connContext.GetHeader(Compression)
if exists {
if compressProtoc == GzipCompressionProtocol {
compression = true
data, err := json.Marshal(apiresp.ParseError(pErr))
if err != nil {
_ = wsLongConn.Close()
return
}
if err := wsLongConn.WriteMessage(MessageText, data); err != nil {
_ = wsLongConn.Close()
return
}
if pErr != nil {
_ = wsLongConn.Close()
return
}
} else {
if pErr != nil {
httpError(connContext, pErr)
return
}
wsLongConn = newGWebSocket(WebSocket, ws.handshakeTimeout, ws.writeBufferSize)
if err := wsLongConn.GenerateLongConn(w, r); err != nil {
httpError(connContext, err)
return
}
}
client := ws.clientPool.Get().(*Client)
client.ResetClient(connContext, wsLongConn, connContext.GetBackground(), compression, ws, token)
client.ResetClient(connContext, wsLongConn, connContext.GetBackground(), args.Compression, ws, args.Token)
ws.registerChan <- client
go client.readMessage()
}

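When isMsgResp is set, wsHandler above upgrades the connection even if ParseWSArgs fails and writes the error as a JSON text frame (built by apiresp.ParseError) before closing, rather than answering with an HTTP error. A hedged client-side sketch, assuming a gorilla/websocket client and an error body shaped like {errCode, errMsg, errDlt}; the exact apiresp JSON field names are an assumption, not shown in this diff.

// Sketch: connect with isMsgResp=true and decode the first frame as an error.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

type wsError struct {
	ErrCode int    `json:"errCode"`
	ErrMsg  string `json:"errMsg"`
	ErrDlt  string `json:"errDlt"`
}

func main() {
	// token/sendID/platformID deliberately omitted to trigger a parse error.
	u := "ws://127.0.0.1:10001/?isMsgResp=true"
	conn, _, err := websocket.DefaultDialer.Dial(u, nil)
	if err != nil {
		log.Fatal(err) // without isMsgResp the gateway replies with an HTTP error instead
	}
	defer conn.Close()
	_, payload, err := conn.ReadMessage()
	if err != nil {
		log.Fatal(err)
	}
	var e wsError
	if err := json.Unmarshal(payload, &e); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("gateway rejected the connection: code=%d msg=%q\n", e.ErrCode, e.ErrMsg)
}
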
@ -16,7 +16,6 @@ package user
import (
"context"
pbuser "github.com/OpenIMSDK/protocol/user"
"github.com/OpenIMSDK/tools/utils"
@ -67,16 +66,16 @@ func CallbackBeforeUpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUserI
cbReq := &cbapi.CallbackBeforeUpdateUserInfoExReq{
CallbackCommand: cbapi.CallbackBeforeUpdateUserInfoExCommand,
UserID: req.UserInfo.UserID,
FaceURL: &req.UserInfo.FaceURL,
Nickname: &req.UserInfo.Nickname,
FaceURL: req.UserInfo.FaceURL,
Nickname: req.UserInfo.Nickname,
}
resp := &cbapi.CallbackBeforeUpdateUserInfoExResp{}
if err := http.CallBackPostReturn(ctx, config.Config.Callback.CallbackUrl, cbReq, resp, config.Config.Callback.CallbackBeforeUpdateUserInfoEx); err != nil {
return err
}
utils.NotNilReplace(&req.UserInfo.FaceURL, resp.FaceURL)
utils.NotNilReplace(req.UserInfo.FaceURL, resp.FaceURL)
utils.NotNilReplace(req.UserInfo.Ex, resp.Ex)
utils.NotNilReplace(&req.UserInfo.Nickname, resp.Nickname)
utils.NotNilReplace(req.UserInfo.Nickname, resp.Nickname)
return nil
}
func CallbackAfterUpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUserInfoExReq) error {

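The callback payloads now carry *wrapperspb.StringValue instead of plain string pointers, so the merge above can distinguish "the callback did not return this field" (nil) from "the callback set it to the empty string". A short sketch of that distinction; applyIfSet is a hypothetical helper standing in for utils.NotNilReplace.

// Sketch: nil wrapper = leave the request field alone, non-nil wrapper = overwrite,
// including overwriting with "".
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

// applyIfSet copies the callback's answer into the request field only when the
// callback actually returned a value (hypothetical helper, for illustration).
func applyIfSet(dst **wrapperspb.StringValue, src *wrapperspb.StringValue) {
	if src != nil {
		*dst = src
	}
}

func main() {
	reqNickname := wrapperspb.String("alice")
	reqFaceURL := wrapperspb.String("https://example.com/a.png")

	var respNickname *wrapperspb.StringValue // callback left the nickname alone
	respFaceURL := wrapperspb.String("")     // callback explicitly cleared the face URL

	applyIfSet(&reqNickname, respNickname)
	applyIfSet(&reqFaceURL, respFaceURL)
	fmt.Println(reqNickname.GetValue(), reqFaceURL.GetValue() == "") // alice true
}
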
@ -168,7 +168,7 @@ func (s *userServer) UpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUse
if err != nil {
return nil, err
}
if req.UserInfo.Nickname != "" || req.UserInfo.FaceURL != "" {
if req.UserInfo.Nickname != nil || req.UserInfo.FaceURL != nil {
if err := s.groupRpcClient.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID); err != nil {
log.ZError(ctx, "NotificationUserInfoUpdate", err)
}
@ -515,7 +515,7 @@ func (s *userServer) SearchNotificationAccount(ctx context.Context, req *pbuser.
return resp, nil
}
_, users, err := s.UserDatabase.Page(ctx, req.Pagination)
users, err := s.UserDatabase.FindNotification(ctx, constant.AppNotificationAdmin)
if err != nil {
return nil, err
}
@ -558,7 +558,7 @@ func (s *userServer) userModelToResp(users []*relation.UserModel) *pbuser.Search
accounts := make([]*pbuser.NotificationAccountInfo, 0)
var total int64
for _, v := range users {
if v.AppMangerLevel == constant.AppNotificationAdmin || v.AppMangerLevel == constant.AppAdmin {
if v.AppMangerLevel == constant.AppNotificationAdmin && !utils.IsContain(v.UserID, config.Config.IMAdmin.UserID) {
temp := &pbuser.NotificationAccountInfo{
UserID: v.UserID,
FaceURL: v.FaceURL,

@ -38,6 +38,9 @@ func CheckAccessV3(ctx context.Context, ownerUserID string) (err error) {
if utils.IsContain(opUserID, config.Config.Manager.UserID) {
return nil
}
if utils.IsContain(opUserID, config.Config.IMAdmin.UserID) {
return nil
}
if opUserID == ownerUserID {
return nil
}
@ -45,13 +48,16 @@ func CheckAccessV3(ctx context.Context, ownerUserID string) (err error) {
}
func IsAppManagerUid(ctx context.Context) bool {
return utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.Manager.UserID)
return utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.Manager.UserID) || utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.IMAdmin.UserID)
}
func CheckAdmin(ctx context.Context) error {
if utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.Manager.UserID) {
return nil
}
if utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.IMAdmin.UserID) {
return nil
}
return errs.ErrNoPermission.Wrap(fmt.Sprintf("user %s is not admin userID", mcontext.GetOpUserID(ctx)))
}
func CheckIMAdmin(ctx context.Context) error {
@ -69,7 +75,8 @@ func ParseRedisInterfaceToken(redisToken any) (*tokenverify.Claims, error) {
}
func IsManagerUserID(opUserID string) bool {
return utils.IsContain(opUserID, config.Config.Manager.UserID)
return utils.IsContain(opUserID, config.Config.Manager.UserID) || utils.IsContain(opUserID, config.Config.IMAdmin.UserID)
}
func WsVerifyToken(token, userID string, platformID int) error {

@ -47,22 +47,22 @@ type CallbackAfterUpdateUserInfoResp struct {
type CallbackBeforeUpdateUserInfoExReq struct {
CallbackCommand `json:"callbackCommand"`
UserID string `json:"userID"`
Nickname *string `json:"nickName"`
FaceURL *string `json:"faceURL"`
Nickname *wrapperspb.StringValue `json:"nickName"`
FaceURL *wrapperspb.StringValue `json:"faceURL"`
Ex *wrapperspb.StringValue `json:"ex"`
}
type CallbackBeforeUpdateUserInfoExResp struct {
CommonCallbackResp
Nickname *string `json:"nickName"`
FaceURL *string `json:"faceURL"`
Nickname *wrapperspb.StringValue `json:"nickName"`
FaceURL *wrapperspb.StringValue `json:"faceURL"`
Ex *wrapperspb.StringValue `json:"ex"`
}
type CallbackAfterUpdateUserInfoExReq struct {
CallbackCommand `json:"callbackCommand"`
UserID string `json:"userID"`
Nickname string `json:"nickName"`
FaceURL string `json:"faceURL"`
Nickname *wrapperspb.StringValue `json:"nickName"`
FaceURL *wrapperspb.StringValue `json:"faceURL"`
Ex *wrapperspb.StringValue `json:"ex"`
}
type CallbackAfterUpdateUserInfoExResp struct {

@ -314,7 +314,7 @@ type configStruct struct {
Prometheus struct {
Enable bool `yaml:"enable"`
PrometheusUrl string `yaml:"prometheusUrl"`
GrafanaUrl string `yaml:"grafanaUrl"`
ApiPrometheusPort []int `yaml:"apiPrometheusPort"`
UserPrometheusPort []int `yaml:"userPrometheusPort"`
FriendPrometheusPort []int `yaml:"friendPrometheusPort"`

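The renamed GrafanaUrl field is populated from a grafanaUrl key in config.yaml; an old prometheusUrl entry would simply be ignored and the field would stay empty. A minimal sketch of the mapping, assuming gopkg.in/yaml.v3 and a local mirror of the struct above (the real struct lives in the OpenIM config package).

// Sketch: unmarshal the prometheus block with the new key name.
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

type prometheusConf struct {
	Enable     bool   `yaml:"enable"`
	GrafanaUrl string `yaml:"grafanaUrl"`
}

func main() {
	doc := []byte("enable: true\ngrafanaUrl: http://172.28.0.1:13000/\n")
	var c prometheusConf
	if err := yaml.Unmarshal(doc, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Enable, c.GrafanaUrl)
}
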
@ -79,14 +79,17 @@ func UserPb2DBMapEx(user *sdkws.UserInfoWithEx) map[string]any {
val := make(map[string]any)
// Map fields from UserInfoWithEx to val
val["nickname"] = user.Nickname
val["face_url"] = user.FaceURL
if user.Nickname != nil {
val["nickname"] = user.Nickname.Value
}
if user.FaceURL != nil {
val["face_url"] = user.FaceURL.Value
}
if user.Ex != nil {
val["ex"] = user.Ex.Value
}
if user.GlobalRecvMsgOpt != 0 {
val["global_recv_msg_opt"] = user.GlobalRecvMsgOpt
if user.GlobalRecvMsgOpt != nil {
val["global_recv_msg_opt"] = user.GlobalRecvMsgOpt.Value
}
return val

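With the UserInfoWithEx fields switched to wrapper types, only fields the caller actually set end up in the Mongo update map; omitted fields are no longer overwritten with zero values. A standalone sketch of the same mapping, with a local struct standing in for sdkws.UserInfoWithEx.

// Sketch: only non-nil wrapper fields are written into the update map, so a
// request that carries just FaceURL no longer blanks the nickname.
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

type userInfoWithEx struct { // local stand-in for sdkws.UserInfoWithEx
	Nickname         *wrapperspb.StringValue
	FaceURL          *wrapperspb.StringValue
	Ex               *wrapperspb.StringValue
	GlobalRecvMsgOpt *wrapperspb.Int32Value
}

func pb2DBMapEx(u *userInfoWithEx) map[string]any {
	val := make(map[string]any)
	if u.Nickname != nil {
		val["nickname"] = u.Nickname.Value
	}
	if u.FaceURL != nil {
		val["face_url"] = u.FaceURL.Value
	}
	if u.Ex != nil {
		val["ex"] = u.Ex.Value
	}
	if u.GlobalRecvMsgOpt != nil {
		val["global_recv_msg_opt"] = u.GlobalRecvMsgOpt.Value
	}
	return val
}

func main() {
	fmt.Println(pb2DBMapEx(&userInfoWithEx{FaceURL: wrapperspb.String("https://example.com/a.png")}))
}
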
@ -197,7 +197,7 @@ func (g *groupDatabase) UpdateGroup(ctx context.Context, groupID string, data ma
func (g *groupDatabase) DismissGroup(ctx context.Context, groupID string, deleteMember bool) error {
return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
c := g.cache.NewCache()
if err := g.groupDB.UpdateState(ctx, groupID, constant.GroupStatusDismissed); err != nil {
if err := g.groupDB.UpdateStatus(ctx, groupID, constant.GroupStatusDismissed); err != nil {
return err
}
if deleteMember {

@ -40,6 +40,8 @@ type UserDatabase interface {
Find(ctx context.Context, userIDs []string) (users []*relation.UserModel, err error)
// Find userInfo By Nickname
FindByNickname(ctx context.Context, nickname string) (users []*relation.UserModel, err error)
// Find notificationAccounts
FindNotification(ctx context.Context, level int64) (users []*relation.UserModel, err error)
// Create Insert multiple external guarantees that the userID is not repeated and does not exist in the db
Create(ctx context.Context, users []*relation.UserModel) (err error)
// Update update (non-zero value) external guarantee userID exists
@ -140,6 +142,11 @@ func (u *userDatabase) FindByNickname(ctx context.Context, nickname string) (use
return u.userDB.TakeByNickname(ctx, nickname)
}
// FindNotification finds notification accounts by app manager level.
func (u *userDatabase) FindNotification(ctx context.Context, level int64) (users []*relation.UserModel, err error) {
return u.userDB.TakeNotification(ctx, level)
}
// Create Insert multiple external guarantees that the userID is not repeated and does not exist in the db.
func (u *userDatabase) Create(ctx context.Context, users []*relation.UserModel) (err error) {
return u.tx.Transaction(ctx, func(ctx context.Context) error {

@ -49,8 +49,8 @@ func (g *GroupMgo) Create(ctx context.Context, groups []*relation.GroupModel) (e
return mgoutil.InsertMany(ctx, g.coll, groups)
}
func (g *GroupMgo) UpdateState(ctx context.Context, groupID string, state int32) (err error) {
return g.UpdateMap(ctx, groupID, map[string]any{"state": state})
func (g *GroupMgo) UpdateStatus(ctx context.Context, groupID string, status int32) (err error) {
return g.UpdateMap(ctx, groupID, map[string]any{"status": status})
}
func (g *GroupMgo) UpdateMap(ctx context.Context, groupID string, args map[string]any) (err error) {

@ -51,7 +51,11 @@ func (g *GroupMemberMgo) Create(ctx context.Context, groupMembers []*relation.Gr
}
func (g *GroupMemberMgo) Delete(ctx context.Context, groupID string, userIDs []string) (err error) {
return mgoutil.DeleteMany(ctx, g.coll, bson.M{"group_id": groupID, "user_id": bson.M{"$in": userIDs}})
filter := bson.M{"group_id": groupID}
if len(userIDs) > 0 {
filter["user_id"] = bson.M{"$in": userIDs}
}
return mgoutil.DeleteMany(ctx, g.coll, filter)
}
func (g *GroupMemberMgo) UpdateRoleLevel(ctx context.Context, groupID string, userID string, roleLevel int32) error {
@ -84,8 +88,8 @@ func (g *GroupMemberMgo) FindRoleLevelUserIDs(ctx context.Context, groupID strin
}
func (g *GroupMemberMgo) SearchMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (total int64, groupList []*relation.GroupMemberModel, err error) {
//TODO implement me
panic("implement me")
filter := bson.M{"group_id": groupID, "nickname": bson.M{"$regex": keyword}}
return mgoutil.FindPage[*relation.GroupMemberModel](ctx, g.coll, filter, pagination)
}
func (g *GroupMemberMgo) FindUserJoinedGroupID(ctx context.Context, userID string) (groupIDs []string, err error) {

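Two behavioral notes on the hunk above: Delete with an empty userIDs slice now removes every member of the group, and SearchMember performs a nickname regex match scoped to the group instead of panicking. A standalone sketch of the same filter construction; go.mongodb.org/mongo-driver is assumed, as in the repo's mgoutil helpers.

// Sketch: the filters built by Delete and SearchMember, shown in isolation.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func memberDeleteFilter(groupID string, userIDs []string) bson.M {
	filter := bson.M{"group_id": groupID}
	if len(userIDs) > 0 { // an empty slice now means "all members of the group"
		filter["user_id"] = bson.M{"$in": userIDs}
	}
	return filter
}

func memberSearchFilter(groupID, keyword string) bson.M {
	// nickname substring match via regex, limited to one group
	return bson.M{"group_id": groupID, "nickname": bson.M{"$regex": keyword}}
}

func main() {
	fmt.Println(memberDeleteFilter("g1", nil))            // map[group_id:g1]
	fmt.Println(memberDeleteFilter("g1", []string{"u1"})) // adds user_id $in
	fmt.Println(memberSearchFilter("g1", "ali"))
}
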
@ -65,6 +65,10 @@ func (u *UserMgo) Take(ctx context.Context, userID string) (user *relation.UserM
return mgoutil.FindOne[*relation.UserModel](ctx, u.coll, bson.M{"user_id": userID})
}
func (u *UserMgo) TakeNotification(ctx context.Context, level int64) (user []*relation.UserModel, err error) {
return mgoutil.Find[*relation.UserModel](ctx, u.coll, bson.M{"app_manger_level": level})
}
func (u *UserMgo) TakeByNickname(ctx context.Context, nickname string) (user []*relation.UserModel, err error) {
return mgoutil.Find[*relation.UserModel](ctx, u.coll, bson.M{"nickname": nickname})
}

@ -42,7 +42,7 @@ type GroupModel struct {
type GroupModelInterface interface {
Create(ctx context.Context, groups []*GroupModel) (err error)
UpdateMap(ctx context.Context, groupID string, args map[string]any) (err error)
UpdateState(ctx context.Context, groupID string, state int32) (err error)
UpdateStatus(ctx context.Context, groupID string, status int32) (err error)
Find(ctx context.Context, groupIDs []string) (groups []*GroupModel, err error)
Take(ctx context.Context, groupID string) (group *GroupModel, err error)
Search(ctx context.Context, keyword string, pagination pagination.Pagination) (total int64, groups []*GroupModel, err error)

@ -53,6 +53,7 @@ type UserModelInterface interface {
UpdateByMap(ctx context.Context, userID string, args map[string]any) (err error)
Find(ctx context.Context, userIDs []string) (users []*UserModel, err error)
Take(ctx context.Context, userID string) (user *UserModel, err error)
TakeNotification(ctx context.Context, level int64) (user []*UserModel, err error)
TakeByNickname(ctx context.Context, nickname string) (user []*UserModel, err error)
Page(ctx context.Context, pagination pagination.Pagination) (count int64, users []*UserModel, err error)
Exist(ctx context.Context, userID string) (exist bool, err error)

@ -55,14 +55,17 @@ func (cli *K8sDR) Register(serviceName, host string, port int, opts ...grpc.Dial
return nil
}
func (cli *K8sDR) UnRegister() error {
return nil
}
func (cli *K8sDR) CreateRpcRootNodes(serviceNames []string) error {
return nil
}
func (cli *K8sDR) RegisterConf2Registry(key string, conf []byte) error {
return nil
@ -123,6 +126,8 @@ func getMsgGatewayHost(ctx context.Context) []string {
log.ZInfo(ctx, "getMsgGatewayHost", "instance", instance, "selfPodName", selfPodName, "replicas", replicas, "ns", ns, "ret", ret)
return ret
}
// GetConns returns the gRPC client connections to the specified service.
func (cli *K8sDR) GetConns(ctx context.Context, serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error) {
if serviceName != config.Config.RpcRegisterName.OpenImMessageGatewayName {
@ -142,6 +147,7 @@ func (cli *K8sDR) GetConns(ctx context.Context, serviceName string, opts ...grpc
return ret, nil
}
}
func (cli *K8sDR) GetConn(ctx context.Context, serviceName string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
return grpc.DialContext(ctx, serviceName, append(cli.options, opts...)...)
@ -151,9 +157,11 @@ func (cli *K8sDR) GetSelfConnTarget() string {
return cli.rpcRegisterAddr
}
func (cli *K8sDR) AddOption(opts ...grpc.DialOption) {
cli.options = append(cli.options, opts...)
}
func (cli *K8sDR) CloseConn(conn *grpc.ClientConn) {
conn.Close()
}

@ -64,6 +64,9 @@ func NewUserRpcClient(client discoveryregistry.SvcDiscoveryRegistry) UserRpcClie
// GetUsersInfo retrieves information for multiple users based on their user IDs.
func (u *UserRpcClient) GetUsersInfo(ctx context.Context, userIDs []string) ([]*sdkws.UserInfo, error) {
if len(userIDs) == 0 {
return []*sdkws.UserInfo{}, nil
}
resp, err := u.Client.GetDesignateUsers(ctx, &user.GetDesignateUsersReq{
UserIDs: userIDs,
})

@ -30,29 +30,32 @@ OPENIM_VERBOSE=4
openim::log::info "\n# Begin to check all openim service"
# OpenIM status
# Elegant printing function
# Elegant printing function
print_services_and_ports() {
service_names=("$1[@]")
service_ports=("$2[@]")
local service_names=("$@")
local half_length=$((${#service_names[@]} / 2))
local service_ports=("${service_names[@]:half_length}")
echo "+-------------------------+----------+"
echo "| Service Name | Port |"
echo "+-------------------------+----------+"
for index in "${!service_names}"; do
printf "| %-23s | %-8s |\n" "${!service_names[$index]}" "${!service_ports[$index]}"
for ((index=0; index < half_length; index++)); do
printf "| %-23s | %-8s |\n" "${service_names[$index]}" "${service_ports[$index]}"
done
echo "+-------------------------+----------+"
}
# Assuming OPENIM_SERVER_NAME_TARGETS and OPENIM_SERVER_PORT_TARGETS are defined
# Similarly for OPENIM_DEPENDENCY_TARGETS and OPENIM_DEPENDENCY_PORT_TARGETS
# Print out services and their ports
print_services_and_ports OPENIM_SERVER_NAME_TARGETS OPENIM_SERVER_PORT_TARGETS
print_services_and_ports "${OPENIM_SERVER_NAME_TARGETS[@]}" "${OPENIM_SERVER_PORT_TARGETS[@]}"
# Print out dependencies and their ports
print_services_and_ports OPENIM_DEPENDENCY_TARGETS OPENIM_DEPENDENCY_PORT_TARGETS
print_services_and_ports "${OPENIM_DEPENDENCY_TARGETS[@]}" "${OPENIM_DEPENDENCY_PORT_TARGETS[@]}"
# OpenIM check
echo "++ The port being checked: ${OPENIM_SERVER_PORT_LISTARIES[@]}"
@ -89,4 +92,4 @@ else
echo "++++ Check all openim service ports successfully !"
fi
set -e
set -e

@ -43,10 +43,20 @@ fi
"${OPENIM_ROOT}"/scripts/init-config.sh
pushd "${OPENIM_ROOT}"
${DOCKER_COMPOSE_COMMAND} stop
curl https://gitee.com/openimsdk/openim-docker/raw/main/example/full-openim-server-and-chat.yml -o docker-compose.yml
curl https://raw.githubusercontent.com/openimsdk/openim-docker/main/docker-compose.yaml -o docker-compose.yml
${DOCKER_COMPOSE_COMMAND} up -d
sleep 60
# Wait for a short period to allow containers to initialize
sleep 30
# Check the status of the containers
if ! ${DOCKER_COMPOSE_COMMAND} ps | grep -q 'Up'; then
echo "Error: One or more docker containers failed to start."
${DOCKER_COMPOSE_COMMAND} logs
fi
sleep 30 # Together with the 30-second wait above, this keeps roughly the original 60-second startup window
${DOCKER_COMPOSE_COMMAND} logs openim-server
${DOCKER_COMPOSE_COMMAND} ps
popd
popd

@ -285,7 +285,6 @@ readonly ALERTMANAGER_SEND_RESOLVED=${ALERTMANAGER_SEND_RESOLVED:-"{SEND_RESOLVE
###################### Grafana Configuration ######################
def "GRAFANA_PORT" "13000" # Grafana port
def "GRAFANA_ADDRESS" "${DOCKER_BRIDGE_GATEWAY}" # Grafana address
###################### RPC Port Configuration Variables ######################
# For launching multiple programs, just fill in multiple ports separated by commas
# For example:
@ -378,8 +377,8 @@ def "CALLBACK_TIMEOUT" "5" # 最长超时时间
def "CALLBACK_FAILED_CONTINUE" "true" # 失败后是否继续
###################### Prometheus 配置信息 ######################
# 是否启用 Prometheus
readonly PROMETHEUS_ENABLE=${PROMETHEUS_ENABLE:-'false'}
def "PROMETHEUS_URL" "${GRAFANA_ADDRESS}:${GRAFANA_PORT}"
readonly PROMETHEUS_ENABLE=${PROMETHEUS_ENABLE:-'true'}
readonly GRAFANA_URL=${GRAFANA_URL:-"http://${OPENIM_IP}:${GRAFANA_PORT}/"}
# Prometheus port for the API service
readonly API_PROM_PORT=${API_PROM_PORT:-'20100'}
# Prometheus port for the User service

@ -223,8 +223,8 @@ func checkMinio() (string, error) {
defer cancel()
if minioClient.IsOffline() {
str := fmt.Sprintf("Minio server is offline;%s", str)
return "", ErrComponentStart.Wrap(str)
// str := fmt.Sprintf("Minio server is offline;%s", str)
// return "", ErrComponentStart.Wrap(str)
}
// Check for localhost in API URL and Minio SignEndpoint
