* refactor: refactor workflow contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove Chinese comments.

* update workflow files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg retrieval.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* feat: add rocksTimeout

* feat: wrap logs

* feat: add logs

* feat: listen config

* feat: enable listening on TIME_WAIT ports

* feat: add logs

* feat: cache batch

* chore: enable fullUserCache

* feat: push rpc num

* feat: push err

* feat: with operationID

* feat: sleep

* feat: change 1s

* feat: change log

* feat: implement GetBatch in rpcCache.

* feat: print getOnline cost

* feat: change log

* feat: change kafka and push config

* feat: del interface

* feat: fix err

* feat: change config

* feat: go mod

* feat: change config

* feat: change config

* feat: add sleep in push

* feat: warn logs

* feat: logs

* feat: logs

* feat: change port

* feat: start config

* feat: remove port reuse

* feat: prometheus config

* feat: prometheus config

* feat: prometheus config

* feat: add long time send msg to grafana

* feat: init

* feat: init

* feat: implement offline push.

* feat: batch get user online

* feat: implement batch Push split

* update go mod

* Revert "feat: change port"

This reverts commit 06d5e944

* feat: change port

* feat: change config

* feat: implement kafka producer and consumer.

* update format.

* add PushMQ log.

* feat: get all online users and init push

* feat: lock in online cache

* feat: config

* fix: init online status

* fix: add logs

* fix: userIDs

* fix: add logs

* feat: update Handler logic.

* update MQ logic.

* update

* update

* fix: method name

* fix: update OfflinePushConsumerHandler.

* fix: prommetrics

* fix: add logs

* fix: ctx

* fix: log

* fix: config

* feat: change port

* fix: atomic online cache status

---------

Co-authored-by: Monet Lee <monet_lee@163.com>

@@ -11,6 +11,6 @@ prometheus:
# Whether to enable prometheus
enable: true
# Prometheus listening ports, must match the number of api.ports
ports: [ 20502 ]
ports: [ 12002 ]
# This address can be accessed via a browser
grafanaURL: http://127.0.0.1:13000/

@@ -2,13 +2,13 @@ rpc:
# The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
registerIP:
# List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
ports: [ 10140 ]
ports: [ 10140, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 10148, 10149, 10150, 10151, 10152, 10153, 10154, 10155 ]
prometheus:
# Enable or disable Prometheus monitoring
enable: true
# List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
ports: [ 20640 ]
ports: [ 12140, 12141, 12142, 12143, 12144, 12145, 12146, 12147, 12148, 12149, 12150, 12151, 12152, 12153, 12154, 12155 ]
# IP address that the RPC/WebSocket service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
listenIP: 0.0.0.0
@@ -25,6 +25,3 @@ longConnSvr:
# 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time
multiLoginPolicy: 1

@@ -3,4 +3,4 @@ prometheus:
enable: true
# List of ports that Prometheus listens on; each port corresponds to an instance of monitoring. Ensure these are managed accordingly
# Because four instances have been launched, four ports need to be specified
ports: [ 20600, 20601, 20602, 20603 ]
ports: [ 12020, 12021, 12022, 12023, 12024, 12025, 12026, 12027 ]

@@ -1,16 +1,16 @@
rpc:
# The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
registerIP:
registerIP:
# IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
listenIP: 0.0.0.0
# List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
ports: [ 10170, 10171, 10172, 10173 ]
ports: [ 10170, 10171, 10172, 10173, 10174, 10175, 10176, 10177, 10178, 10179, 10180, 10181, 10182, 10183, 10184, 10185 ]
prometheus:
# Enable or disable Prometheus monitoring
enable: true
# List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
ports: [ 20670, 20671, 20672, 20673 ]
ports: [ 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12181, 12182, 12183, 12184, 12185 ]
maxConcurrentWorkers: 3
#Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified.
@@ -38,9 +38,4 @@ iosPush:
badgeCount: true
production: false
fullUserCache: true

@@ -4,15 +4,14 @@ rpc:
# IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
listenIP: 0.0.0.0
# List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
ports: [ 10160 ]
ports: [ 10200 ]
prometheus:
# Enable or disable Prometheus monitoring
enable: true
# List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
ports: [ 20660 ]
ports: [ 12200 ]
tokenPolicy:
# Token validity period, in days
expire: 90

@@ -4,10 +4,10 @@ rpc:
# IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
listenIP: 0.0.0.0
# List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
ports: [ 10180 ]
ports: [ 10220 ]
prometheus:
# Enable or disable Prometheus monitoring
enable: true
# List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
ports: [ 20680 ]
ports: [ 12220 ]

@@ -4,10 +4,10 @@ rpc:
# IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
listenIP: 0.0.0.0
# List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
ports: [ 10120 ]
ports: [ 10240 ]
prometheus:
# Enable or disable Prometheus monitoring
enable: true
# List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
ports: [ 20620 ]
ports: [ 12240 ]

@@ -4,13 +4,13 @@ rpc:
# IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
listenIP: 0.0.0.0
# List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
ports: [ 10150 ]
ports: [ 10260 ]
prometheus:
# Enable or disable Prometheus monitoring
enable: true
# List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
ports: [ 20650 ]
ports: [ 12260 ]
enableHistoryForNewMembers: true
enableHistoryForNewMembers: true

@@ -4,17 +4,14 @@ rpc:
# IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
listenIP: 0.0.0.0
# List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
ports: [ 10130 ]
ports: [ 10280 ]
prometheus:
# Enable or disable Prometheus monitoring
enable: true
# List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
ports: [ 20630 ]
ports: [ 12280 ]
# Does sending messages require friend verification
friendVerify: false

@@ -4,13 +4,13 @@ rpc:
# IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
listenIP: 0.0.0.0
# List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
ports: [ 10190 ]
ports: [ 10300 ]
prometheus:
# Enable or disable Prometheus monitoring
enable: true
# List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
ports: [ 20690 ]
ports: [ 12300 ]
object:
@@ -37,4 +37,4 @@ object:
accessKeyID:
accessKeySecret:
sessionToken:
publicRead: false
publicRead: false

@@ -4,14 +4,10 @@ rpc:
# Listening IP; 0.0.0.0 means both internal and external IPs are listened to, if blank, the internal network IP is automatically obtained by default
listenIP: 0.0.0.0
# Listening ports; if multiple are configured, multiple instances will be launched, and must be consistent with the number of prometheus.ports
ports: [ 10110 ]
ports: [ 10320 ]
prometheus:
# Whether to enable prometheus
enable: true
# Prometheus listening ports, must be consistent with the number of rpc.ports
ports: [ 20610 ]
ports: [ 12320 ]

@@ -28,56 +28,59 @@ scrape_configs:
- targets: [ internal_ip:20500 ]
- job_name: openimserver-openim-api
static_configs:
- targets: [ internal_ip:20502 ]
- targets: [ internal_ip:12002 ]
labels:
namespace: default
- job_name: openimserver-openim-msggateway
static_configs:
- targets: [ internal_ip:20640 ]
- targets: [ internal_ip:12140 ]
# - targets: [ internal_ip:12140, internal_ip:12141, internal_ip:12142, internal_ip:12143, internal_ip:12144, internal_ip:12145, internal_ip:12146, internal_ip:12147, internal_ip:12148, internal_ip:12149, internal_ip:12150, internal_ip:12151, internal_ip:12152, internal_ip:12153, internal_ip:12154, internal_ip:12155 ]
labels:
namespace: default
- job_name: openimserver-openim-msgtransfer
static_configs:
- targets: [ internal_ip:20600, internal_ip:20601, internal_ip:20602, internal_ip:20603 ]
- targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027 ]
# - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027, internal_ip:12028, internal_ip:12029, internal_ip:12030, internal_ip:12031, internal_ip:12032, internal_ip:12033, internal_ip:12034, internal_ip:12035 ]
labels:
namespace: default
- job_name: openimserver-openim-push
static_configs:
- targets: [ internal_ip:20670, internal_ip:20671, internal_ip:20672, internal_ip:20673]
- targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177 ]
# - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177, internal_ip:12178, internal_ip:12179, internal_ip:12180, internal_ip:12181, internal_ip:12182, internal_ip:12183, internal_ip:12184, internal_ip:12185 ]
labels:
namespace: default
- job_name: openimserver-openim-rpc-auth
static_configs:
- targets: [ internal_ip:20600 ]
- targets: [ internal_ip:12200 ]
labels:
namespace: default
- job_name: openimserver-openim-rpc-conversation
static_configs:
- targets: [ internal_ip:20680 ]
- targets: [ internal_ip:12220 ]
labels:
namespace: default
- job_name: openimserver-openim-rpc-friend
static_configs:
- targets: [ internal_ip:20620 ]
- targets: [ internal_ip:12240 ]
labels:
namespace: default
- job_name: openimserver-openim-rpc-group
static_configs:
- targets: [ internal_ip:20650 ]
- targets: [ internal_ip:12260 ]
labels:
namespace: default
- job_name: openimserver-openim-rpc-msg
static_configs:
- targets: [ internal_ip:20630 ]
- targets: [ internal_ip:12280 ]
labels:
namespace: default
- job_name: openimserver-openim-rpc-third
static_configs:
- targets: [ internal_ip:20690 ]
- targets: [ internal_ip:12300 ]
labels:
namespace: default
- job_name: openimserver-openim-rpc-user
static_configs:
- targets: [ internal_ip:20610 ]
- targets: [ internal_ip:12320 ]
labels:
namespace: default

@@ -186,4 +186,3 @@ services:
# networks:
# - openim

@@ -12,8 +12,8 @@ require (
github.com/gorilla/websocket v1.5.1
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/mitchellh/mapstructure v1.5.0
github.com/openimsdk/protocol v0.0.72-alpha.17
github.com/openimsdk/tools v0.0.50-alpha.11
github.com/openimsdk/protocol v0.0.72-alpha.18
github.com/openimsdk/tools v0.0.50-alpha.12
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.18.0
github.com/stretchr/testify v1.9.0

@@ -336,10 +336,10 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
github.com/openimsdk/protocol v0.0.72-alpha.17 h1:kB7eyjJHdkc8lpSlLIHskHzbodxkIG4eaK908iQLVdI=
github.com/openimsdk/protocol v0.0.72-alpha.17/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
github.com/openimsdk/tools v0.0.50-alpha.11 h1:ClhkRjUVJWbmOiQ14G6do/ES1a6ZueDITv40Apwq/Tc=
github.com/openimsdk/tools v0.0.50-alpha.11/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4=
github.com/openimsdk/protocol v0.0.72-alpha.18 h1:EytTtgZuXMG1cgTlJryqXXSO1J3t3wrLIn3Os2PRBEE=
github.com/openimsdk/protocol v0.0.72-alpha.18/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
github.com/openimsdk/tools v0.0.50-alpha.12 h1:rV3BxgqN+F79vZvdoQ+97Eob8ScsRVEM8D+Wrcl23uo=
github.com/openimsdk/tools v0.0.50-alpha.12/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=

@@ -58,7 +58,7 @@ func Start(ctx context.Context, index int, conf *Config) error {
)
hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges)
longServer.online, _ = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges)
return nil
})

@@ -265,7 +265,7 @@ func (ws *WsServer) registerClient(client *Client) {
if clientOK {
ws.clients.Set(client.UserID, client)
// There is already a connection to the platform
log.ZInfo(client.ctx, "repeat login", "userID", client.UserID, "platformID",
log.ZDebug(client.ctx, "repeat login", "userID", client.UserID, "platformID",
client.PlatformID, "old remote addr", getRemoteAdders(oldClients))
ws.onlineUserConnNum.Add(1)
} else {
@@ -293,7 +293,7 @@ func (ws *WsServer) registerClient(client *Client) {
wg.Wait()
log.ZInfo(
log.ZDebug(
client.ctx,
"user online",
"online user Num",
@@ -360,7 +360,7 @@ func (ws *WsServer) unregisterClient(client *Client) {
ws.onlineUserConnNum.Add(-1)
ws.subscription.DelClient(client)
//ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
log.ZDebug(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
ws.onlineUserNum.Load(), "online user conn Num",
ws.onlineUserConnNum.Load(),
)

@@ -111,7 +111,7 @@ func Start(ctx context.Context, index int, config *Config) error {
if err != nil {
return err
}
msgTransfer := &MsgTransfer{
historyCH: historyCH,
historyMongoCH: historyMongoCH,

@@ -238,6 +238,7 @@ func (och *OnlineHistoryRedisConsumerHandler) categorizeMessageLists(totalMsgs [
}
func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) {
log.ZInfo(ctx, "handle storage msg")
for _, storageMsg := range storageList {
log.ZDebug(ctx, "handle storage msg", "msg", storageMsg.message.String())
}
@@ -254,16 +255,20 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList)
return
}
log.ZInfo(ctx, "BatchInsertChat2Cache end")
if isNewConversation {
switch msg.SessionType {
case constant.ReadGroupChatType:
log.ZInfo(ctx, "group chat first create conversation", "conversationID",
log.ZDebug(ctx, "group chat first create conversation", "conversationID",
conversationID)
userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID)
if err != nil {
log.ZWarn(ctx, "get group member ids error", err, "conversationID",
conversationID)
} else {
log.ZInfo(ctx, "GetGroupMemberIDs end")
if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx,
msg.GroupID, userIDs); err != nil {
log.ZWarn(ctx, "single chat first create conversation error", err,
@@ -282,13 +287,16 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
}
}
log.ZDebug(ctx, "success incr to next topic")
log.ZInfo(ctx, "success incr to next topic")
err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
if err != nil {
log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID",
conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
}
log.ZInfo(ctx, "MsgToMongoMQ end")
och.toPushTopic(ctx, key, conversationID, storageList)
log.ZInfo(ctx, "toPushTopic end")
}
}
@@ -319,7 +327,7 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con
func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) {
for _, v := range msgs {
log.ZDebug(ctx, "push msg to topic", "msg", v.message.String())
och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
_, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
}
}
@@ -344,7 +352,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSess
func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group
log.ZInfo(context.Background(), "online new session msg come", "highWaterMarkOffset",
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
session.MarkMessage(lastMessage, "")

@@ -57,7 +57,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
return
}
log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
err = mc.msgTransferDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq)
if err != nil {
log.ZError(

@@ -28,6 +28,6 @@ type Dummy struct {
}
func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error {
log.ZInfo(ctx, "dummy push")
log.ZDebug(ctx, "dummy push")
return nil
}
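The Dummy pusher above doubles as the minimal contract an offline pusher must satisfy. A hedged sketch of a custom implementation against the same Push signature (logPusher is a hypothetical name, not part of this PR):

package offlinepush

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
	"github.com/openimsdk/tools/log"
)

// logPusher is a hypothetical offline pusher that only records what it
// would have sent, mirroring how Dummy satisfies the interface.
type logPusher struct{}

func (p *logPusher) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error {
	log.ZDebug(ctx, "log pusher", "userIDs", userIDs, "title", title, "content", content)
	return nil
}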

@@ -23,7 +23,6 @@ import (
"time"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/errs"
@@ -100,7 +99,6 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri
if err = g.batchPush(ctx, token, userIDs[i:end], pushReq); err != nil {
log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
}
}
if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil {
log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)

@@ -63,7 +63,7 @@ func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context,
}
}
func (c *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) {
func (o *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) {
type AtTextElem struct {
Text string `json:"text,omitempty"`
AtUserList []string `json:"atUserList,omitempty"`
@@ -108,12 +108,12 @@ func (c *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (ti
return
}
func (c *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error {
title, content, opts, err := c.getOfflinePushInfos(msg)
func (o *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error {
title, content, opts, err := o.getOfflinePushInfos(msg)
if err != nil {
return err
}
err = c.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
err = o.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
if err != nil {
prommetrics.MsgOfflinePushFailedCounter.Inc()
return err

@@ -27,12 +27,12 @@ func newEmptyOnlinePusher() *emptyOnlinePusher {
func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData,
pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
log.ZWarn(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
return nil, nil
}
func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData,
wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
log.ZWarn(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
return nil
}

@@ -27,6 +27,9 @@ import (
"github.com/openimsdk/tools/utils/timeutil"
"github.com/redis/go-redis/v9"
"google.golang.org/protobuf/proto"
"math/rand"
"strconv"
"time"
)
type ConsumerHandler struct {
@@ -55,6 +58,7 @@ func NewConsumerHandler(config *Config, database controller.PushDatabase, offlin
}
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config)
consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
@@ -65,7 +69,10 @@ func NewConsumerHandler(config *Config, database controller.PushDatabase, offlin
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
consumerHandler.config = config
consumerHandler.pushDatabase = database
consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
consumerHandler.onlineCache, err = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, config.RpcConfig.FullUserCache, nil)
if err != nil {
return nil, err
}
return &consumerHandler, nil
}
@@ -108,6 +115,14 @@ func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
c.onlineCache.Lock.Lock()
for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver {
c.onlineCache.Cond.Wait()
}
c.onlineCache.Lock.Unlock()
ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
log.ZInfo(ctx, "begin consume messages")
for msg := range claim.Messages() {
ctx := c.pushConsumerGroup.GetContextFromMsg(msg)
c.handleMs2PsChat(ctx, msg.Value)
@@ -118,20 +133,27 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *
// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
defer func(duration time.Time) {
t := time.Since(duration)
log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "msg", msg.String(), "time cost", t)
}(time.Now())
if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
return err
}
log.ZInfo(ctx, "webhookBeforeOnlinePush end")
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
if err != nil {
return err
}
log.ZDebug(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs)
log.ZInfo(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs)
if !c.shouldPushOffline(ctx, msg) {
return nil
}
log.ZInfo(ctx, "shouldPushOffline end")
for _, v := range wsResults {
//message sender does not need offline push
@@ -150,7 +172,7 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *
offlinePushUserID, msg, nil); err != nil {
return err
}
log.ZInfo(ctx, "webhookBeforeOfflinePush end")
err = c.offlinePushMsg(ctx, msg, offlinePushUserID)
if err != nil {
log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePushUserID", offlinePushUserID, "msg", msg)
@@ -172,21 +194,11 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
}
func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
var (
onlineUserIDs []string
offlineUserIDs []string
)
for _, userID := range pushToUserIDs {
online, err := c.onlineCache.GetUserOnline(ctx, userID)
if err != nil {
return nil, err
}
if online {
onlineUserIDs = append(onlineUserIDs, userID)
} else {
offlineUserIDs = append(offlineUserIDs, userID)
}
onlineUserIDs, offlineUserIDs, err := c.onlineCache.GetUsersOnline(ctx, pushToUserIDs)
if err != nil {
return nil, err
}
log.ZDebug(ctx, "GetConnsAndOnlinePush online cache", "sendID", msg.SendID, "recvID", msg.RecvID, "groupID", msg.GroupID, "sessionType", msg.SessionType, "clientMsgID", msg.ClientMsgID, "serverMsgID", msg.ServerMsgID, "offlineUserIDs", offlineUserIDs, "onlineUserIDs", onlineUserIDs)
var result []*msggateway.SingleMsgToUserResults
if len(onlineUserIDs) > 0 {
@@ -205,35 +217,42 @@ func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.
}
func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
log.ZInfo(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
defer func(duration time.Time) {
t := time.Since(duration)
log.ZInfo(ctx, "Get group msg from msg_transfer and push msg end", "msg", msg.String(), "groupID", groupID, "time cost", t)
}(time.Now())
var pushToUserIDs []string
if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg,
&pushToUserIDs); err != nil {
return err
}
log.ZInfo(ctx, "webhookBeforeGroupOnlinePush end")
err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg)
if err != nil {
return err
}
log.ZInfo(ctx, "groupMessagesHandler end")
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil {
return err
}
log.ZDebug(ctx, "group push result", "result", wsResults, "msg", msg)
log.ZInfo(ctx, "group push result", "result", wsResults, "msg", msg)
if !c.shouldPushOffline(ctx, msg) {
return nil
}
needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs)
log.ZInfo(ctx, "GetOnlinePushFailedUserIDs end")
//filter some users, like do-not-disturb or those that don't need offline push, etc.
needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs)
if err != nil {
return err
}
log.ZInfo(ctx, "filterGroupMessageOfflinePush end")
// Use offline push messaging
if len(needOfflinePushUserIDs) > 0 {
@@ -295,7 +314,7 @@ func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID stri
if unmarshalNotificationElem(msg.Content, &tips) != nil {
return err
}
log.ZInfo(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs)
log.ZDebug(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs)
if len(c.config.Share.IMAdminUserID) > 0 {
ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0])
}
@@ -379,6 +398,7 @@ func (c *ConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, conten
}
return
}
func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error {
conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID)
maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID)
@@ -387,6 +407,7 @@ func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context,
}
return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq)
}
func unmarshalNotificationElem(bytes []byte, t any) error {
var notification sdkws.NotificationElem
if err := json.Unmarshal(bytes, &notification); err != nil {

@@ -67,7 +67,7 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg.
return nil, err
}
log.ZInfo(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start))
log.ZDebug(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start))
return &msg.ClearMsgResp{}, nil
}

@@ -2,6 +2,8 @@ package user
import (
"context"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/protocol/constant"
pbuser "github.com/openimsdk/protocol/user"
)
@@ -80,3 +82,22 @@ func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUse
}
return &pbuser.SetUserOnlineStatusResp{}, nil
}
func (s *userServer) GetAllOnlineUsers(ctx context.Context, req *pbuser.GetAllOnlineUsersReq) (*pbuser.GetAllOnlineUsersResp, error) {
resMap, nextCursor, err := s.online.GetAllOnlineUsers(ctx, req.Cursor)
if err != nil {
return nil, err
}
resp := &pbuser.GetAllOnlineUsersResp{
StatusList: make([]*pbuser.OnlineStatus, 0, len(resMap)),
NextCursor: nextCursor,
}
for userID, plats := range resMap {
resp.StatusList = append(resp.StatusList, &pbuser.OnlineStatus{
UserID: userID,
Status: int32(datautil.If(len(plats) > 0, constant.Online, constant.Offline)),
PlatformIDs: plats,
})
}
return resp, nil
}
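The status mapping above leans on datautil.If, a generic ternary helper from openimsdk/tools. A minimal sketch of the same mapping in isolation (onlineStatus is a hypothetical helper):

package example

import (
	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/tools/utils/datautil"
)

// onlineStatus mirrors GetAllOnlineUsers above: a non-empty platform
// list counts the user as online.
func onlineStatus(plats []int32) int32 {
	return int32(datautil.If(len(plats) > 0, constant.Online, constant.Offline))
}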

@@ -79,13 +79,13 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
now := time.Now()
deltime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.RetainChatRecords))
ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deltime.UnixMilli()))
log.ZInfo(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli())
log.ZDebug(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli())
if _, err := msgClient.ClearMsg(ctx, &msg.ClearMsgReq{Timestamp: deltime.UnixMilli()}); err != nil {
log.ZError(ctx, "cron clear chat records failed", err, "deltime", deltime, "cont", time.Since(now))
return
}
log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
log.ZDebug(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
}
if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearMsgFunc); err != nil {
return errs.Wrap(err)
@@ -95,7 +95,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
msgDestructFunc := func() {
now := time.Now()
ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), now.UnixMilli()))
log.ZInfo(ctx, "msg destruct cron start", "now", now)
log.ZDebug(ctx, "msg destruct cron start", "now", now)
conversations, err := conversationClient.GetConversationsNeedDestructMsgs(ctx, &pbconversation.GetConversationsNeedDestructMsgsReq{})
if err != nil {
@@ -108,7 +108,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
return
}
}
log.ZInfo(ctx, "msg destruct cron task completed", "cont", time.Since(now))
log.ZDebug(ctx, "msg destruct cron task completed", "cont", time.Since(now))
}
if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, msgDestructFunc); err != nil {
return errs.Wrap(err)
@@ -119,18 +119,18 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
// now := time.Now()
// deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime))
// ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli()))
// log.ZInfo(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
// log.ZDebug(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
// if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil {
// log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now))
// return
// }
// log.ZInfo(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
// log.ZDebug(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
// }
// if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil {
// return errs.Wrap(err)
// }
log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
log.ZDebug(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
crontab.Start()
<-ctx.Done()
return nil

@@ -224,6 +224,7 @@ type Push struct {
BadgeCount bool `mapstructure:"badgeCount"`
Production bool `mapstructure:"production"`
} `mapstructure:"iosPush"`
FullUserCache bool `mapstructure:"fullUserCache"`
}
type Auth struct {
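The new FullUserCache field is read from the push config file via mapstructure, matching the fullUserCache: true line added to the push YAML earlier in this diff. A hedged decode sketch (the raw map stands in for the parsed YAML; decodePushSketch is hypothetical):

package config

import "github.com/mitchellh/mapstructure"

// decodePushSketch shows how a fullUserCache key would land in the new
// Push.FullUserCache field defined above.
func decodePushSketch() (bool, error) {
	raw := map[string]any{"fullUserCache": true}
	var p Push
	if err := mapstructure.Decode(raw, &p); err != nil {
		return false, err
	}
	return p.FullUserCache, nil
}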

@@ -54,15 +54,11 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort,
"prometheusPorts", prometheusConfig.Ports)
rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort))
listener, err := net.Listen(
"tcp",
rpcTcpAddr,
)
if err != nil {
return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
}
defer listener.Close()
client, err := kdisc.NewDiscoveryRegister(discovery, share)
if err != nil {
return err

@@ -1,6 +1,9 @@
package cachekey
import "time"
import (
"strings"
"time"
)
const (
OnlineKey = "ONLINE:"
@@ -11,3 +14,7 @@
func GetOnlineKey(userID string) string {
return OnlineKey + userID
}
func GetOnlineKeyUserID(key string) string {
return strings.TrimPrefix(key, OnlineKey)
}
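GetOnlineKeyUserID is the inverse of GetOnlineKey, which the SCAN-based GetAllOnlineUsers below relies on to map keys back to user IDs. A round-trip sketch:

package cachekey

import "fmt"

// The "ONLINE:" prefix added by GetOnlineKey is trimmed back off by
// GetOnlineKeyUserID.
func ExampleOnlineKeyRoundTrip() {
	key := GetOnlineKey("user123") // "ONLINE:user123"
	fmt.Println(GetOnlineKeyUserID(key))
	// Output: user123
}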

@@ -5,4 +5,5 @@ import "context"
type OnlineCache interface {
GetOnline(ctx context.Context, userID string) ([]int32, error)
SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error)
}

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
"golang.org/x/sync/singleflight"
@@ -65,6 +66,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
}
bs, err := json.Marshal(value)
if err != nil {
log.ZError(ctx, "marshal failed", err)
return nil, err
}
cacheIndex[index] = string(bs)
@@ -72,7 +74,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
return cacheIndex, nil
})
if err != nil {
return nil, err
return nil, errs.WrapMsg(err, "FetchBatch2 failed")
}
for index, data := range indexCache {
if data == "" {
@@ -80,7 +82,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
}
var value V
if err := json.Unmarshal([]byte(data), &value); err != nil {
return nil, err
return nil, errs.WrapMsg(err, "Unmarshal failed")
}
if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
cb.BatchCache(keyId[keys[index]])

@@ -28,6 +28,10 @@ import (
"time"
)
const (
rocksCacheTimeout = 11 * time.Second
)
// BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache.
type BatchDeleterRedis struct {
redisClient redis.UniversalClient
@@ -106,6 +110,8 @@ func (c *BatchDeleterRedis) AddKeys(keys ...string) {
// GetRocksCacheOptions returns the default configuration options for RocksCache.
func GetRocksCacheOptions() *rockscache.Options {
opts := rockscache.NewDefaultOptions()
opts.LockExpire = rocksCacheTimeout
opts.WaitReplicasTimeout = rocksCacheTimeout
opts.StrongConsistency = true
opts.RandomExpireAdjustment = 0.2

@@ -2,8 +2,10 @@ package redis
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
@@ -49,6 +51,36 @@ func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, err
return platformIDs, nil
}
func (s *userOnline) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
result := make(map[string][]int32)
keys, nextCursor, err := s.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result()
if err != nil {
return nil, 0, err
}
for _, key := range keys {
userID := cachekey.GetOnlineKeyUserID(key)
strValues, err := s.rdb.ZRange(ctx, key, 0, -1).Result()
if err != nil {
return nil, 0, err
}
values := make([]int32, 0, len(strValues))
for _, value := range strValues {
intValue, err := strconv.Atoi(value)
if err != nil {
return nil, 0, errs.Wrap(err)
}
values = append(values, int32(intValue))
}
result[userID] = values
}
return result, nextCursor, nil
}
func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
script := `
local key = KEYS[1]
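GetAllOnlineUsers above pages with Redis SCAN, so callers must keep passing the returned cursor back in until it comes back as 0. A minimal caller-side sketch (loadAllOnline is hypothetical; the production loop, with retries, is initUsersOnlineStatus further down):

package example

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)

// loadAllOnline drains the SCAN-based pager: SCAN only guarantees a
// complete iteration once it hands back cursor 0.
func loadAllOnline(ctx context.Context, c cache.OnlineCache) (map[string][]int32, error) {
	all := make(map[string][]int32)
	var cursor uint64
	for {
		users, next, err := c.GetAllOnlineUsers(ctx, cursor)
		if err != nil {
			return nil, err
		}
		for userID, platformIDs := range users {
			all[userID] = platformIDs
		}
		if next == 0 {
			return all, nil
		}
		cursor = next
	}
}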

@@ -20,7 +20,9 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V]
type LRU[K comparable, V any] interface {
Get(key K, fetch func() (V, error)) (V, error)
Set(key K, value V)
SetHas(key K, value V) bool
GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error)
Del(key K) bool
Stop()
}
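A hedged usage sketch of the new GetBatch method: the fetch callback receives only the keys the cache could not serve and returns values keyed by them (batchOnline and fetchPlatforms are hypothetical):

package example

import "github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"

// batchOnline serves what it can from the cache and lets GetBatch call
// fetchPlatforms with just the missing keys, e.g. a batch RPC lookup.
func batchOnline(c lru.LRU[string, []int32], userIDs []string,
	fetchPlatforms func([]string) (map[string][]int32, error)) (map[string][]int32, error) {
	return c.GetBatch(userIDs, fetchPlatforms)
}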

@@ -51,6 +51,11 @@ type ExpirationLRU[K comparable, V any] struct {
target Target
}
func (x *ExpirationLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
//TODO implement me
panic("implement me")
}
func (x *ExpirationLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
x.lock.Lock()
v, ok := x.core.Get(key)
@@ -99,5 +104,11 @@ func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool {
return false
}
func (x *ExpirationLRU[K, V]) Set(key K, value V) {
x.lock.Lock()
defer x.lock.Unlock()
x.core.Add(key, &expirationLruItem[V]{value: value})
}
func (x *ExpirationLRU[K, V]) Stop() {
}

@@ -88,18 +88,76 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return v.value, v.err
}
//func (x *LayLRU[K, V]) Set(key K, value V) {
// x.lock.Lock()
// x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
// x.lock.Unlock()
//}
//
func (x *LayLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
	var (
		err  error
		once sync.Once
	)
	res := make(map[K]V)
	queries := make([]K, 0)
	// Lock discipline mirrors Get: hold x.lock only around core access,
	// and v.lock only while reading an item, so neither lock is ever
	// released twice or left held across iterations.
	for _, key := range keys {
		x.lock.Lock()
		v, ok := x.core.Get(key)
		x.lock.Unlock()
		if ok {
			v.lock.Lock()
			expires, value, err1 := v.expires, v.value, v.err
			v.lock.Unlock()
			if expires != 0 && expires > time.Now().UnixMilli() {
				x.target.IncrGetHit()
				res[key] = value
				if err1 != nil {
					once.Do(func() {
						err = err1
					})
				}
				continue
			}
		}
		queries = append(queries, key)
	}
	values, err1 := fetch(queries)
	if err1 != nil {
		once.Do(func() {
			err = err1
		})
	}
	for key, val := range values {
		v := &layLruItem[V]{value: val}
		if err == nil {
			v.expires = time.Now().Add(x.successTTL).UnixMilli()
			x.target.IncrGetSuccess()
		} else {
			v.expires = time.Now().Add(x.failedTTL).UnixMilli()
			x.target.IncrGetFailed()
		}
		x.lock.Lock()
		x.core.Add(key, v)
		x.lock.Unlock()
		res[key] = val
	}
	return res, err
}
//func (x *LayLRU[K, V]) Has(key K) bool {
// x.lock.Lock()
// defer x.lock.Unlock()
// return x.core.Contains(key)
//}
func (x *LayLRU[K, V]) Set(key K, value V) {
x.lock.Lock()
defer x.lock.Unlock()
x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
}
func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
x.lock.Lock()
defer x.lock.Unlock()

@@ -32,6 +32,29 @@ type slotLRU[K comparable, V any] struct {
hash func(k K) uint64
}
func (x *slotLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
var (
slotKeys = make(map[uint64][]K)
vs = make(map[K]V)
)
for _, k := range keys {
index := x.getIndex(k)
slotKeys[index] = append(slotKeys[index], k)
}
for k, v := range slotKeys {
batches, err := x.slots[k].GetBatch(v, fetch)
if err != nil {
return nil, err
}
for key, value := range batches {
vs[key] = value
}
}
return vs, nil
}
func (x *slotLRU[K, V]) getIndex(k K) uint64 {
return x.hash(k) % x.n
}
@@ -40,6 +63,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return x.slots[x.getIndex(key)].Get(key, fetch)
}
func (x *slotLRU[K, V]) Set(key K, value V) {
x.slots[x.getIndex(key)].Set(key, value)
}
func (x *slotLRU[K, V]) SetHas(key K, value V) bool {
return x.slots[x.getIndex(key)].SetHas(key, value)
}

@@ -2,60 +2,197 @@ package rpccache
import (
"context"
"fmt"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/user"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
"github.com/openimsdk/tools/db/cacheutil"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/redis/go-redis/v9"
"math/rand"
"strconv"
"time"
)
func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache {
func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) {
l := &sync.Mutex{}
x := &OnlineCache{
user: user,
group: group,
local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
user: user,
group: group,
fullUserCache: fullUserCache,
Lock: l,
Cond: sync.NewCond(l),
}
ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
switch x.fullUserCache {
case true:
log.ZDebug(ctx, "fullUserCache is true")
x.mapCache = cacheutil.NewCache[string, []int32]()
go func() {
if err := x.initUsersOnlineStatus(ctx); err != nil {
log.ZError(ctx, "initUsersOnlineStatus failed", err)
}
}()
case false:
log.ZDebug(ctx, "fullUserCache is false")
x.lruCache = lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
}),
})
x.CurrentPhase.Store(DoSubscribeOver)
x.Cond.Broadcast()
}
go func() {
ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10))
for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() {
userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
x.doSubscribe(ctx, rdb, fn)
}()
return x, nil
}
const (
Begin uint32 = iota
DoOnlineStatusOver
DoSubscribeOver
)
type OnlineCache struct {
user rpcclient.UserRpcClient
group *GroupLocalCache
// fullUserCache if enabled, caches the online status of all users using mapCache;
// otherwise, only a portion of users' online statuses (regardless of whether they are online) will be cached using lruCache.
fullUserCache bool
lruCache lru.LRU[string, []int32]
mapCache *cacheutil.Cache[string, []int32]
Lock *sync.Mutex
Cond *sync.Cond
CurrentPhase atomic.Uint32
}
func (o *OnlineCache) initUsersOnlineStatus(ctx context.Context) (err error) {
log.ZDebug(ctx, "init users online status begin")
var (
totalSet atomic.Int64
maxTries = 5
retryInterval = time.Second * 5
resp *user.GetAllOnlineUsersResp
)
defer func(t time.Time) {
log.ZInfo(ctx, "init users online status end", "cost", time.Since(t), "totalSet", totalSet.Load())
o.CurrentPhase.Store(DoOnlineStatusOver)
o.Cond.Broadcast()
}(time.Now())
retryOperation := func(operation func() error, operationName string) error {
for i := 0; i < maxTries; i++ {
if err = operation(); err != nil {
log.ZWarn(ctx, fmt.Sprintf("initUsersOnlineStatus: %s failed", operationName), err)
time.Sleep(retryInterval)
} else {
return nil
}
}
return err
}
cursor := uint64(0)
for resp == nil || resp.NextCursor != 0 {
if err = retryOperation(func() error {
resp, err = o.user.GetAllOnlineUsers(ctx, cursor)
if err != nil {
log.ZError(ctx, "OnlineCache setUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
continue
return err
}
for _, u := range resp.StatusList {
if u.Status == constant.Online {
o.setUserOnline(u.UserID, u.PlatformIDs)
}
totalSet.Add(1)
}
storageCache := x.setUserOnline(userID, platformIDs)
log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
cursor = resp.NextCursor
return nil
}, "getAllOnlineUsers"); err != nil {
return err
}
}
return nil
}
func (o *OnlineCache) doSubscribe(ctx context.Context, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) {
o.Lock.Lock()
ch := rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel()
for o.CurrentPhase.Load() < DoOnlineStatusOver {
o.Cond.Wait()
}
o.Lock.Unlock()
log.ZInfo(ctx, "begin doSubscribe")
doMessage := func(message *redis.Message) {
userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
if err != nil {
log.ZError(ctx, "OnlineCache setHasUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
return
}
log.ZDebug(ctx, fmt.Sprintf("get subscribe %s message", cachekey.OnlineChannel), "useID", userID, "platformIDs", platformIDs)
switch o.fullUserCache {
case true:
if len(platformIDs) == 0 {
// offline
o.mapCache.Delete(userID)
} else {
o.mapCache.Store(userID, platformIDs)
}
case false:
storageCache := o.setHasUserOnline(userID, platformIDs)
log.ZDebug(ctx, "OnlineCache setHasUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
if fn != nil {
fn(ctx, userID, platformIDs)
}
}
}()
return x
}
}
type OnlineCache struct {
user rpcclient.UserRpcClient
group *GroupLocalCache
local lru.LRU[string, []int32]
if o.CurrentPhase.Load() == DoOnlineStatusOver {
for done := false; !done; {
select {
case message := <-ch:
doMessage(message)
default:
o.CurrentPhase.Store(DoSubscribeOver)
o.Cond.Broadcast()
done = true
}
}
}
for message := range ch {
doMessage(message)
}
}
func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
platformIDs, err := o.local.Get(userID, func() ([]int32, error) {
platformIDs, err := o.lruCache.Get(userID, func() ([]int32, error) {
return o.user.GetUserOnlinePlatform(ctx, userID)
})
if err != nil {
log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID)
return nil, err
}
log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs)
//log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs)
return platformIDs, nil
}
@@ -69,6 +206,16 @@ func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string)
return platformIDs, nil
}
// func (o *OnlineCache) GetUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string]int32, error) {
// platformIDs, err := o.getUserOnlinePlatform(ctx, userIDs)
// if err != nil {
// return nil, err
// }
// tmp := make([]int32, len(platformIDs))
// copy(tmp, platformIDs)
// return platformIDs, nil
// }
func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
platformIDs, err := o.getUserOnlinePlatform(ctx, userID)
if err != nil {
@@ -77,10 +224,68 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
return len(platformIDs) > 0, nil
}
func (o *OnlineCache) getUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string][]int32, error) {
platformIDsMap, err := o.lruCache.GetBatch(userIDs, func(missingUsers []string) (map[string][]int32, error) {
platformIDsMap := make(map[string][]int32)
usersStatus, err := o.user.GetUsersOnlinePlatform(ctx, missingUsers)
if err != nil {
return nil, err
}
for _, u := range usersStatus {
platformIDsMap[u.UserID] = u.PlatformIDs
}
return platformIDsMap, nil
})
if err != nil {
log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userIDs)
return nil, err
}
return platformIDsMap, nil
}
func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, []string, error) {
t := time.Now()
var (
onlineUserIDs = make([]string, 0, len(userIDs))
offlineUserIDs = make([]string, 0, len(userIDs))
)
switch o.fullUserCache {
case true:
for _, userID := range userIDs {
if _, ok := o.mapCache.Load(userID); ok {
onlineUserIDs = append(onlineUserIDs, userID)
} else {
offlineUserIDs = append(offlineUserIDs, userID)
}
}
case false:
userOnlineMap, err := o.getUserOnlinePlatformBatch(ctx, userIDs)
if err != nil {
return nil, nil, err
}
for key, value := range userOnlineMap {
if len(value) > 0 {
onlineUserIDs = append(onlineUserIDs, key)
} else {
offlineUserIDs = append(offlineUserIDs, key)
}
}
}
log.ZInfo(ctx, "get users online", "online users length", len(userIDs), "offline users length", len(offlineUserIDs), "cost", time.Since(t))
return userIDs, offlineUserIDs, nil
}
//func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
// onlineUserIDs := make([]string, 0, len(userIDs))
// for _, userID := range userIDs {
// online, err := o.GetUserOnline(ctx, userID)
// online, err := o.GetUserOnline(ctx, userID)
// if err != nil {
// return nil, err
// }
@@ -111,6 +316,15 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
// return onlineUserIDs, nil
//}
func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool {
return o.local.SetHas(userID, platformIDs)
func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) {
switch o.fullUserCache {
case true:
o.mapCache.Store(userID, platformIDs)
case false:
o.lruCache.Set(userID, platformIDs)
}
}
func (o *OnlineCache) setHasUserOnline(userID string, platformIDs []int32) bool {
return o.lruCache.SetHas(userID, platformIDs)
}
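Consumers coordinate with this cache through the exported Lock/Cond/CurrentPhase trio; a minimal sketch of the gate, mirroring ConsumeClaim in the push handler above (waitForSubscribe is hypothetical):

package example

import "github.com/openimsdk/open-im-server/v3/pkg/rpccache"

// waitForSubscribe blocks until the cache has finished its initial load
// and drained the subscription backlog (DoSubscribeOver).
func waitForSubscribe(oc *rpccache.OnlineCache) {
	oc.Lock.Lock()
	for oc.CurrentPhase.Load() < rpccache.DoSubscribeOver {
		oc.Cond.Wait() // releases Lock while waiting, reacquires on wake
	}
	oc.Lock.Unlock()
}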

@@ -169,6 +169,15 @@ func (u *UserRpcClient) Access(ctx context.Context, ownerUserID string) error {
return authverify.CheckAccessV3(ctx, ownerUserID, u.imAdminUserID)
}
// GetAllUserID retrieves all user IDs with pagination options.
func (u *UserRpcClient) GetAllUserID(ctx context.Context, pageNumber, showNumber int32) (*user.GetAllUserIDResp, error) {
resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}})
if err != nil {
return nil, err
}
return resp, nil
}
// GetAllUserIDs retrieves all user IDs with pagination options.
func (u *UserRpcClient) GetAllUserIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) {
resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}})
@@ -215,3 +224,7 @@ func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string
}
return resp[0].PlatformIDs, nil
}
func (u *UserRpcClient) GetAllOnlineUsers(ctx context.Context, cursor uint64) (*user.GetAllOnlineUsersResp, error) {
return u.Client.GetAllOnlineUsers(ctx, &user.GetAllOnlineUsersReq{Cursor: cursor})
}

@@ -3,8 +3,8 @@ serviceBinaries:
openim-crontask: 1
openim-rpc-user: 1
openim-msggateway: 1
openim-push: 4
openim-msgtransfer: 4
openim-push: 8
openim-msgtransfer: 8
openim-rpc-conversation: 1
openim-rpc-auth: 1
openim-rpc-group: 1
