diff --git a/cmd/openim-api/main.go b/cmd/openim-api/main.go index 58e540c05..e29ed2a59 100644 --- a/cmd/openim-api/main.go +++ b/cmd/openim-api/main.go @@ -25,5 +25,4 @@ func main() { if err := cmd.NewApiCmd().Exec(); err != nil { program.ExitWithError(err) } - } diff --git a/go.mod b/go.mod index 6302453cf..6450f3615 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/openimsdk/protocol v0.0.69-alpha.22 + github.com/openimsdk/protocol v0.0.69-alpha.24 github.com/openimsdk/tools v0.0.49-alpha.45 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 diff --git a/go.sum b/go.sum index 0be33db42..92350d745 100644 --- a/go.sum +++ b/go.sum @@ -262,8 +262,6 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y= github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= -github.com/openimsdk/protocol v0.0.69-alpha.22 h1:kifZWVNDkg9diXFJUJ/Q9xFc80cveBhc+1dUXcE9xHQ= -github.com/openimsdk/protocol v0.0.69-alpha.22/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8= github.com/openimsdk/tools v0.0.49-alpha.45 h1:XIzCoef4myybOiIlGuRY9FTtGBisZFC4Uy4PhG0ZWQ0= github.com/openimsdk/tools v0.0.49-alpha.45/go.mod h1:HtSRjPTL8PsuZ+PhR5noqzrYBF0sdwW3/O/sWVucWg8= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= diff --git a/internal/msggateway/client.go b/internal/msggateway/client.go index 0581a025b..bed454e49 100644 --- a/internal/msggateway/client.go +++ b/internal/msggateway/client.go @@ -72,6 +72,8 @@ type Client struct { closed atomic.Bool closedErr error token string + subLock sync.Mutex + subUserIDs map[string]struct{} } // ResetClient updates the client's state with new connection and context information. 
@@ -202,6 +204,8 @@ func (c *Client) handleMessage(message []byte) error { resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq) case WsSetBackgroundStatus: resp, messageErr = c.setAppBackgroundStatus(ctx, binaryReq) + case WsSubUserOnlineStatus: + resp, messageErr = c.longConnServer.SubUserOnlineStatus(ctx, c, binaryReq) default: return fmt.Errorf( "ReqIdentifier failed,sendID:%s,msgIncr:%s,reqIdentifier:%d", diff --git a/internal/msggateway/compressor_test.go b/internal/msggateway/compressor_test.go index 173c9bb20..952bd4d95 100644 --- a/internal/msggateway/compressor_test.go +++ b/internal/msggateway/compressor_test.go @@ -16,10 +16,10 @@ package msggateway import ( "crypto/rand" + "github.com/stretchr/testify/assert" "sync" "testing" - - "github.com/stretchr/testify/assert" + "unsafe" ) func mockRandom() []byte { @@ -132,3 +132,8 @@ func BenchmarkDecompressWithSyncPool(b *testing.B) { assert.Equal(b, nil, err) } } + +func TestName(t *testing.T) { + t.Log(unsafe.Sizeof(Client{})) + +} diff --git a/internal/msggateway/constant.go b/internal/msggateway/constant.go index 64664ac0a..bc74ed583 100644 --- a/internal/msggateway/constant.go +++ b/internal/msggateway/constant.go @@ -43,6 +43,7 @@ const ( WSKickOnlineMsg = 2002 WsLogoutMsg = 2003 WsSetBackgroundStatus = 2004 + WsSubUserOnlineStatus = 2005 WSDataError = 3001 ) diff --git a/internal/msggateway/hub_server.go b/internal/msggateway/hub_server.go index 8ff6d1001..3891aa532 100644 --- a/internal/msggateway/hub_server.go +++ b/internal/msggateway/hub_server.go @@ -19,6 +19,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc" + "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/tools/discovery" @@ -31,6 +32,10 @@ import ( func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error { s.LongConnServer.SetDiscoveryRegistry(disCov, config) msggateway.RegisterMsgGatewayServer(server, s) + s.userRcp = rpcclient.NewUserRpcClient(disCov, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) + if s.ready != nil { + return s.ready(s) + } return nil } @@ -50,18 +55,21 @@ type Server struct { LongConnServer LongConnServer config *Config pushTerminal map[int]struct{} + ready func(srv *Server) error + userRcp rpcclient.UserRpcClient } func (s *Server) SetLongConnServer(LongConnServer LongConnServer) { s.LongConnServer = LongConnServer } -func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config) *Server { +func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config, ready func(srv *Server) error) *Server { s := &Server{ rpcPort: rpcPort, LongConnServer: longConnServer, pushTerminal: make(map[int]struct{}), config: conf, + ready: ready, } s.pushTerminal[constant.IOSPlatformID] = struct{}{} s.pushTerminal[constant.AndroidPlatformID] = struct{}{} diff --git a/internal/msggateway/init.go b/internal/msggateway/init.go index f4d8b0381..44e79e412 100644 --- a/internal/msggateway/init.go +++ b/internal/msggateway/init.go @@ -17,6 +17,8 @@ package msggateway import ( "context" "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/rpccache" + "github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/tools/utils/datautil" "time" @@ -26,6 +28,7 @@ 
import ( type Config struct { MsgGateway config.MsgGateway Share config.Share + RedisConfig config.Redis WebhooksConfig config.Webhooks Discovery config.Discovery } @@ -42,18 +45,25 @@ func Start(ctx context.Context, index int, conf *Config) error { if err != nil { return err } - longServer, err := NewWsServer( + rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build()) + if err != nil { + return err + } + longServer := NewWsServer( conf, WithPort(wsPort), WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)), WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second), WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen), ) - if err != nil { - return err - } - hubServer := NewServer(rpcPort, longServer, conf) + hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error { + longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges) + return nil + }) + + go longServer.ChangeOnlineStatus(4) + netDone := make(chan error) go func() { err = hubServer.Start(ctx, index, conf) diff --git a/internal/msggateway/n_ws_server.go b/internal/msggateway/n_ws_server.go index defec16df..e903084a9 100644 --- a/internal/msggateway/n_ws_server.go +++ b/internal/msggateway/n_ws_server.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook" + "github.com/openimsdk/open-im-server/v3/pkg/rpccache" pbAuth "github.com/openimsdk/protocol/auth" "github.com/openimsdk/tools/mcontext" "net/http" @@ -48,6 +49,7 @@ type LongConnServer interface { KickUserConn(client *Client) error UnRegister(c *Client) SetKickHandlerInfo(i *kickHandler) + SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) Compressor Encoder MessageHandler @@ -60,7 +62,9 @@ type WsServer struct { registerChan chan *Client unregisterChan chan *Client kickHandlerChan chan *kickHandler - clients *UserMap + clients UserMap + online *rpccache.OnlineCache + subscription *Subscription clientPool sync.Pool onlineUserNum atomic.Int64 onlineUserConnNum atomic.Int64 @@ -90,18 +94,18 @@ func (ws *WsServer) SetDiscoveryRegistry(disCov discovery.SvcDiscoveryRegistry, ws.disCov = disCov } -func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) { - err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID) - if err != nil { - log.ZWarn(ctx, "SetUserStatus err", err) - } - switch status { - case constant.Online: - ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID()) - case constant.Offline: - ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID()) - } -} +//func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) { +// err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID) +// if err != nil { +// log.ZWarn(ctx, "SetUserStatus err", err) +// } +// switch status { +// case constant.Online: +// ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID()) +// case constant.Offline: +// ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID()) +// } +//} 
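+ +// The per-connection status RPC above is superseded by the batched loop in online.go: +// ChangeOnlineStatus drains userMap state changes and periodic renewals and reports +// them to the user service in sharded SetUserOnlineStatus batches.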
func (ws *WsServer) UnRegister(c *Client) { ws.unregisterChan <- c @@ -119,11 +123,13 @@ func (ws *WsServer) GetUserPlatformCons(userID string, platform int) ([]*Client, return ws.clients.Get(userID, platform) } -func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) { +func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer { var config configs for _, o := range opts { o(&config) } + //userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) + v := validator.New() return &WsServer{ msgGatewayConfig: msgGatewayConfig, @@ -141,10 +147,11 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) { kickHandlerChan: make(chan *kickHandler, 1000), validate: v, clients: newUserMap(), + subscription: newSubscription(), Compressor: NewGzipCompressor(), Encoder: NewGobEncoder(), webhookClient: webhook.NewWebhookClient(msgGatewayConfig.WebhooksConfig.URL), - }, nil + } } func (ws *WsServer) Run(done chan error) error { @@ -278,11 +285,11 @@ func (ws *WsServer) registerClient(client *Client) { }() } - wg.Add(1) - go func() { - defer wg.Done() - ws.SetUserOnlineStatus(client.ctx, client, constant.Online) - }() + //wg.Add(1) + //go func() { + // defer wg.Done() + // ws.SetUserOnlineStatus(client.ctx, client, constant.Online) + //}() wg.Wait() @@ -309,7 +316,7 @@ func getRemoteAdders(client []*Client) string { } func (ws *WsServer) KickUserConn(client *Client) error { - ws.clients.deleteClients(client.UserID, []*Client{client}) + ws.clients.DeleteClients(client.UserID, []*Client{client}) return client.KickOnlineMessage() } @@ -325,7 +332,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien if !clientOK { return } - ws.clients.deleteClients(newClient.UserID, oldClients) + ws.clients.DeleteClients(newClient.UserID, oldClients) for _, c := range oldClients { err := c.KickOnlineMessage() if err != nil { @@ -345,13 +352,15 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien func (ws *WsServer) unregisterClient(client *Client) { defer ws.clientPool.Put(client) - isDeleteUser := ws.clients.delete(client.UserID, client.ctx.GetRemoteAddr()) + isDeleteUser := ws.clients.DeleteClients(client.UserID, []*Client{client}) if isDeleteUser { ws.onlineUserNum.Add(-1) prommetrics.OnlineUserGauge.Dec() } ws.onlineUserConnNum.Add(-1) - ws.SetUserOnlineStatus(client.ctx, client, constant.Offline) + // Release this connection's subscriptions; DelClient also clears client.subUserIDs under subLock. + ws.subscription.DelClient(client) + //ws.SetUserOnlineStatus(client.ctx, client, constant.Offline) log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num", ws.onlineUserNum.Load(), "online user conn Num", ws.onlineUserConnNum.Load(), diff --git a/internal/msggateway/online.go b/internal/msggateway/online.go new file mode 100644 index 000000000..b50608f93 --- /dev/null +++ b/internal/msggateway/online.go @@ -0,0 +1,112 @@ +package msggateway + +import ( + "context" + "crypto/md5" + "encoding/binary" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mcontext" + "github.com/openimsdk/tools/utils/datautil" + "math/rand" + "strconv" + "time" +) + +func (ws *WsServer) ChangeOnlineStatus(concurrent int) { + if concurrent < 1 { + concurrent = 1 + } + const renewalTime = cachekey.OnlineExpire / 3 + //const renewalTime = time.Second * 10 + renewalTicker
:= time.NewTicker(renewalTime) + + requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent) + changeStatus := make([][]UserState, concurrent) + + for i := 0; i < concurrent; i++ { + requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64) + changeStatus[i] = make([]UserState, 0, 100) + } + + mergeTicker := time.NewTicker(time.Second) + + local2pb := func(u UserState) *pbuser.UserOnlineStatus { + return &pbuser.UserOnlineStatus{ + UserID: u.UserID, + Online: u.Online, + Offline: u.Offline, + } + } + + rNum := rand.Uint64() + pushUserState := func(us ...UserState) { + for _, u := range us { + sum := md5.Sum([]byte(u.UserID)) + i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent) + changeStatus[i] = append(changeStatus[i], u) + status := changeStatus[i] + if len(status) == cap(status) { + req := &pbuser.SetUserOnlineStatusReq{ + Status: datautil.Slice(status, local2pb), + } + changeStatus[i] = status[:0] + select { + case requestChs[i] <- req: + default: + log.ZError(context.Background(), "user online processing is too slow", nil) + } + } + } + } + + pushAllUserState := func() { + for i, status := range changeStatus { + if len(status) == 0 { + continue + } + req := &pbuser.SetUserOnlineStatusReq{ + Status: datautil.Slice(status, local2pb), + } + changeStatus[i] = status[:0] + select { + case requestChs[i] <- req: + default: + log.ZError(context.Background(), "user online processing is too slow", nil) + } + } + } + + opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10)) + doRequest := func(req *pbuser.SetUserOnlineStatusReq) { + ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5) + defer cancel() + if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil { + log.ZError(ctx, "update user online status", err) + } + } + + for i := 0; i < concurrent; i++ { + go func(ch <-chan *pbuser.SetUserOnlineStatusReq) { + for req := range ch { + doRequest(req) + } + }(requestChs[i]) + } + + for { + select { + case <-mergeTicker.C: + pushAllUserState() + case now := <-renewalTicker.C: + deadline := now.Add(-cachekey.OnlineExpire / 3) + users := ws.clients.GetAllUserStatus(deadline, now) + log.ZDebug(context.Background(), "renewal ticker", "deadline", deadline, "nowtime", now, "num", len(users)) + pushUserState(users...) 
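+ // Connection open/close transitions reported by the userMap channel below are merged + // into the same sharded batches as the periodic renewals above.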
+ case state := <-ws.clients.UserState(): + log.ZDebug(context.Background(), "OnlineCache user online change", "userID", state.UserID, "online", state.Online, "offline", state.Offline) + pushUserState(state) + } + } +} diff --git a/internal/msggateway/subscription.go b/internal/msggateway/subscription.go new file mode 100644 index 000000000..9460f5dbf --- /dev/null +++ b/internal/msggateway/subscription.go @@ -0,0 +1,181 @@ +package msggateway + +import ( + "context" + "encoding/json" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/utils/datautil" + "github.com/openimsdk/tools/utils/idutil" + "google.golang.org/protobuf/proto" + "sync" + "time" +) + +func (ws *WsServer) subscriberUserOnlineStatusChanges(ctx context.Context, userID string, platformIDs []int32) { + if ws.clients.RecvSubChange(userID, platformIDs) { + log.ZDebug(ctx, "gateway receive subscription message and go back online", "userID", userID, "platformIDs", platformIDs) + } else { + log.ZDebug(ctx, "gateway ignore user online status changes", "userID", userID, "platformIDs", platformIDs) + } + ws.pushUserIDOnlineStatus(ctx, userID, platformIDs) +} + +func (ws *WsServer) SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) { + var sub sdkws.SubUserOnlineStatus + if err := proto.Unmarshal(data.Data, &sub); err != nil { + return nil, err + } + ws.subscription.Sub(client, sub.SubscribeUserID, sub.UnsubscribeUserID) + var resp sdkws.SubUserOnlineStatusTips + if len(sub.SubscribeUserID) > 0 { + resp.Subscribers = make([]*sdkws.SubUserOnlineStatusElem, 0, len(sub.SubscribeUserID)) + for _, userID := range sub.SubscribeUserID { + platformIDs, err := ws.online.GetUserOnlinePlatform(ctx, userID) + if err != nil { + return nil, err + } + resp.Subscribers = append(resp.Subscribers, &sdkws.SubUserOnlineStatusElem{ + UserID: userID, + OnlinePlatformIDs: platformIDs, + }) + } + } + return proto.Marshal(&resp) +} + +type subClient struct { + clients map[string]*Client +} + +func newSubscription() *Subscription { + return &Subscription{ + userIDs: make(map[string]*subClient), + } +} + +type Subscription struct { + lock sync.RWMutex + userIDs map[string]*subClient +} + +func (s *Subscription) GetClient(userID string) []*Client { + s.lock.RLock() + defer s.lock.RUnlock() + cs, ok := s.userIDs[userID] + if !ok { + return nil + } + clients := make([]*Client, 0, len(cs.clients)) + for _, client := range cs.clients { + clients = append(clients, client) + } + return clients +} + +func (s *Subscription) DelClient(client *Client) { + client.subLock.Lock() + userIDs := datautil.Keys(client.subUserIDs) + for _, userID := range userIDs { + delete(client.subUserIDs, userID) + } + client.subLock.Unlock() + if len(userIDs) == 0 { + return + } + addr := client.ctx.GetRemoteAddr() + s.lock.Lock() + defer s.lock.Unlock() + for _, userID := range userIDs { + sub, ok := s.userIDs[userID] + if !ok { + continue + } + delete(sub.clients, addr) + if len(sub.clients) == 0 { + delete(s.userIDs, userID) + } + } +} + +func (s *Subscription) Sub(client *Client, addUserIDs, delUserIDs []string) { + if len(addUserIDs)+len(delUserIDs) == 0 { + return + } + var ( + del = make(map[string]struct{}) + add = make(map[string]struct{}) + ) + client.subLock.Lock() + for _, userID := range delUserIDs { + if _, ok := client.subUserIDs[userID]; !ok { + continue + } + del[userID] = struct{}{} + delete(client.subUserIDs, userID) + } + for _, userID := range 
addUserIDs { + delete(del, userID) + if _, ok := client.subUserIDs[userID]; ok { + continue + } + client.subUserIDs[userID] = struct{}{} + add[userID] = struct{}{} + } + client.subLock.Unlock() + if len(del)+len(add) == 0 { + return + } + addr := client.ctx.GetRemoteAddr() + s.lock.Lock() + defer s.lock.Unlock() + for userID := range del { + sub, ok := s.userIDs[userID] + if !ok { + continue + } + delete(sub.clients, addr) + if len(sub.clients) == 0 { + delete(s.userIDs, userID) + } + } + for userID := range add { + sub, ok := s.userIDs[userID] + if !ok { + sub = &subClient{clients: make(map[string]*Client)} + s.userIDs[userID] = sub + } + sub.clients[addr] = client + } +} + +func (ws *WsServer) pushUserIDOnlineStatus(ctx context.Context, userID string, platformIDs []int32) { + clients := ws.subscription.GetClient(userID) + if len(clients) == 0 { + return + } + msgContent, err := json.Marshal(platformIDs) + if err != nil { + log.ZError(ctx, "pushUserIDOnlineStatus json.Marshal", err) + return + } + now := time.Now().UnixMilli() + msgID := idutil.GetMsgIDByMD5(userID) + msg := &sdkws.MsgData{ + SendID: userID, + ClientMsgID: msgID, + ServerMsgID: msgID, + SenderPlatformID: constant.AdminPlatformID, + SessionType: constant.NotificationChatType, + ContentType: constant.UserSubscribeOnlineStatusNotification, + Content: msgContent, + SendTime: now, + CreateTime: now, + } + for _, client := range clients { + msg.RecvID = client.UserID + if err := client.PushMessage(ctx, msg); err != nil { + log.ZError(ctx, "UserSubscribeOnlineStatusNotification push failed", err, "userID", client.UserID, "platformID", client.PlatformID, "changeUserID", userID, "content", msgContent) + } + } +} diff --git a/internal/msggateway/user_map.go b/internal/msggateway/user_map.go index 79cc53d1b..bd1f19728 100644 --- a/internal/msggateway/user_map.go +++ b/internal/msggateway/user_map.go @@ -1,135 +1,185 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- package msggateway import ( - "context" - "sync" - - "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/utils/datautil" + "sync" + "time" ) -type UserMap struct { - m sync.Map +type UserMap interface { + GetAll(userID string) ([]*Client, bool) + Get(userID string, platformID int) ([]*Client, bool, bool) + Set(userID string, v *Client) + DeleteClients(userID string, clients []*Client) (isDeleteUser bool) + UserState() <-chan UserState + GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState + RecvSubChange(userID string, platformIDs []int32) bool } -func newUserMap() *UserMap { - return &UserMap{} +type UserState struct { + UserID string + Online []int32 + Offline []int32 } -func (u *UserMap) GetAll(key string) ([]*Client, bool) { - allClients, ok := u.m.Load(key) - if ok { - return allClients.([]*Client), ok - } - return nil, ok +type UserPlatform struct { + Time time.Time + Clients []*Client } -func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) { - allClients, userExisted := u.m.Load(key) - if userExisted { - var clients []*Client - for _, client := range allClients.([]*Client) { - if client.PlatformID == platformID { - clients = append(clients, client) - } - } - if len(clients) > 0 { - return clients, userExisted, true - } - return clients, userExisted, false +func (u *UserPlatform) PlatformIDs() []int32 { + if len(u.Clients) == 0 { + return nil + } + platformIDs := make([]int32, 0, len(u.Clients)) + for _, client := range u.Clients { + platformIDs = append(platformIDs, int32(client.PlatformID)) } - return nil, userExisted, false + return platformIDs } -// Set adds a client to the map. -func (u *UserMap) Set(key string, v *Client) { - allClients, existed := u.m.Load(key) - if existed { - log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID) - oldClients := allClients.([]*Client) - oldClients = append(oldClients, v) - u.m.Store(key, oldClients) - } else { - log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID) +func (u *UserPlatform) PlatformIDSet() map[int32]struct{} { + if len(u.Clients) == 0 { + return nil + } + platformIDs := make(map[int32]struct{}) + for _, client := range u.Clients { + platformIDs[int32(client.PlatformID)] = struct{}{} + } + return platformIDs +} - var clients []*Client - clients = append(clients, v) - u.m.Store(key, clients) +func newUserMap() UserMap { + return &userMap{ + data: make(map[string]*UserPlatform), + ch: make(chan UserState, 10000), } } -func (u *UserMap) delete(key string, connRemoteAddr string) (isDeleteUser bool) { - // Attempt to load the clients associated with the key. - allClients, existed := u.m.Load(key) - if !existed { - // Return false immediately if the key does not exist. +type userMap struct { + lock sync.RWMutex + data map[string]*UserPlatform + ch chan UserState +} + +func (u *userMap) RecvSubChange(userID string, platformIDs []int32) bool { + u.lock.RLock() + defer u.lock.RUnlock() + result, ok := u.data[userID] + if !ok { return false } - - // Convert allClients to a slice of *Client. - oldClients := allClients.([]*Client) - var remainingClients []*Client - for _, client := range oldClients { - // Keep clients that do not match the connRemoteAddr. 
- if client.ctx.GetRemoteAddr() != connRemoteAddr { - remainingClients = append(remainingClients, client) - } + localPlatformIDs := result.PlatformIDSet() + for _, platformID := range platformIDs { + delete(localPlatformIDs, platformID) } + if len(localPlatformIDs) == 0 { + return false + } + u.push(userID, result, nil) + return true +} - // If no clients remain after filtering, delete the key from the map. - if len(remainingClients) == 0 { - u.m.Delete(key) +func (u *userMap) push(userID string, userPlatform *UserPlatform, offline []int32) bool { + select { + case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}: + userPlatform.Time = time.Now() return true + default: + return false } +} - // Otherwise, update the key with the remaining clients. - u.m.Store(key, remainingClients) - return false +func (u *userMap) GetAll(userID string) ([]*Client, bool) { + u.lock.RLock() + defer u.lock.RUnlock() + result, ok := u.data[userID] + if !ok { + return nil, false + } + return result.Clients, true } -func (u *UserMap) deleteClients(key string, clients []*Client) (isDeleteUser bool) { - m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) { - return c.ctx.GetRemoteAddr(), struct{}{} - }) - allClients, existed := u.m.Load(key) - if !existed { - // If the key doesn't exist, return false. - return false +func (u *userMap) Get(userID string, platformID int) ([]*Client, bool, bool) { + u.lock.RLock() + defer u.lock.RUnlock() + result, ok := u.data[userID] + if !ok { + return nil, false, false } + var clients []*Client + for _, client := range result.Clients { + if client.PlatformID == platformID { + clients = append(clients, client) + } + } + return clients, true, len(clients) > 0 +} - // Filter out clients that are in the deleteMap. - oldClients := allClients.([]*Client) - var remainingClients []*Client - for _, client := range oldClients { - if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted { - remainingClients = append(remainingClients, client) +func (u *userMap) Set(userID string, client *Client) { + u.lock.Lock() + defer u.lock.Unlock() + result, ok := u.data[userID] + if ok { + result.Clients = append(result.Clients, client) + } else { + result = &UserPlatform{ + Clients: []*Client{client}, } + u.data[userID] = result } + u.push(client.UserID, result, nil) +} - // Update or delete the key based on the remaining clients. 
- if len(remainingClients) == 0 { - u.m.Delete(key) - return true +func (u *userMap) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) { + if len(clients) == 0 { + return false } + u.lock.Lock() + defer u.lock.Unlock() + result, ok := u.data[userID] + if !ok { + return false + } + offline := make([]int32, 0, len(clients)) + deleteAddr := datautil.SliceSetAny(clients, func(client *Client) string { + return client.ctx.GetRemoteAddr() + }) + tmp := result.Clients + result.Clients = result.Clients[:0] + for _, client := range tmp { + if _, delCli := deleteAddr[client.ctx.GetRemoteAddr()]; delCli { + offline = append(offline, int32(client.PlatformID)) + } else { + result.Clients = append(result.Clients, client) + } + } + defer u.push(userID, result, offline) + if len(result.Clients) > 0 { + return false + } + delete(u.data, userID) + return true +} - u.m.Store(key, remainingClients) - return false +func (u *userMap) GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState { + u.lock.RLock() + defer u.lock.RUnlock() + result := make([]UserState, 0, len(u.data)) + for userID, userPlatform := range u.data { + if deadline.Before(userPlatform.Time) { // refreshed within the current renewal window; skip + continue + } + userPlatform.Time = nowtime + online := make([]int32, 0, len(userPlatform.Clients)) + for _, client := range userPlatform.Clients { + online = append(online, int32(client.PlatformID)) + } + result = append(result, UserState{UserID: userID, Online: online}) + } + return result } -func (u *UserMap) DeleteAll(key string) { - u.m.Delete(key) +func (u *userMap) UserState() <-chan UserState { + return u.ch } diff --git a/internal/push/push_handler.go b/internal/push/push_handler.go index dfe0e7b55..ed87b3929 100644 --- a/internal/push/push_handler.go +++ b/internal/push/push_handler.go @@ -28,6 +28,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/protocol/constant" pbchat "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/protocol/msggateway" pbpush "github.com/openimsdk/protocol/push" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/discovery" @@ -45,6 +46,7 @@ type ConsumerHandler struct { pushConsumerGroup *kafka.MConsumerGroup offlinePusher offlinepush.OfflinePusher onlinePusher OnlinePusher + onlineCache *rpccache.OnlineCache groupLocalCache *rpccache.GroupLocalCache conversationLocalCache *rpccache.ConversationLocalCache msgRpcClient rpcclient.MessageRpcClient @@ -63,16 +65,17 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, if err != nil { return nil, err } + userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) consumerHandler.offlinePusher = offlinePusher consumerHandler.onlinePusher = NewOnlinePusher(client, config) consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupRpcClient, &config.LocalCacheConfig, rdb) consumerHandler.msgRpcClient = rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) consumerHandler.conversationRpcClient = rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) - consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, - &config.LocalCacheConfig, rdb) + consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient,
&config.LocalCacheConfig, rdb) consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL) consumerHandler.config = config + consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil) return &consumerHandler, nil } @@ -125,12 +128,12 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s } // Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType. -func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) error { +func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) { log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String()) if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil { return err } - wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, userIDs) + wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs) if err != nil { return err } @@ -179,6 +182,38 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat return true } +func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) { + var ( + onlineUserIDs []string + offlineUserIDs []string + ) + for _, userID := range pushToUserIDs { + online, err := c.onlineCache.GetUserOnline(ctx, userID) + if err != nil { + return nil, err + } + if online { + onlineUserIDs = append(onlineUserIDs, userID) + } else { + offlineUserIDs = append(offlineUserIDs, userID) + } + } + var result []*msggateway.SingleMsgToUserResults + if len(onlineUserIDs) > 0 { + var err error + result, err = c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, onlineUserIDs) + if err != nil { + return nil, err + } + } + for _, userID := range offlineUserIDs { + result = append(result, &msggateway.SingleMsgToUserResults{ + UserID: userID, + }) + } + return result, nil +} + func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) { log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) var pushToUserIDs []string @@ -192,7 +227,7 @@ func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *s return err } - wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs) + wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs) if err != nil { return err } diff --git a/internal/rpc/user/online.go b/internal/rpc/user/online.go new file mode 100644 index 000000000..e853ceae2 --- /dev/null +++ b/internal/rpc/user/online.go @@ -0,0 +1,122 @@ +package user + +import ( + "context" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" +) + +func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) { + platformIDs, err := s.online.GetOnline(ctx, userID) + if err != nil { + return nil, err + } + status := pbuser.OnlineStatus{ + UserID: userID, + PlatformIDs: platformIDs, + } + if len(platformIDs) > 0 { + status.Status = constant.Online + } else { + status.Status = constant.Offline + } + return &status, nil +} + +func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) { + res =
make([]*pbuser.OnlineStatus, 0, len(userIDs)) + for _, userID := range userIDs { + status, err := s.getUserOnlineStatus(ctx, userID) + if err != nil { + return nil, err + } + res = append(res, status) + } + return res, nil +} + +// SubscribeOrCancelUsersStatus Subscribe online or cancel online users. +func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) { + if req.Genre == constant.SubscriberUser { + err := s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs) + if err != nil { + return nil, err + } + var status []*pbuser.OnlineStatus + status, err = s.getUsersOnlineStatus(ctx, req.UserIDs) + if err != nil { + return nil, err + } + return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil + } else if req.Genre == constant.Unsubscribe { + err := s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs) + if err != nil { + return nil, err + } + } + return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil +} + +// GetUserStatus Get the online status of the user. +func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) { + res, err := s.getUsersOnlineStatus(ctx, req.UserIDs) + if err != nil { + return nil, err + } + return &pbuser.GetUserStatusResp{StatusList: res}, nil +} + +// SetUserStatus Synchronize user's online status. +func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) { + var ( + online []int32 + offline []int32 + ) + switch req.Status { + case constant.Online: + online = []int32{req.PlatformID} + case constant.Offline: + offline = []int32{req.PlatformID} + } + if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil { + return nil, err + } + list, err := s.db.GetSubscribedList(ctx, req.UserID) + if err != nil { + return nil, err + } + for _, userID := range list { + tips := &sdkws.UserStatusChangeTips{ + FromUserID: req.UserID, + ToUserID: userID, + Status: req.Status, + PlatformID: req.PlatformID, + } + s.userNotificationSender.UserStatusChangeNotification(ctx, tips) + } + + return &pbuser.SetUserStatusResp{}, nil +} + +// GetSubscribeUsersStatus Get the online status of subscribers.
+func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) { + userList, err := s.db.GetAllSubscribeList(ctx, req.UserID) + if err != nil { + return nil, err + } + onlineStatusList, err := s.getUsersOnlineStatus(ctx, userList) + if err != nil { + return nil, err + } + return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil +} + +func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) { + for _, status := range req.Status { + if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil { + return nil, err + } + } + return &pbuser.SetUserOnlineStatusResp{}, nil +} diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index 2fea232b4..0b96077ec 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -19,6 +19,7 @@ import ( "errors" "github.com/openimsdk/open-im-server/v3/internal/rpc/friend" "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" @@ -50,6 +51,7 @@ import ( ) type userServer struct { + online cache.OnlineCache db controller.UserDatabase friendNotificationSender *friend.FriendNotificationSender userNotificationSender *UserNotificationSender @@ -60,11 +62,6 @@ type userServer struct { webhookClient *webhook.Client } -func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) { - //TODO implement me - panic("implement me") -} - type Config struct { RpcConfig config.User RedisConfig config.Redis @@ -103,6 +100,7 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) localcache.InitLocalCache(&config.LocalCacheConfig) u := &userServer{ + online: redis.NewUserOnline(rdb), db: database, RegisterCenter: client, friendRpcClient: &friendRpcClient, @@ -334,76 +332,6 @@ func (s *userServer) GetAllUserID(ctx context.Context, req *pbuser.GetAllUserIDR return &pbuser.GetAllUserIDResp{Total: int32(total), UserIDs: userIDs}, nil } -// SubscribeOrCancelUsersStatus Subscribe online or cancel online users. -func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (resp *pbuser.SubscribeOrCancelUsersStatusResp, err error) { - if req.Genre == constant.SubscriberUser { - err = s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs) - if err != nil { - return nil, err - } - var status []*pbuser.OnlineStatus - status, err = s.db.GetUserStatus(ctx, req.UserIDs) - if err != nil { - return nil, err - } - return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil - } else if req.Genre == constant.Unsubscribe { - err = s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs) - if err != nil { - return nil, err - } - } - return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil -} - -// GetUserStatus Get the online status of the user. 
-func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (resp *pbuser.GetUserStatusResp, - err error) { - onlineStatusList, err := s.db.GetUserStatus(ctx, req.UserIDs) - if err != nil { - return nil, err - } - return &pbuser.GetUserStatusResp{StatusList: onlineStatusList}, nil -} - -// SetUserStatus Synchronize user's online status. -func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (resp *pbuser.SetUserStatusResp, - err error) { - err = s.db.SetUserStatus(ctx, req.UserID, req.Status, req.PlatformID) - if err != nil { - return nil, err - } - list, err := s.db.GetSubscribedList(ctx, req.UserID) - if err != nil { - return nil, err - } - for _, userID := range list { - tips := &sdkws.UserStatusChangeTips{ - FromUserID: req.UserID, - ToUserID: userID, - Status: req.Status, - PlatformID: req.PlatformID, - } - s.userNotificationSender.UserStatusChangeNotification(ctx, tips) - } - - return &pbuser.SetUserStatusResp{}, nil -} - -// GetSubscribeUsersStatus Get the online status of subscribers. -func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, - req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) { - userList, err := s.db.GetAllSubscribeList(ctx, req.UserID) - if err != nil { - return nil, err - } - onlineStatusList, err := s.db.GetUserStatus(ctx, userList) - if err != nil { - return nil, err - } - return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil -} - // ProcessUserCommandAdd user general function add. func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.ProcessUserCommandAddReq) (*pbuser.ProcessUserCommandAddResp, error) { err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID) diff --git a/internal/tools/cron_task.go b/internal/tools/cron_task.go index dcdcf2f40..1ef4943cd 100644 --- a/internal/tools/cron_task.go +++ b/internal/tools/cron_task.go @@ -17,9 +17,6 @@ package tools import ( "context" "fmt" - "os" - "time" - "github.com/openimsdk/open-im-server/v3/pkg/common/config" kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" "github.com/openimsdk/protocol/msg" @@ -28,6 +25,8 @@ import ( "github.com/openimsdk/tools/mw" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "os" + "time" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" diff --git a/pkg/common/cmd/msg_gateway.go b/pkg/common/cmd/msg_gateway.go index 78004094c..29d3fba33 100644 --- a/pkg/common/cmd/msg_gateway.go +++ b/pkg/common/cmd/msg_gateway.go @@ -37,6 +37,7 @@ func NewMsgGatewayCmd() *MsgGatewayCmd { ret.configMap = map[string]any{ OpenIMMsgGatewayCfgFileName: &msgGatewayConfig.MsgGateway, ShareFileName: &msgGatewayConfig.Share, + RedisConfigFileName: &msgGatewayConfig.RedisConfig, WebhooksConfigFileName: &msgGatewayConfig.WebhooksConfig, DiscoveryConfigFilename: &msgGatewayConfig.Discovery, } diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index a6c380e9f..1e0f953dc 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -15,15 +15,14 @@ package config import ( - "strings" - "time" - "github.com/openimsdk/tools/db/mongoutil" "github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/s3/cos" "github.com/openimsdk/tools/s3/minio" "github.com/openimsdk/tools/s3/oss" + "strings" + "time" ) type CacheConfig struct { diff --git a/pkg/common/storage/cache/cachekey/online.go 
b/pkg/common/storage/cache/cachekey/online.go new file mode 100644 index 000000000..164e5f2f4 --- /dev/null +++ b/pkg/common/storage/cache/cachekey/online.go @@ -0,0 +1,13 @@ +package cachekey + +import "time" + +const ( + OnlineKey = "ONLINE:" + OnlineChannel = "online_change" + OnlineExpire = time.Hour / 2 +) + +func GetOnlineKey(userID string) string { + return OnlineKey + userID +} diff --git a/pkg/common/storage/cache/cachekey/user.go b/pkg/common/storage/cache/cachekey/user.go index 7d06d4f75..473ca1b12 100644 --- a/pkg/common/storage/cache/cachekey/user.go +++ b/pkg/common/storage/cache/cachekey/user.go @@ -17,7 +17,6 @@ package cachekey const ( UserInfoKey = "USER_INFO:" UserGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:" - olineStatusKey = "ONLINE_STATUS:" ) func GetUserInfoKey(userID string) string { @@ -27,7 +26,3 @@ func GetUserInfoKey(userID string) string { func GetUserGlobalRecvMsgOptKey(userID string) string { return UserGlobalRecvMsgOptKey + userID } - -func GetOnlineStatusKey(modKey string) string { - return olineStatusKey + modKey -} diff --git a/pkg/common/storage/cache/online.go b/pkg/common/storage/cache/online.go new file mode 100644 index 000000000..7669c8a11 --- /dev/null +++ b/pkg/common/storage/cache/online.go @@ -0,0 +1,8 @@ +package cache + +import "context" + +type OnlineCache interface { + GetOnline(ctx context.Context, userID string) ([]int32, error) + SetUserOnline(ctx context.Context, userID string, online, offline []int32) error +} diff --git a/pkg/common/storage/cache/redis/online.go b/pkg/common/storage/cache/redis/online.go new file mode 100644 index 000000000..dc6a5f775 --- /dev/null +++ b/pkg/common/storage/cache/redis/online.go @@ -0,0 +1,89 @@ +package redis + +import ( + "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/tools/errs" + "github.com/redis/go-redis/v9" + "strconv" + "time" +) + +func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache { + return &userOnline{ + rdb: rdb, + expire: cachekey.OnlineExpire, + channelName: cachekey.OnlineChannel, + } +} + +type userOnline struct { + rdb redis.UniversalClient + expire time.Duration + channelName string +} + +func (s *userOnline) getUserOnlineKey(userID string) string { + return cachekey.GetOnlineKey(userID) +} + +func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) { + members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{ + Min: strconv.FormatInt(time.Now().Unix(), 10), + Max: "+inf", + }).Result() + if err != nil { + return nil, errs.Wrap(err) + } + platformIDs := make([]int32, 0, len(members)) + for _, member := range members { + val, err := strconv.Atoi(member) + if err != nil { + return nil, errs.Wrap(err) + } + platformIDs = append(platformIDs, int32(val)) + } + return platformIDs, nil +} + +func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error { + script := ` + local key = KEYS[1] + local score = ARGV[3] + local num1 = redis.call("ZCARD", key) + redis.call("ZREMRANGEBYSCORE", key, "-inf", ARGV[2]) + for i = 5, tonumber(ARGV[4])+4 do + redis.call("ZREM", key, ARGV[i]) + end + local num2 = redis.call("ZCARD", key) + for i = 5+tonumber(ARGV[4]), #ARGV do + redis.call("ZADD", key, score, ARGV[i]) + end + redis.call("EXPIRE", key, ARGV[1]) + local num3 = redis.call("ZCARD", key) + local change = (num1 ~= num2) or (num2 ~= num3) + if change 
then + local members = redis.call("ZRANGE", key, 0, -1) + table.insert(members, KEYS[2]) + redis.call("PUBLISH", KEYS[3], table.concat(members, ":")) + return 1 + else + return 0 + end +` + now := time.Now() + argv := make([]any, 0, 2+len(online)+len(offline)) + argv = append(argv, int32(s.expire/time.Second), now.Unix(), now.Add(s.expire).Unix(), int32(len(offline))) + for _, platformID := range offline { + argv = append(argv, platformID) + } + for _, platformID := range online { + argv = append(argv, platformID) + } + keys := []string{s.getUserOnlineKey(userID), userID, s.channelName} + if err := s.rdb.Eval(ctx, script, keys, argv).Err(); err != nil { + return err + } + return nil +} diff --git a/pkg/common/storage/cache/redis/seq_user_test.go b/pkg/common/storage/cache/redis/seq_user_test.go new file mode 100644 index 000000000..e4fd95922 --- /dev/null +++ b/pkg/common/storage/cache/redis/seq_user_test.go @@ -0,0 +1,79 @@ +package redis + +import ( + "context" + "fmt" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/redis/go-redis/v9" + "log" + "strconv" + "sync/atomic" + "testing" + "time" +) + +func newTestOnline() *userOnline { + opt := &redis.Options{ + Addr: "172.16.8.48:16379", + Password: "openIM123", + DB: 0, + } + rdb := redis.NewClient(opt) + if err := rdb.Ping(context.Background()).Err(); err != nil { + panic(err) + } + return &userOnline{rdb: rdb, expire: time.Hour, channelName: "user_online"} +} + +func TestOnline(t *testing.T) { + ts := newTestOnline() + var count atomic.Int64 + for i := 0; i < 64; i++ { + go func(userID string) { + var err error + for i := 0; ; i++ { + if i%2 == 0 { + err = ts.SetUserOnline(context.Background(), userID, []int32{5, 6}, []int32{7, 8, 9}) + } else { + err = ts.SetUserOnline(context.Background(), userID, []int32{1, 2, 3}, []int32{4, 5, 6}) + } + if err != nil { + panic(err) + } + count.Add(1) + } + }(strconv.Itoa(10000 + i)) + } + + ticker := time.NewTicker(time.Second) + for range ticker.C { + t.Log(count.Swap(0)) + } +} + +func TestGetOnline(t *testing.T) { + ts := newTestOnline() + ctx := context.Background() + pIDs, err := ts.GetOnline(ctx, "10000") + if err != nil { + panic(err) + } + t.Log(pIDs) +} + +func TestRecvOnline(t *testing.T) { + ts := newTestOnline() + ctx := context.Background() + pubsub := ts.rdb.Subscribe(ctx, cachekey.OnlineChannel) + + _, err := pubsub.Receive(ctx) + if err != nil { + log.Fatalf("Could not subscribe: %v", err) + } + + ch := pubsub.Channel() + + for msg := range ch { + fmt.Printf("Received message from channel %s: %s\n", msg.Channel, msg.Payload) + } +} diff --git a/pkg/common/storage/cache/redis/user.go b/pkg/common/storage/cache/redis/user.go index 3de01563b..c3accd2c3 100644 --- a/pkg/common/storage/cache/redis/user.go +++ b/pkg/common/storage/cache/redis/user.go @@ -17,7 +17,6 @@ package redis import ( "context" "encoding/json" - "errors" "github.com/dtm-labs/rockscache" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" @@ -29,8 +28,6 @@ import ( "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" - "hash/crc32" - "strconv" "time" ) @@ -61,9 +58,9 @@ func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, } } -func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string { - return cachekey.GetOnlineStatusKey(modKey) -} +//func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string { +// return 
cachekey.GetOnlineStatusKey(modKey) +//} func (u *UserCacheRedis) CloneUserCache() cache.UserCache { return &UserCacheRedis{ @@ -131,97 +128,6 @@ func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) cache.UserC return cache } -// GetUserStatus get user status. -func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) { - userStatus := make([]*user.OnlineStatus, 0, len(userIDs)) - for _, userID := range userIDs { - UserIDNum := crc32.ChecksumIEEE([]byte(userID)) - modKey := strconv.Itoa(int(UserIDNum % statusMod)) - var onlineStatus user.OnlineStatus - key := u.getOnlineStatusKey(modKey) - result, err := u.rdb.HGet(ctx, key, userID).Result() - if err != nil { - if errors.Is(err, redis.Nil) { - // key or field does not exist - userStatus = append(userStatus, &user.OnlineStatus{ - UserID: userID, - Status: constant.Offline, - PlatformIDs: nil, - }) - - continue - } else { - return nil, errs.Wrap(err) - } - } - err = json.Unmarshal([]byte(result), &onlineStatus) - if err != nil { - return nil, errs.Wrap(err) - } - onlineStatus.UserID = userID - onlineStatus.Status = constant.Online - userStatus = append(userStatus, &onlineStatus) - } - - return userStatus, nil -} - -// SetUserStatus Set the user status and save it in redis. -func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error { - UserIDNum := crc32.ChecksumIEEE([]byte(userID)) - modKey := strconv.Itoa(int(UserIDNum % statusMod)) - key := u.getOnlineStatusKey(modKey) - log.ZDebug(ctx, "SetUserStatus args", "userID", userID, "status", status, "platformID", platformID, "modKey", modKey, "key", key) - isNewKey, err := u.rdb.Exists(ctx, key).Result() - if err != nil { - return errs.Wrap(err) - } - if isNewKey == 0 { - if status == constant.Online { - onlineStatus := user.OnlineStatus{ - UserID: userID, - Status: constant.Online, - PlatformIDs: []int32{platformID}, - } - jsonData, err := json.Marshal(&onlineStatus) - if err != nil { - return errs.Wrap(err) - } - _, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result() - if err != nil { - return errs.Wrap(err) - } - u.rdb.Expire(ctx, key, userOlineStatusExpireTime) - - return nil - } - } - - isNil := false - result, err := u.rdb.HGet(ctx, key, userID).Result() - if err != nil { - if errors.Is(err, redis.Nil) { - isNil = true - } else { - return errs.Wrap(err) - } - } - - if status == constant.Offline { - err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key) - if err != nil { - return err - } - } else { - err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key) - if err != nil { - return errs.Wrap(err) - } - } - - return nil -} - func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error { if isNil { log.ZWarn(ctx, "this user not online,maybe trigger order not right", diff --git a/pkg/common/storage/cache/user.go b/pkg/common/storage/cache/user.go index 5101c0b6c..69a11635c 100644 --- a/pkg/common/storage/cache/user.go +++ b/pkg/common/storage/cache/user.go @@ -17,7 +17,6 @@ package cache import ( "context" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" - "github.com/openimsdk/protocol/user" ) type UserCache interface { @@ -28,6 +27,6 @@ type UserCache interface { DelUsersInfo(userIDs ...string) UserCache GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error) 
DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache - GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) - SetUserStatus(ctx context.Context, userID string, status, platformID int32) error + //GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) + //SetUserStatus(ctx context.Context, userID string, status, platformID int32) error } diff --git a/pkg/common/storage/controller/user.go b/pkg/common/storage/controller/user.go index 9efe535c0..321eff03c 100644 --- a/pkg/common/storage/controller/user.go +++ b/pkg/common/storage/controller/user.go @@ -70,10 +70,6 @@ type UserDatabase interface { GetAllSubscribeList(ctx context.Context, userID string) ([]string, error) // GetSubscribedList Get all subscribed lists GetSubscribedList(ctx context.Context, userID string) ([]string, error) - // GetUserStatus Get the online status of the user - GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) - // SetUserStatus Set the user status and store the user status in redis - SetUserStatus(ctx context.Context, userID string, status, platformID int32) error // CRUD user command AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error @@ -246,17 +242,6 @@ func (u *userDatabase) GetSubscribedList(ctx context.Context, userID string) ([] return list, nil } -// GetUserStatus get user status. -func (u *userDatabase) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) { - onlineStatusList, err := u.cache.GetUserStatus(ctx, userIDs) - return onlineStatusList, err -} - -// SetUserStatus Set the user status and save it in redis. -func (u *userDatabase) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error { - return u.cache.SetUserStatus(ctx, userID, status, platformID) -} - func (u *userDatabase) AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error { return u.userDB.AddUserCommand(ctx, userID, Type, UUID, value, ex) } diff --git a/pkg/localcache/cache.go b/pkg/localcache/cache.go index 0e040ad38..ba849f892 100644 --- a/pkg/localcache/cache.go +++ b/pkg/localcache/cache.go @@ -31,6 +31,12 @@ type Cache[V any] interface { Stop() } +func LRUStringHash(key string) uint64 { + h := fnv.New64a() + h.Write(*(*[]byte)(unsafe.Pointer(&key))) + return h.Sum64() +} + func New[V any](opts ...Option) Cache[V] { opt := defaultOption() for _, o := range opts { @@ -49,11 +55,7 @@ func New[V any](opts ...Option) Cache[V] { if opt.localSlotNum == 1 { c.local = createSimpleLRU() } else { - c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, func(key string) uint64 { - h := fnv.New64a() - h.Write(*(*[]byte)(unsafe.Pointer(&key))) - return h.Sum64() - }, createSimpleLRU) + c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, LRUStringHash, createSimpleLRU) } if opt.linkSlotNum > 0 { c.link = link.New(opt.linkSlotNum) diff --git a/pkg/localcache/lru/lru.go b/pkg/localcache/lru/lru.go index 64280f238..2fedffc48 100644 --- a/pkg/localcache/lru/lru.go +++ b/pkg/localcache/lru/lru.go @@ -20,6 +20,7 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V] type LRU[K comparable, V any] interface { Get(key K, fetch func() (V, error)) (V, error) + SetHas(key K, value V) bool Del(key K) bool Stop() } diff --git a/pkg/localcache/lru/lru_expiration.go b/pkg/localcache/lru/lru_expiration.go index 970ac083e..d27e67057 100644 --- a/pkg/localcache/lru/lru_expiration.go +++ 
b/pkg/localcache/lru/lru_expiration.go @@ -89,5 +89,15 @@ func (x *ExpirationLRU[K, V]) Del(key K) bool { return ok } +func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool { + x.lock.Lock() + defer x.lock.Unlock() + if x.core.Contains(key) { + x.core.Add(key, &expirationLruItem[V]{value: value}) + return true + } + return false +} + func (x *ExpirationLRU[K, V]) Stop() { } diff --git a/pkg/localcache/lru/lru_lazy.go b/pkg/localcache/lru/lru_lazy.go index d6e64aae4..e935c687c 100644 --- a/pkg/localcache/lru/lru_lazy.go +++ b/pkg/localcache/lru/lru_lazy.go @@ -88,6 +88,28 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { return v.value, v.err } +//func (x *LayLRU[K, V]) Set(key K, value V) { +// x.lock.Lock() +// x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()}) +// x.lock.Unlock() +//} +// +//func (x *LayLRU[K, V]) Has(key K) bool { +// x.lock.Lock() +// defer x.lock.Unlock() +// return x.core.Contains(key) +//} + +func (x *LayLRU[K, V]) SetHas(key K, value V) bool { + x.lock.Lock() + defer x.lock.Unlock() + if x.core.Contains(key) { + x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()}) + return true + } + return false +} + func (x *LayLRU[K, V]) Del(key K) bool { x.lock.Lock() ok := x.core.Remove(key) diff --git a/pkg/localcache/lru/lru_slot.go b/pkg/localcache/lru/lru_slot.go index d034e94d3..4538ca20e 100644 --- a/pkg/localcache/lru/lru_slot.go +++ b/pkg/localcache/lru/lru_slot.go @@ -40,6 +40,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { return x.slots[x.getIndex(key)].Get(key, fetch) } +func (x *slotLRU[K, V]) SetHas(key K, value V) bool { + return x.slots[x.getIndex(key)].SetHas(key, value) +} + func (x *slotLRU[K, V]) Del(key K) bool { return x.slots[x.getIndex(key)].Del(key) } diff --git a/pkg/localcache/option.go b/pkg/localcache/option.go index 00bb9d044..7d91aba6c 100644 --- a/pkg/localcache/option.go +++ b/pkg/localcache/option.go @@ -30,7 +30,7 @@ func defaultOption() *option { localSuccessTTL: time.Minute, localFailedTTL: time.Second * 5, delFn: make([]func(ctx context.Context, key ...string), 0, 2), - target: emptyTarget{}, + target: EmptyTarget{}, } } @@ -123,14 +123,14 @@ func WithDeleteKeyBefore(fn func(ctx context.Context, key ...string)) Option { } } -type emptyTarget struct{} +type EmptyTarget struct{} -func (e emptyTarget) IncrGetHit() {} +func (e EmptyTarget) IncrGetHit() {} -func (e emptyTarget) IncrGetSuccess() {} +func (e EmptyTarget) IncrGetSuccess() {} -func (e emptyTarget) IncrGetFailed() {} +func (e EmptyTarget) IncrGetFailed() {} -func (e emptyTarget) IncrDelHit() {} +func (e EmptyTarget) IncrDelHit() {} -func (e emptyTarget) IncrDelNotFound() {} +func (e EmptyTarget) IncrDelNotFound() {} diff --git a/pkg/rpccache/online.go b/pkg/rpccache/online.go new file mode 100644 index 000000000..5db68d198 --- /dev/null +++ b/pkg/rpccache/online.go @@ -0,0 +1,100 @@ +package rpccache + +import ( + "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/localcache" + "github.com/openimsdk/open-im-server/v3/pkg/localcache/lru" + "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/util/useronline" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mcontext" + "github.com/redis/go-redis/v9" + "math/rand" + "strconv" + "time" +) + +func NewOnlineCache(user rpcclient.UserRpcClient, group 
*GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache { + x := &OnlineCache{ + user: user, + group: group, + local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] { + return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {}) + }), + } + go func() { + ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10)) + for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() { + userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload) + if err != nil { + log.ZError(ctx, "OnlineCache redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel) + continue + } + storageCache := x.setUserOnline(userID, platformIDs) + log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache) + if fn != nil { + fn(ctx, userID, platformIDs) + } + } + }() + return x +} + +type OnlineCache struct { + user rpcclient.UserRpcClient + group *GroupLocalCache + local lru.LRU[string, []int32] +} + +func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) { + return o.local.Get(userID, func() ([]int32, error) { + return o.user.GetUserOnlinePlatform(ctx, userID) + }) +} + +func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) { + platformIDs, err := o.GetUserOnlinePlatform(ctx, userID) + if err != nil { + return false, err + } + return len(platformIDs) > 0, nil +} + +func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) { + onlineUserIDs := make([]string, 0, len(userIDs)) + for _, userID := range userIDs { + online, err := o.GetUserOnline(ctx, userID) + if err != nil { + return nil, err + } + if online { + onlineUserIDs = append(onlineUserIDs, userID) + } + } + log.ZDebug(ctx, "OnlineCache GetUsersOnline", "userIDs", userIDs, "onlineUserIDs", onlineUserIDs) + return onlineUserIDs, nil +} + +func (o *OnlineCache) GetGroupOnline(ctx context.Context, groupID string) ([]string, error) { + userIDs, err := o.group.GetGroupMemberIDs(ctx, groupID) + if err != nil { + return nil, err + } + var onlineUserIDs []string + for _, userID := range userIDs { + online, err := o.GetUserOnline(ctx, userID) + if err != nil { + return nil, err + } + if online { + onlineUserIDs = append(onlineUserIDs, userID) + } + } + log.ZDebug(ctx, "OnlineCache GetGroupOnline", "groupID", groupID, "onlineUserIDs", onlineUserIDs, "allUserID", userIDs) + return onlineUserIDs, nil +} + +func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool { + return o.local.SetHas(userID, platformIDs) +} diff --git a/pkg/rpccache/user.go b/pkg/rpccache/user.go index 25a8eb20d..6126f5891 100644 --- a/pkg/rpccache/user.go +++ b/pkg/rpccache/user.go @@ -110,3 +110,18 @@ func (u *UserLocalCache) GetUsersInfoMap(ctx context.Context, userIDs []string) } return users, nil } + +//func (u *UserLocalCache) GetUserOnlinePlatform(ctx context.Context, userID string) (val []int32, err error) { +// log.ZDebug(ctx, "UserLocalCache GetUserOnlinePlatform req", "userID", userID) +// defer func() { +// if err == nil { +// log.ZDebug(ctx, "UserLocalCache GetUserOnlinePlatform return", "value", val) +// } else { +// log.ZError(ctx, "UserLocalCache GetUserOnlinePlatform 
return", err) +// } +// }() +// return localcache.AnyValue[[]int32](u.local.Get(ctx, cachekey.GetOnlineKey(userID), func(ctx context.Context) (any, error) { +// log.ZDebug(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt rpc", "userID", userID) +// return u.client.GetUserGlobalMsgRecvOpt(ctx, userID) +// })) +//} diff --git a/pkg/rpcclient/user.go b/pkg/rpcclient/user.go index aab96603e..eabe77b94 100644 --- a/pkg/rpcclient/user.go +++ b/pkg/rpcclient/user.go @@ -193,3 +193,25 @@ func (u *UserRpcClient) GetNotificationByID(ctx context.Context, userID string) }) return err } + +func (u *UserRpcClient) GetUsersOnlinePlatform(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) { + if len(userIDs) == 0 { + return nil, nil + } + resp, err := u.Client.GetUserStatus(ctx, &user.GetUserStatusReq{UserIDs: userIDs, UserID: u.imAdminUserID[0]}) + if err != nil { + return nil, err + } + return resp.StatusList, nil +} + +func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) { + resp, err := u.GetUsersOnlinePlatform(ctx, []string{userID}) + if err != nil { + return nil, err + } + if len(resp) == 0 { + return nil, nil + } + return resp[0].PlatformIDs, nil +} diff --git a/pkg/util/useronline/split.go b/pkg/util/useronline/split.go new file mode 100644 index 000000000..c39d31d15 --- /dev/null +++ b/pkg/util/useronline/split.go @@ -0,0 +1,27 @@ +package useronline + +import ( + "errors" + "strconv" + "strings" +) + +func ParseUserOnlineStatus(payload string) (string, []int32, error) { + arr := strings.Split(payload, ":") + if len(arr) == 0 { + return "", nil, errors.New("invalid data") + } + userID := arr[len(arr)-1] + if userID == "" { + return "", nil, errors.New("userID is empty") + } + platformIDs := make([]int32, len(arr)-1) + for i := range platformIDs { + platformID, err := strconv.Atoi(arr[i]) + if err != nil { + return "", nil, err + } + platformIDs[i] = int32(platformID) + } + return userID, platformIDs, nil +}