From 8f86049599afe62a85a612ac85dee72dcad583d8 Mon Sep 17 00:00:00 2001 From: withchao <993506633@qq.com> Date: Fri, 28 Jun 2024 16:18:21 +0800 Subject: [PATCH] user online --- go.mod | 2 +- go.sum | 4 +- internal/msggateway/init.go | 7 +- internal/msggateway/n_ws_server.go | 42 +-- internal/msggateway/online.go | 106 ++++++++ internal/msggateway/user_map.go | 239 +++++++++--------- internal/msggateway/user_map2.go | 208 ++++++++++++--- internal/rpc/user/online.go | 122 +++++++++ internal/rpc/user/user.go | 73 +----- internal/tools/cron_task.go | 6 + pkg/common/config/config.go | 1 + pkg/common/storage/cache/cachekey/online.go | 13 + pkg/common/storage/cache/cachekey/user.go | 5 - pkg/common/storage/cache/online.go | 8 + pkg/common/storage/cache/redis/online.go | 33 ++- .../storage/cache/redis/seq_user_test.go | 53 ++-- pkg/common/storage/cache/redis/user.go | 183 +++++++------- pkg/common/storage/cache/user.go | 5 +- pkg/common/storage/controller/user.go | 15 -- 19 files changed, 740 insertions(+), 385 deletions(-) create mode 100644 internal/msggateway/online.go create mode 100644 internal/rpc/user/online.go create mode 100644 pkg/common/storage/cache/cachekey/online.go create mode 100644 pkg/common/storage/cache/online.go diff --git a/go.mod b/go.mod index 2614e0f32..e7ed446f4 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible // indirect github.com/mitchellh/mapstructure v1.5.0 - github.com/openimsdk/protocol v0.0.69-alpha.17 + github.com/openimsdk/protocol v0.0.69-alpha.20 github.com/openimsdk/tools v0.0.49-alpha.25 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 diff --git a/go.sum b/go.sum index fe4f0c390..adde3aa82 100644 --- a/go.sum +++ b/go.sum @@ -270,8 +270,8 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/openimsdk/gomake v0.0.13 h1:xLDe/moqgWpRoptHzI4packAWzs4C16b+sVY+txNJp0= github.com/openimsdk/gomake v0.0.13/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= -github.com/openimsdk/protocol v0.0.69-alpha.17 h1:pEag4ZdlovE+AyLsw1VYFU/3sk6ayvGdPzgufQfKf9M= -github.com/openimsdk/protocol v0.0.69-alpha.17/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8= +github.com/openimsdk/protocol v0.0.69-alpha.20 h1:skZu82sqoMhiQVEZgrRsjcfI3Grp1IpThx1LJPqETWs= +github.com/openimsdk/protocol v0.0.69-alpha.20/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8= github.com/openimsdk/tools v0.0.49-alpha.25 h1:OpRPwDZ2xWX7Zj5kyfZhryu/NfZTrsRVr2GFwu1HQHI= github.com/openimsdk/tools v0.0.49-alpha.25/go.mod h1:rwsFI1G/nBHNfiNapbven41akRDPBbH4df0Cgy6xueU= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= diff --git a/internal/msggateway/init.go b/internal/msggateway/init.go index f4d8b0381..815ec8ca6 100644 --- a/internal/msggateway/init.go +++ b/internal/msggateway/init.go @@ -42,16 +42,15 @@ func Start(ctx context.Context, index int, conf *Config) error { if err != nil { return err } - longServer, err := NewWsServer( + longServer := NewWsServer( conf, WithPort(wsPort), WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)), WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second), WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen), ) - if err != nil { - return err - } + + go 
longServer.ChangeOnlineStatus(4) hubServer := NewServer(rpcPort, longServer, conf) netDone := make(chan error) diff --git a/internal/msggateway/n_ws_server.go b/internal/msggateway/n_ws_server.go index 17e550fef..ff93ce6bc 100644 --- a/internal/msggateway/n_ws_server.go +++ b/internal/msggateway/n_ws_server.go @@ -60,7 +60,7 @@ type WsServer struct { registerChan chan *Client unregisterChan chan *Client kickHandlerChan chan *kickHandler - clients *UserMap + clients UMap clientPool sync.Pool onlineUserNum atomic.Int64 onlineUserConnNum atomic.Int64 @@ -90,18 +90,18 @@ func (ws *WsServer) SetDiscoveryRegistry(disCov discovery.SvcDiscoveryRegistry, ws.disCov = disCov } -func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) { - err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID) - if err != nil { - log.ZWarn(ctx, "SetUserStatus err", err) - } - switch status { - case constant.Online: - ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID()) - case constant.Offline: - ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID()) - } -} +//func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) { +// err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID) +// if err != nil { +// log.ZWarn(ctx, "SetUserStatus err", err) +// } +// switch status { +// case constant.Online: +// ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID()) +// case constant.Offline: +// ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID()) +// } +//} func (ws *WsServer) UnRegister(c *Client) { ws.unregisterChan <- c @@ -119,7 +119,7 @@ func (ws *WsServer) GetUserPlatformCons(userID string, platform int) ([]*Client, return ws.clients.Get(userID, platform) } -func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) { +func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer { var config configs for _, o := range opts { o(&config) @@ -144,7 +144,7 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) { Compressor: NewGzipCompressor(), Encoder: NewGobEncoder(), webhookClient: webhook.NewWebhookClient(msgGatewayConfig.WebhooksConfig.URL), - }, nil + } } func (ws *WsServer) Run(done chan error) error { @@ -278,11 +278,11 @@ func (ws *WsServer) registerClient(client *Client) { }() } - wg.Add(1) - go func() { - defer wg.Done() - ws.SetUserOnlineStatus(client.ctx, client, constant.Online) - }() + //wg.Add(1) + //go func() { + // defer wg.Done() + // ws.SetUserOnlineStatus(client.ctx, client, constant.Online) + //}() wg.Wait() @@ -351,7 +351,7 @@ func (ws *WsServer) unregisterClient(client *Client) { prommetrics.OnlineUserGauge.Dec() } ws.onlineUserConnNum.Add(-1) - ws.SetUserOnlineStatus(client.ctx, client, constant.Offline) + //ws.SetUserOnlineStatus(client.ctx, client, constant.Offline) log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num", ws.onlineUserNum.Load(), "online user conn Num", ws.onlineUserConnNum.Load(), diff --git a/internal/msggateway/online.go b/internal/msggateway/online.go new file mode 100644 index 
000000000..611ecbd79 --- /dev/null +++ b/internal/msggateway/online.go @@ -0,0 +1,106 @@ +package msggateway + +import ( + "context" + "crypto/md5" + "encoding/binary" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mcontext" + "github.com/openimsdk/tools/utils/datautil" + "math/rand" + "strconv" + "time" +) + +func (ws *WsServer) ChangeOnlineStatus(concurrent int) { + if concurrent < 1 { + concurrent = 1 + } + scanTicker := time.NewTicker(time.Minute * 3) + + requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent) + changeStatus := make([][]UserState, concurrent) + + for i := 0; i < concurrent; i++ { + requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64) + changeStatus[i] = make([]UserState, 100) + } + + mergeTicker := time.NewTicker(time.Second) + + local2pb := func(u UserState) *pbuser.UserOnlineStatus { + return &pbuser.UserOnlineStatus{ + UserID: u.UserID, + Online: u.Online, + Offline: u.Offline, + } + } + + rNum := rand.Uint64() + pushUserState := func(us ...UserState) { + for _, u := range us { + sum := md5.Sum([]byte(u.UserID)) + i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent) + changeStatus[i] = append(changeStatus[i], u) + status := changeStatus[i] + if len(status) == cap(status) { + req := &pbuser.SetUserOnlineStatusReq{ + Status: datautil.Slice(status, local2pb), + } + changeStatus[i] = status[0:] + select { + case requestChs[i] <- req: + default: + log.ZError(context.Background(), "user online processing is too slow", nil) + } + } + } + } + + pushAllUserState := func() { + for i, status := range changeStatus { + if len(status) == 0 { + continue + } + req := &pbuser.SetUserOnlineStatusReq{ + Status: datautil.Slice(status, local2pb), + } + changeStatus[i] = status[0:] + select { + case requestChs[i] <- req: + default: + log.ZError(context.Background(), "user online processing is too slow", nil) + } + } + } + + opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10)) + doRequest := func(req *pbuser.SetUserOnlineStatusReq) { + ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5) + defer cancel() + if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil { + log.ZError(ctx, "update user online status", err) + } + } + + for i := 0; i < concurrent; i++ { + go func(ch <-chan *pbuser.SetUserOnlineStatusReq) { + for req := range ch { + doRequest(req) + } + }(requestChs[i]) + } + + for { + select { + case <-mergeTicker.C: + pushAllUserState() + case now := <-scanTicker.C: + pushUserState(ws.clients.GetAllUserStatus(now.Add(-cachekey.OnlineExpire/3), now)...) 
+ case state := <-ws.clients.UserState(): + pushUserState(state) + } + } +} diff --git a/internal/msggateway/user_map.go b/internal/msggateway/user_map.go index f8bf69f9a..dcab066f8 100644 --- a/internal/msggateway/user_map.go +++ b/internal/msggateway/user_map.go @@ -14,122 +14,123 @@ package msggateway -import ( - "context" - "sync" - - "github.com/openimsdk/tools/log" - "github.com/openimsdk/tools/utils/datautil" -) - -type UserMap struct { - m sync.Map -} - -func newUserMap() *UserMap { - return &UserMap{} -} - -func (u *UserMap) GetAll(key string) ([]*Client, bool) { - allClients, ok := u.m.Load(key) - if ok { - return allClients.([]*Client), ok - } - return nil, ok -} - -func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) { - allClients, userExisted := u.m.Load(key) - if userExisted { - var clients []*Client - for _, client := range allClients.([]*Client) { - if client.PlatformID == platformID { - clients = append(clients, client) - } - } - if len(clients) > 0 { - return clients, userExisted, true - } - return clients, userExisted, false - } - return nil, userExisted, false -} - -// Set adds a client to the map. -func (u *UserMap) Set(key string, v *Client) { - allClients, existed := u.m.Load(key) - if existed { - log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID) - oldClients := allClients.([]*Client) - oldClients = append(oldClients, v) - u.m.Store(key, oldClients) - } else { - log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID) - - var clients []*Client - clients = append(clients, v) - u.m.Store(key, clients) - } -} - -func (u *UserMap) Delete(key string, connRemoteAddr string) (isDeleteUser bool) { - // Attempt to load the clients associated with the key. - allClients, existed := u.m.Load(key) - if !existed { - // Return false immediately if the key does not exist. - return false - } - - // Convert allClients to a slice of *Client. - oldClients := allClients.([]*Client) - var remainingClients []*Client - for _, client := range oldClients { - // Keep clients that do not match the connRemoteAddr. - if client.ctx.GetRemoteAddr() != connRemoteAddr { - remainingClients = append(remainingClients, client) - } - } - - // If no clients remain after filtering, delete the key from the map. - if len(remainingClients) == 0 { - u.m.Delete(key) - return true - } - - // Otherwise, update the key with the remaining clients. - u.m.Store(key, remainingClients) - return false -} - -func (u *UserMap) DeleteClients(key string, clients []*Client) (isDeleteUser bool) { - m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) { - return c.ctx.GetRemoteAddr(), struct{}{} - }) - allClients, existed := u.m.Load(key) - if !existed { - // If the key doesn't exist, return false. - return false - } - - // Filter out clients that are in the deleteMap. - oldClients := allClients.([]*Client) - var remainingClients []*Client - for _, client := range oldClients { - if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted { - remainingClients = append(remainingClients, client) - } - } - - // Update or delete the key based on the remaining clients. 
- if len(remainingClients) == 0 { - u.m.Delete(key) - return true - } - - u.m.Store(key, remainingClients) - return false -} - -func (u *UserMap) DeleteAll(key string) { - u.m.Delete(key) -} +// +//import ( +// "context" +// "sync" +// +// "github.com/openimsdk/tools/log" +// "github.com/openimsdk/tools/utils/datautil" +//) +// +//type UserMap struct { +// m sync.Map +//} +// +//func newUserMap() UMap { +// return &UserMap{} +//} +// +//func (u *UserMap) GetAll(key string) ([]*Client, bool) { +// allClients, ok := u.m.Load(key) +// if ok { +// return allClients.([]*Client), ok +// } +// return nil, ok +//} +// +//func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) { +// allClients, userExisted := u.m.Load(key) +// if userExisted { +// var clients []*Client +// for _, client := range allClients.([]*Client) { +// if client.PlatformID == platformID { +// clients = append(clients, client) +// } +// } +// if len(clients) > 0 { +// return clients, userExisted, true +// } +// return clients, userExisted, false +// } +// return nil, userExisted, false +//} +// +//// Set adds a client to the map. +//func (u *UserMap) Set(key string, v *Client) { +// allClients, existed := u.m.Load(key) +// if existed { +// log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID) +// oldClients := allClients.([]*Client) +// oldClients = append(oldClients, v) +// u.m.Store(key, oldClients) +// } else { +// log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID) +// +// var clients []*Client +// clients = append(clients, v) +// u.m.Store(key, clients) +// } +//} +// +//func (u *UserMap) Delete(key string, connRemoteAddr string) (isDeleteUser bool) { +// // Attempt to load the clients associated with the key. +// allClients, existed := u.m.Load(key) +// if !existed { +// // Return false immediately if the key does not exist. +// return false +// } +// +// // Convert allClients to a slice of *Client. +// oldClients := allClients.([]*Client) +// var remainingClients []*Client +// for _, client := range oldClients { +// // Keep clients that do not match the connRemoteAddr. +// if client.ctx.GetRemoteAddr() != connRemoteAddr { +// remainingClients = append(remainingClients, client) +// } +// } +// +// // If no clients remain after filtering, delete the key from the map. +// if len(remainingClients) == 0 { +// u.m.Delete(key) +// return true +// } +// +// // Otherwise, update the key with the remaining clients. +// u.m.Store(key, remainingClients) +// return false +//} +// +//func (u *UserMap) DeleteClients(key string, clients []*Client) (isDeleteUser bool) { +// m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) { +// return c.ctx.GetRemoteAddr(), struct{}{} +// }) +// allClients, existed := u.m.Load(key) +// if !existed { +// // If the key doesn't exist, return false. +// return false +// } +// +// // Filter out clients that are in the deleteMap. +// oldClients := allClients.([]*Client) +// var remainingClients []*Client +// for _, client := range oldClients { +// if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted { +// remainingClients = append(remainingClients, client) +// } +// } +// +// // Update or delete the key based on the remaining clients. 
+// if len(remainingClients) == 0 { +// u.m.Delete(key) +// return true +// } +// +// u.m.Store(key, remainingClients) +// return false +//} +// +//func (u *UserMap) DeleteAll(key string) { +// u.m.Delete(key) +//} diff --git a/internal/msggateway/user_map2.go b/internal/msggateway/user_map2.go index 1c9a91c6a..b6ed40373 100644 --- a/internal/msggateway/user_map2.go +++ b/internal/msggateway/user_map2.go @@ -1,32 +1,180 @@ package msggateway -/* - -sorted set - -userID: 10000 - -USER_ONLINE:10000 - - - - - -platformID: 1 - - - - - -key score - -E1 123456789 -O1 234567895 - - - - - - - -*/ +import ( + "sync" + "time" +) + +type UMap interface { + GetAll(userID string) ([]*Client, bool) + Get(userID string, platformID int) ([]*Client, bool, bool) + Set(userID string, v *Client) + Delete(userID string, connRemoteAddr string) (isDeleteUser bool) + DeleteClients(userID string, clients []*Client) (isDeleteUser bool) + UserState() <-chan UserState + GetAllUserStatus(deadline, nowtime time.Time) []UserState +} + +var _ UMap = (*UserMap2)(nil) + +type UserPlatform struct { + Time time.Time + Clients map[string]*Client +} + +func (u *UserPlatform) PlatformIDs() []int32 { + if len(u.Clients) == 0 { + return nil + } + platformIDs := make([]int32, 0, len(u.Clients)) + for _, client := range u.Clients { + platformIDs = append(platformIDs, int32(client.PlatformID)) + } + return platformIDs +} + +func newUserMap() UMap { + return &UserMap2{ + data: make(map[string]*UserPlatform), + ch: make(chan UserState, 10000), + } +} + +type UserMap2 struct { + lock sync.RWMutex + data map[string]*UserPlatform + ch chan UserState +} + +func (u *UserMap2) push(userID string, userPlatform *UserPlatform, offline []int32) bool { + select { + case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}: + userPlatform.Time = time.Now() + return true + default: + return false + } +} + +func (u *UserMap2) GetAll(userID string) ([]*Client, bool) { + u.lock.RLock() + defer u.lock.RUnlock() + result, ok := u.data[userID] + if !ok { + return nil, false + } + clients := make([]*Client, 0, len(result.Clients)) + for _, client := range result.Clients { + clients = append(clients, client) + } + return clients, true +} + +func (u *UserMap2) Get(userID string, platformID int) ([]*Client, bool, bool) { + u.lock.RLock() + defer u.lock.RUnlock() + result, ok := u.data[userID] + if !ok { + return nil, false, false + } + var clients []*Client + for _, client := range result.Clients { + if client.PlatformID == platformID { + clients = append(clients, client) + } + } + return clients, true, len(clients) > 0 +} + +func (u *UserMap2) Set(userID string, client *Client) { + u.lock.Lock() + defer u.lock.Unlock() + result, ok := u.data[userID] + if ok { + result.Clients[client.ctx.GetRemoteAddr()] = client + } else { + result = &UserPlatform{ + Clients: map[string]*Client{ + client.ctx.GetRemoteAddr(): client, + }, + } + } + u.push(client.UserID, result, nil) +} + +func (u *UserMap2) Delete(userID string, connRemoteAddr string) (isDeleteUser bool) { + u.lock.Lock() + defer u.lock.Unlock() + result, ok := u.data[userID] + if !ok { + return false + } + client, ok := result.Clients[connRemoteAddr] + if !ok { + return false + } + delete(result.Clients, connRemoteAddr) + defer u.push(userID, result, []int32{int32(client.PlatformID)}) + if len(result.Clients) > 0 { + return false + } + delete(u.data, userID) + return true +} + +func (u *UserMap2) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) { + if 
len(clients) == 0 { + return false + } + u.lock.Lock() + defer u.lock.Unlock() + result, ok := u.data[userID] + if !ok { + return false + } + offline := make([]int32, 0, len(clients)) + for _, client := range clients { + offline = append(offline, int32(client.PlatformID)) + delete(result.Clients, client.ctx.GetRemoteAddr()) + } + defer u.push(userID, result, offline) + if len(result.Clients) > 0 { + return false + } + delete(u.data, userID) + return true +} + +func (u *UserMap2) GetAllUserStatus(deadline, nowtime time.Time) []UserState { + u.lock.RLock() + defer u.lock.RUnlock() + if len(u.data) == 0 { + return nil + } + result := make([]UserState, 0, len(u.data)) + for userID, p := range u.data { + if len(result) == cap(result) { + break + } + if p.Time.Before(deadline) { + continue + } + p.Time = nowtime + online := make([]int32, 0, len(p.Clients)) + for _, client := range p.Clients { + online = append(online, int32(client.PlatformID)) + } + result = append(result, UserState{UserID: userID, Online: online}) + } + return result +} + +func (u *UserMap2) UserState() <-chan UserState { + return u.ch +} + +type UserState struct { + UserID string + Online []int32 + Offline []int32 +} diff --git a/internal/rpc/user/online.go b/internal/rpc/user/online.go new file mode 100644 index 000000000..e853ceae2 --- /dev/null +++ b/internal/rpc/user/online.go @@ -0,0 +1,122 @@ +package user + +import ( + "context" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" +) + +func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) { + platformIDs, err := s.online.GetOnline(ctx, userID) + if err != nil { + return nil, err + } + status := pbuser.OnlineStatus{ + UserID: userID, + PlatformIDs: platformIDs, + } + if len(platformIDs) > 0 { + status.Status = constant.Online + } else { + status.Status = constant.Offline + } + return &status, nil +} + +func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) { + res := make([]*pbuser.OnlineStatus, 0, len(userIDs)) + for _, userID := range userIDs { + status, err := s.getUserOnlineStatus(ctx, userID) + if err != nil { + return nil, err + } + res = append(res, status) + } + return res, nil +} + +// SubscribeOrCancelUsersStatus Subscribe online or cancel online users. +func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) { + if req.Genre == constant.SubscriberUser { + err := s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs) + if err != nil { + return nil, err + } + var status []*pbuser.OnlineStatus + status, err = s.getUsersOnlineStatus(ctx, req.UserIDs) + if err != nil { + return nil, err + } + return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil + } else if req.Genre == constant.Unsubscribe { + err := s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs) + if err != nil { + return nil, err + } + } + return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil +} + +// GetUserStatus Get the online status of the user. +func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) { + res, err := s.getUsersOnlineStatus(ctx, req.UserIDs) + if err != nil { + return nil, err + } + return &pbuser.GetUserStatusResp{StatusList: res}, nil +} + +// SetUserStatus Synchronize user's online status. 
+func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) { + var ( + online []int32 + offline []int32 + ) + switch req.Status { + case constant.Online: + online = []int32{req.PlatformID} + case constant.Offline: + online = []int32{req.PlatformID} + } + if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil { + return nil, err + } + list, err := s.db.GetSubscribedList(ctx, req.UserID) + if err != nil { + return nil, err + } + for _, userID := range list { + tips := &sdkws.UserStatusChangeTips{ + FromUserID: req.UserID, + ToUserID: userID, + Status: req.Status, + PlatformID: req.PlatformID, + } + s.userNotificationSender.UserStatusChangeNotification(ctx, tips) + } + + return &pbuser.SetUserStatusResp{}, nil +} + +// GetSubscribeUsersStatus Get the online status of subscribers. +func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) { + userList, err := s.db.GetAllSubscribeList(ctx, req.UserID) + if err != nil { + return nil, err + } + onlineStatusList, err := s.getUsersOnlineStatus(ctx, userList) + if err != nil { + return nil, err + } + return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil +} + +func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) { + for _, status := range req.Status { + if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil { + return nil, err + } + } + return &pbuser.SetUserOnlineStatusResp{}, nil +} diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index 211b360b7..0b96077ec 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -19,6 +19,7 @@ import ( "errors" "github.com/openimsdk/open-im-server/v3/internal/rpc/friend" "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" @@ -50,6 +51,7 @@ import ( ) type userServer struct { + online cache.OnlineCache db controller.UserDatabase friendNotificationSender *friend.FriendNotificationSender userNotificationSender *UserNotificationSender @@ -98,6 +100,7 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) localcache.InitLocalCache(&config.LocalCacheConfig) u := &userServer{ + online: redis.NewUserOnline(rdb), db: database, RegisterCenter: client, friendRpcClient: &friendRpcClient, @@ -329,76 +332,6 @@ func (s *userServer) GetAllUserID(ctx context.Context, req *pbuser.GetAllUserIDR return &pbuser.GetAllUserIDResp{Total: int32(total), UserIDs: userIDs}, nil } -// SubscribeOrCancelUsersStatus Subscribe online or cancel online users. 
-func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (resp *pbuser.SubscribeOrCancelUsersStatusResp, err error) { - if req.Genre == constant.SubscriberUser { - err = s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs) - if err != nil { - return nil, err - } - var status []*pbuser.OnlineStatus - status, err = s.db.GetUserStatus(ctx, req.UserIDs) - if err != nil { - return nil, err - } - return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil - } else if req.Genre == constant.Unsubscribe { - err = s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs) - if err != nil { - return nil, err - } - } - return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil -} - -// GetUserStatus Get the online status of the user. -func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (resp *pbuser.GetUserStatusResp, - err error) { - onlineStatusList, err := s.db.GetUserStatus(ctx, req.UserIDs) - if err != nil { - return nil, err - } - return &pbuser.GetUserStatusResp{StatusList: onlineStatusList}, nil -} - -// SetUserStatus Synchronize user's online status. -func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (resp *pbuser.SetUserStatusResp, - err error) { - err = s.db.SetUserStatus(ctx, req.UserID, req.Status, req.PlatformID) - if err != nil { - return nil, err - } - list, err := s.db.GetSubscribedList(ctx, req.UserID) - if err != nil { - return nil, err - } - for _, userID := range list { - tips := &sdkws.UserStatusChangeTips{ - FromUserID: req.UserID, - ToUserID: userID, - Status: req.Status, - PlatformID: req.PlatformID, - } - s.userNotificationSender.UserStatusChangeNotification(ctx, tips) - } - - return &pbuser.SetUserStatusResp{}, nil -} - -// GetSubscribeUsersStatus Get the online status of subscribers. -func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, - req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) { - userList, err := s.db.GetAllSubscribeList(ctx, req.UserID) - if err != nil { - return nil, err - } - onlineStatusList, err := s.db.GetUserStatus(ctx, userList) - if err != nil { - return nil, err - } - return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil -} - // ProcessUserCommandAdd user general function add. 
func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.ProcessUserCommandAddReq) (*pbuser.ProcessUserCommandAddResp, error) { err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID) diff --git a/internal/tools/cron_task.go b/internal/tools/cron_task.go index bf037b694..50f328b55 100644 --- a/internal/tools/cron_task.go +++ b/internal/tools/cron_task.go @@ -65,10 +65,16 @@ func Start(ctx context.Context, config *CronTaskConfig) error { return } log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now)) + } + delFile := func() { + } if _, err := crontab.AddFunc(config.CronTask.ChatRecordsClearTime, clearFunc); err != nil { return errs.Wrap(err) } + if _, err := crontab.AddFunc(config.CronTask.ChatRecordsClearTime, delFile); err != nil { + return errs.Wrap(err) + } log.ZInfo(ctx, "start cron task", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime) crontab.Start() <-ctx.Done() diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index 6260dc00f..a4e613560 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -107,6 +107,7 @@ type API struct { type CronTask struct { ChatRecordsClearTime string `mapstructure:"chatRecordsClearTime"` RetainChatRecords int `mapstructure:"retainChatRecords"` + FileTime int } type OfflinePushConfig struct { diff --git a/pkg/common/storage/cache/cachekey/online.go b/pkg/common/storage/cache/cachekey/online.go new file mode 100644 index 000000000..164e5f2f4 --- /dev/null +++ b/pkg/common/storage/cache/cachekey/online.go @@ -0,0 +1,13 @@ +package cachekey + +import "time" + +const ( + OnlineKey = "ONLINE:" + OnlineChannel = "online_change" + OnlineExpire = time.Hour / 2 +) + +func GetOnlineKey(userID string) string { + return OnlineKey + userID +} diff --git a/pkg/common/storage/cache/cachekey/user.go b/pkg/common/storage/cache/cachekey/user.go index 7d06d4f75..473ca1b12 100644 --- a/pkg/common/storage/cache/cachekey/user.go +++ b/pkg/common/storage/cache/cachekey/user.go @@ -17,7 +17,6 @@ package cachekey const ( UserInfoKey = "USER_INFO:" UserGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:" - olineStatusKey = "ONLINE_STATUS:" ) func GetUserInfoKey(userID string) string { @@ -27,7 +26,3 @@ func GetUserInfoKey(userID string) string { func GetUserGlobalRecvMsgOptKey(userID string) string { return UserGlobalRecvMsgOptKey + userID } - -func GetOnlineStatusKey(modKey string) string { - return olineStatusKey + modKey -} diff --git a/pkg/common/storage/cache/online.go b/pkg/common/storage/cache/online.go new file mode 100644 index 000000000..7669c8a11 --- /dev/null +++ b/pkg/common/storage/cache/online.go @@ -0,0 +1,8 @@ +package cache + +import "context" + +type OnlineCache interface { + GetOnline(ctx context.Context, userID string) ([]int32, error) + SetUserOnline(ctx context.Context, userID string, online, offline []int32) error +} diff --git a/pkg/common/storage/cache/redis/online.go b/pkg/common/storage/cache/redis/online.go index 138f9a573..dc6a5f775 100644 --- a/pkg/common/storage/cache/redis/online.go +++ b/pkg/common/storage/cache/redis/online.go @@ -2,10 +2,22 @@ package redis import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/tools/errs" "github.com/redis/go-redis/v9" + "strconv" "time" ) +func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache { + return &userOnline{ + rdb: rdb, + 
expire: cachekey.OnlineExpire, + channelName: cachekey.OnlineChannel, + } +} + type userOnline struct { rdb redis.UniversalClient expire time.Duration @@ -13,7 +25,26 @@ type userOnline struct { } func (s *userOnline) getUserOnlineKey(userID string) string { - return "USER_ONLINE:" + userID + return cachekey.GetOnlineKey(userID) +} + +func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) { + members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{ + Min: strconv.FormatInt(time.Now().Unix(), 10), + Max: "+inf", + }).Result() + if err != nil { + return nil, errs.Wrap(err) + } + platformIDs := make([]int32, 0, len(members)) + for _, member := range members { + val, err := strconv.Atoi(member) + if err != nil { + return nil, errs.Wrap(err) + } + platformIDs = append(platformIDs, int32(val)) + } + return platformIDs, nil } func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error { diff --git a/pkg/common/storage/cache/redis/seq_user_test.go b/pkg/common/storage/cache/redis/seq_user_test.go index 04a5d49cb..b4c852cd0 100644 --- a/pkg/common/storage/cache/redis/seq_user_test.go +++ b/pkg/common/storage/cache/redis/seq_user_test.go @@ -5,6 +5,8 @@ import ( "fmt" "github.com/redis/go-redis/v9" "log" + "strconv" + "sync/atomic" "testing" "time" ) @@ -24,30 +26,39 @@ func newTestOnline() *userOnline { func TestOnline(t *testing.T) { ts := newTestOnline() + var count atomic.Int64 + for i := 0; i < 64; i++ { + go func(userID string) { + var err error + for i := 0; ; i++ { + if i%2 == 0 { + err = ts.SetUserOnline(context.Background(), userID, []int32{5, 6}, []int32{7, 8, 9}) + } else { + err = ts.SetUserOnline(context.Background(), userID, []int32{1, 2, 3}, []int32{4, 5, 6}) + } + if err != nil { + panic(err) + } + count.Add(1) + } + }(strconv.Itoa(10000 + i)) + } - //err := ts.SetUserOnline(context.Background(), "1000", []int32{1, 2, 3}, []int32{4, 5, 6}) - err := ts.SetUserOnline(context.Background(), "1000", nil, []int32{1, 2, 3}) - - t.Log(err) - + ticker := time.NewTicker(time.Second) + for range ticker.C { + t.Log(count.Swap(0)) + } } -/* - -local function tableToString(tbl, separator) - local result = {} - for _, v in ipairs(tbl) do - table.insert(result, tostring(v)) - end - return table.concat(result, separator) -end - -local myTable = {"one", "two", "three"} -local result = tableToString(myTable, ":") - -print(result) - -*/ +func TestGetOnline(t *testing.T) { + ts := newTestOnline() + ctx := context.Background() + pIDs, err := ts.GetOnline(ctx, "10000") + if err != nil { + panic(err) + } + t.Log(pIDs) +} func TestRecvOnline(t *testing.T) { ts := newTestOnline() diff --git a/pkg/common/storage/cache/redis/user.go b/pkg/common/storage/cache/redis/user.go index c05cd3895..b846bf398 100644 --- a/pkg/common/storage/cache/redis/user.go +++ b/pkg/common/storage/cache/redis/user.go @@ -17,7 +17,6 @@ package redis import ( "context" "encoding/json" - "errors" "github.com/dtm-labs/rockscache" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" @@ -29,8 +28,6 @@ import ( "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" - "hash/crc32" - "strconv" "time" ) @@ -61,9 +58,9 @@ func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, } } -func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string { - return cachekey.GetOnlineStatusKey(modKey) -} +//func (u 
*UserCacheRedis) getOnlineStatusKey(modKey string) string { +// return cachekey.GetOnlineStatusKey(modKey) +//} func (u *UserCacheRedis) CloneUserCache() cache.UserCache { return &UserCacheRedis{ @@ -141,95 +138,95 @@ type RedisUserOnline struct { } // GetUserStatus get user status. -func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) { - userStatus := make([]*user.OnlineStatus, 0, len(userIDs)) - for _, userID := range userIDs { - UserIDNum := crc32.ChecksumIEEE([]byte(userID)) - modKey := strconv.Itoa(int(UserIDNum % statusMod)) - var onlineStatus user.OnlineStatus - key := u.getOnlineStatusKey(modKey) - result, err := u.rdb.HGet(ctx, key, userID).Result() - if err != nil { - if errors.Is(err, redis.Nil) { - // key or field does not exist - userStatus = append(userStatus, &user.OnlineStatus{ - UserID: userID, - Status: constant.Offline, - PlatformIDs: nil, - }) - - continue - } else { - return nil, errs.Wrap(err) - } - } - err = json.Unmarshal([]byte(result), &onlineStatus) - if err != nil { - return nil, errs.Wrap(err) - } - onlineStatus.UserID = userID - onlineStatus.Status = constant.Online - userStatus = append(userStatus, &onlineStatus) - } - - return userStatus, nil -} +//func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) { +// userStatus := make([]*user.OnlineStatus, 0, len(userIDs)) +// for _, userID := range userIDs { +// UserIDNum := crc32.ChecksumIEEE([]byte(userID)) +// modKey := strconv.Itoa(int(UserIDNum % statusMod)) +// var onlineStatus user.OnlineStatus +// key := u.getOnlineStatusKey(modKey) +// result, err := u.rdb.HGet(ctx, key, userID).Result() +// if err != nil { +// if errors.Is(err, redis.Nil) { +// // key or field does not exist +// userStatus = append(userStatus, &user.OnlineStatus{ +// UserID: userID, +// Status: constant.Offline, +// PlatformIDs: nil, +// }) +// +// continue +// } else { +// return nil, errs.Wrap(err) +// } +// } +// err = json.Unmarshal([]byte(result), &onlineStatus) +// if err != nil { +// return nil, errs.Wrap(err) +// } +// onlineStatus.UserID = userID +// onlineStatus.Status = constant.Online +// userStatus = append(userStatus, &onlineStatus) +// } +// +// return userStatus, nil +//} // SetUserStatus Set the user status and save it in redis. 
-func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error { - UserIDNum := crc32.ChecksumIEEE([]byte(userID)) - modKey := strconv.Itoa(int(UserIDNum % statusMod)) - key := u.getOnlineStatusKey(modKey) - log.ZDebug(ctx, "SetUserStatus args", "userID", userID, "status", status, "platformID", platformID, "modKey", modKey, "key", key) - isNewKey, err := u.rdb.Exists(ctx, key).Result() - if err != nil { - return errs.Wrap(err) - } - if isNewKey == 0 { - if status == constant.Online { - onlineStatus := user.OnlineStatus{ - UserID: userID, - Status: constant.Online, - PlatformIDs: []int32{platformID}, - } - jsonData, err := json.Marshal(&onlineStatus) - if err != nil { - return errs.Wrap(err) - } - _, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result() - if err != nil { - return errs.Wrap(err) - } - u.rdb.Expire(ctx, key, userOlineStatusExpireTime) - - return nil - } - } - - isNil := false - result, err := u.rdb.HGet(ctx, key, userID).Result() - if err != nil { - if errors.Is(err, redis.Nil) { - isNil = true - } else { - return errs.Wrap(err) - } - } - - if status == constant.Offline { - err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key) - if err != nil { - return err - } - } else { - err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key) - if err != nil { - return errs.Wrap(err) - } - } - - return nil -} +//func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error { +// UserIDNum := crc32.ChecksumIEEE([]byte(userID)) +// modKey := strconv.Itoa(int(UserIDNum % statusMod)) +// key := u.getOnlineStatusKey(modKey) +// log.ZDebug(ctx, "SetUserStatus args", "userID", userID, "status", status, "platformID", platformID, "modKey", modKey, "key", key) +// isNewKey, err := u.rdb.Exists(ctx, key).Result() +// if err != nil { +// return errs.Wrap(err) +// } +// if isNewKey == 0 { +// if status == constant.Online { +// onlineStatus := user.OnlineStatus{ +// UserID: userID, +// Status: constant.Online, +// PlatformIDs: []int32{platformID}, +// } +// jsonData, err := json.Marshal(&onlineStatus) +// if err != nil { +// return errs.Wrap(err) +// } +// _, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result() +// if err != nil { +// return errs.Wrap(err) +// } +// u.rdb.Expire(ctx, key, userOlineStatusExpireTime) +// +// return nil +// } +// } +// +// isNil := false +// result, err := u.rdb.HGet(ctx, key, userID).Result() +// if err != nil { +// if errors.Is(err, redis.Nil) { +// isNil = true +// } else { +// return errs.Wrap(err) +// } +// } +// +// if status == constant.Offline { +// err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key) +// if err != nil { +// return err +// } +// } else { +// err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key) +// if err != nil { +// return errs.Wrap(err) +// } +// } +// +// return nil +//} func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error { if isNil { diff --git a/pkg/common/storage/cache/user.go b/pkg/common/storage/cache/user.go index 5101c0b6c..69a11635c 100644 --- a/pkg/common/storage/cache/user.go +++ b/pkg/common/storage/cache/user.go @@ -17,7 +17,6 @@ package cache import ( "context" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" - "github.com/openimsdk/protocol/user" ) type UserCache interface { @@ -28,6 +27,6 @@ 
type UserCache interface { DelUsersInfo(userIDs ...string) UserCache GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error) DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache - GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) - SetUserStatus(ctx context.Context, userID string, status, platformID int32) error + //GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) + //SetUserStatus(ctx context.Context, userID string, status, platformID int32) error } diff --git a/pkg/common/storage/controller/user.go b/pkg/common/storage/controller/user.go index 9efe535c0..321eff03c 100644 --- a/pkg/common/storage/controller/user.go +++ b/pkg/common/storage/controller/user.go @@ -70,10 +70,6 @@ type UserDatabase interface { GetAllSubscribeList(ctx context.Context, userID string) ([]string, error) // GetSubscribedList Get all subscribed lists GetSubscribedList(ctx context.Context, userID string) ([]string, error) - // GetUserStatus Get the online status of the user - GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) - // SetUserStatus Set the user status and store the user status in redis - SetUserStatus(ctx context.Context, userID string, status, platformID int32) error // CRUD user command AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error @@ -246,17 +242,6 @@ func (u *userDatabase) GetSubscribedList(ctx context.Context, userID string) ([] return list, nil } -// GetUserStatus get user status. -func (u *userDatabase) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) { - onlineStatusList, err := u.cache.GetUserStatus(ctx, userIDs) - return onlineStatusList, err -} - -// SetUserStatus Set the user status and save it in redis. -func (u *userDatabase) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error { - return u.cache.SetUserStatus(ctx, userID, status, platformID) -} - func (u *userDatabase) AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error { return u.userDB.AddUserCommand(ctx, userID, Type, UUID, value, ex) }
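
A note on the Redis layout this patch introduces: GetOnline in pkg/common/storage/cache/redis/online.go reads one sorted set per user (key ONLINE:<userID> via cachekey.GetOnlineKey), whose members are platform IDs and whose scores are expiry timestamps, so ZRANGEBYSCORE from now to +inf yields the platforms still online. The matching write path, SetUserOnline, is outside the visible hunk, so the sketch below is an assumption rather than the patch's implementation: the type name onlineSketch, the inlined "ONLINE:" prefix, and the exact pipelined command sequence are illustrative only, chosen to stay consistent with the read side and with cachekey.OnlineExpire / cachekey.OnlineChannel wired in by NewUserOnline.

package onlinesketch

import (
	"context"
	"strconv"
	"time"

	"github.com/redis/go-redis/v9"
)

// onlineSketch mirrors the layout GetOnline in this patch reads back:
// a per-user sorted set, members = platform IDs, scores = expiry timestamps.
type onlineSketch struct {
	rdb         redis.UniversalClient
	expire      time.Duration // e.g. cachekey.OnlineExpire
	channelName string        // e.g. cachekey.OnlineChannel ("online_change")
}

// SetUserOnline is a hypothetical write path consistent with GetOnline;
// it is NOT the body committed in this patch.
func (s *onlineSketch) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
	key := "ONLINE:" + userID // cachekey.GetOnlineKey(userID)
	now := time.Now()
	_, err := s.rdb.Pipelined(ctx, func(p redis.Pipeliner) error {
		// Drop members whose expiry score has already passed.
		p.ZRemRangeByScore(ctx, key, "-inf", strconv.FormatInt(now.Unix(), 10))
		// Remove platforms that just went offline.
		for _, pid := range offline {
			p.ZRem(ctx, key, strconv.Itoa(int(pid)))
		}
		// (Re)add online platforms with a refreshed expiry score, so that
		// GetOnline's ZRANGEBYSCORE(now, +inf) still sees them.
		for _, pid := range online {
			p.ZAdd(ctx, key, redis.Z{
				Score:  float64(now.Add(s.expire).Unix()),
				Member: strconv.Itoa(int(pid)),
			})
		}
		p.Expire(ctx, key, s.expire)
		// Assumed: broadcast the change on the online_change channel,
		// which TestRecvOnline in seq_user_test.go appears to listen on.
		p.Publish(ctx, s.channelName, userID)
		return nil
	})
	return err
}

On the gateway side, ChangeOnlineStatus in internal/msggateway/online.go feeds this path in batches: each UserState from the connection map is hashed (md5 of the userID plus a per-process random offset, modulo the worker count) so a given user always lands on the same worker, and the accumulated states are flushed into SetUserOnlineStatus RPCs either every second via mergeTicker or every three minutes via the full scan of ws.clients.GetAllUserStatus.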