fix: fix lint errors in pkg/common/db/cache

pull/1263/head
cncsmonster 2 years ago
parent 5d798c73ce
commit 7cb011fd5e

@@ -52,6 +52,7 @@ func NewBlackCacheRedis(
 	options rockscache.Options,
 ) BlackCache {
 	rcClient := rockscache.NewClient(rdb, options)
+
 	return &BlackCacheRedis{
 		expireTime: blackExpireTime,
 		rcClient:   rcClient,
@@ -88,5 +89,6 @@ func (b *BlackCacheRedis) GetBlackIDs(ctx context.Context, userID string) (black
 func (b *BlackCacheRedis) DelBlackIDs(ctx context.Context, userID string) BlackCache {
 	cache := b.NewCache()
 	cache.AddKeys(b.getBlackIDsKey(userID))
+
 	return cache
 }

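Most hunks in this commit make the same whitespace change: a blank line is inserted between the last statement of a block and its return, the style enforced by whitespace linters such as wsl. A minimal sketch of the resulting style (the function and identifiers below are illustrative, not taken from the repository):

package main

import "fmt"

// delKeys illustrates the formatting applied throughout this commit:
// the final return is separated from the statements above it by a blank line.
func delKeys(keys ...string) []string {
	deleted := make([]string, 0, len(keys))
	deleted = append(deleted, keys...)

	return deleted
}

func main() {
	fmt.Println(delKeys("a", "b"))
}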
@@ -89,6 +89,7 @@ func NewConversationRedis(
 	db relationtb.ConversationModelInterface,
 ) ConversationCache {
 	rcClient := rockscache.NewClient(rdb, opts)
+
 	return &ConversationRedisCache{
 		rcClient:  rcClient,
 		metaCache: NewMetaCacheRedis(rcClient),
@@ -110,6 +111,7 @@ func NewNewConversationRedis(
 	options rockscache.Options,
 ) ConversationCache {
 	rcClient := rockscache.NewClient(rdb, options)
+
 	return &ConversationRedisCache{
 		rcClient:  rcClient,
 		metaCache: NewMetaCacheRedis(rcClient),
@@ -168,12 +170,13 @@ func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, own
 }

 func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) ConversationCache {
-	var keys []string
+	keys := make([]string, 0, len(userIDs))
 	for _, userID := range userIDs {
 		keys = append(keys, c.getConversationIDsKey(userID))
 	}
 	cache := c.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }
@@ -198,18 +201,20 @@ func (c *ConversationRedisCache) GetUserConversationIDsHash(
 		utils.Sort(conversationIDs, true)
 		bi := big.NewInt(0)
 		bi.SetString(utils.Md5(strings.Join(conversationIDs, ";"))[0:8], 16)
+
 		return bi.Uint64(), nil
 		},
 	)
 }

 func (c *ConversationRedisCache) DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache {
-	var keys []string
+	keys := make([]string, 0, len(ownerUserIDs))
 	for _, ownerUserID := range ownerUserIDs {
 		keys = append(keys, c.getUserConversationIDsHashKey(ownerUserID))
 	}
 	cache := c.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }
@@ -229,12 +234,13 @@ func (c *ConversationRedisCache) GetConversation(
 }

 func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache {
-	var keys []string
+	keys := make([]string, 0, len(conversationIDs))
 	for _, conversationID := range conversationIDs {
 		keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
 	}
 	cache := c.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }
@@ -248,6 +254,7 @@ func (c *ConversationRedisCache) getConversationIndex(
 			return _i, nil
 		}
 	}
+
 	return 0, errors.New("not found key:" + key + " in keys")
 }
@@ -256,10 +263,11 @@ func (c *ConversationRedisCache) GetConversations(
 	ownerUserID string,
 	conversationIDs []string,
 ) ([]*relationtb.ConversationModel, error) {
-	var keys []string
+	keys := make([]string, 0, len(conversationIDs))
 	for _, conversarionID := range conversationIDs {
 		keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
 	}
+
 	return batchGetCache(
 		ctx,
 		c.rcClient,
@@ -280,10 +288,11 @@ func (c *ConversationRedisCache) GetUserAllConversations(
 	if err != nil {
 		return nil, err
 	}
-	var keys []string
+	keys := make([]string, 0, len(conversationIDs))
 	for _, conversarionID := range conversationIDs {
 		keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
 	}
+
 	return batchGetCache(
 		ctx,
 		c.rcClient,
@@ -327,24 +336,27 @@ func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDs(
 }

 func (c *ConversationRedisCache) DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache {
-	var keys []string
+	keys := make([]string, 0, len(ownerUserIDs))
 	for _, ownerUserID := range ownerUserIDs {
 		keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
 	}
 	cache := c.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }

 func (c *ConversationRedisCache) DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache {
 	cache := c.NewCache()
 	cache.AddKeys(c.getRecvMsgOptKey(ownerUserID, conversationID))
+
 	return cache
 }

 func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache {
 	cache := c.NewCache()
 	cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsKey(groupID))
+
 	return cache
 }
@@ -365,6 +377,7 @@ func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDsHash(
 		utils.Sort(userIDs, true)
 		bi := big.NewInt(0)
 		bi.SetString(utils.Md5(strings.Join(userIDs, ";"))[0:8], 16)
+
 		return bi.Uint64(), nil
 		},
 	)
@@ -373,6 +386,7 @@ func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDsHash(
 func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache {
 	cache := c.NewCache()
 	cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID))
+
 	return cache
 }
@@ -385,6 +399,7 @@ func (c *ConversationRedisCache) getUserAllHasReadSeqsIndex(
 			return _i, nil
 		}
 	}
+
 	return 0, errors.New("not found key:" + conversationID + " in keys")
 }
@@ -396,10 +411,11 @@ func (c *ConversationRedisCache) GetUserAllHasReadSeqs(
 	if err != nil {
 		return nil, err
 	}
-	var keys []string
+	keys := make([]string, 0, len(conversationIDs))
 	for _, conversarionID := range conversationIDs {
 		keys = append(keys, c.getConversationHasReadSeqKey(ownerUserID, conversarionID))
 	}
+
 	return batchGetCacheMap(
 		ctx,
 		c.rcClient,
@@ -420,6 +436,7 @@ func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string,
 	for _, conversationID := range conversationIDs {
 		cache.AddKeys(c.getConversationHasReadSeqKey(ownerUserID, conversationID))
 	}
+
 	return cache
 }
@@ -451,5 +468,6 @@ func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(convers
 	for _, conversationID := range conversationIDs {
 		cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
 	}
+
 	return cache
 }

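The repeated `var keys []string` → `keys := make([]string, 0, len(xs))` change preallocates the slice's backing array, so the appends inside the loop never have to grow it. A small self-contained sketch of the same pattern (the key prefix and identifiers below are illustrative):

package main

import "fmt"

const conversationIDsKey = "CONVERSATION_IDS:"

// getConversationIDsKeys mirrors the preallocation pattern from the diff:
// the slice starts with zero length but capacity len(userIDs), so every
// append reuses the same backing array instead of reallocating.
func getConversationIDsKeys(userIDs []string) []string {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, conversationIDsKey+userID)
	}

	return keys
}

func main() {
	fmt.Println(getConversationIDsKeys([]string{"u1", "u2"}))
}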
@@ -59,6 +59,7 @@ func NewFriendCacheRedis(
 	options rockscache.Options,
 ) FriendCache {
 	rcClient := rockscache.NewClient(rdb, options)
+
 	return &FriendCacheRedis{
 		metaCache: NewMetaCacheRedis(rcClient),
 		friendDB:  friendDB,
@@ -100,14 +101,15 @@ func (f *FriendCacheRedis) GetFriendIDs(ctx context.Context, ownerUserID string)
 	)
 }

-func (f *FriendCacheRedis) DelFriendIDs(ownerUserID ...string) FriendCache {
-	new := f.NewCache()
-	var keys []string
-	for _, userID := range ownerUserID {
+func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) FriendCache {
+	newGroupCache := f.NewCache()
+	keys := make([]string, 0, len(ownerUserIDs))
+	for _, userID := range ownerUserIDs {
 		keys = append(keys, f.getFriendIDsKey(userID))
 	}
-	new.AddKeys(keys...)
-	return new
+	newGroupCache.AddKeys(keys...)
+
+	return newGroupCache
 }

 // todo.
@@ -128,13 +130,15 @@ func (f *FriendCacheRedis) GetTwoWayFriendIDs(
 			twoWayFriendIDs = append(twoWayFriendIDs, ownerUserID)
 		}
 	}
+
 	return twoWayFriendIDs, nil
 }

 func (f *FriendCacheRedis) DelTwoWayFriendIDs(ctx context.Context, ownerUserID string) FriendCache {
-	new := f.NewCache()
-	new.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID))
-	return new
+	newFriendCache := f.NewCache()
+	newFriendCache.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID))
+
+	return newFriendCache
 }
@@ -153,7 +157,8 @@ func (f *FriendCacheRedis) GetFriend(
 }

 func (f *FriendCacheRedis) DelFriend(ownerUserID, friendUserID string) FriendCache {
-	new := f.NewCache()
-	new.AddKeys(f.getFriendKey(ownerUserID, friendUserID))
-	return new
+	newFriendCache := f.NewCache()
+	newFriendCache.AddKeys(f.getFriendKey(ownerUserID, friendUserID))
+
+	return newFriendCache
 }

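The friend-cache deleters used a local variable named `new`, which shadows Go's predeclared `new` function and is flagged by linters such as predeclared; the commit renames it (to `newFriendCache` / `newGroupCache`). A minimal sketch of the problem and the fix, with illustrative types and key prefix:

package main

import "fmt"

type cacheBuilder struct{ keys []string }

func (c *cacheBuilder) AddKeys(keys ...string) { c.keys = append(c.keys, keys...) }

// Before: `new := &cacheBuilder{}` compiles, but it shadows the predeclared
// identifier `new` for the rest of the function, so new(T) can no longer be
// used there and linters flag it. A descriptive name avoids the shadow.
func delFriendIDs(ownerUserIDs ...string) *cacheBuilder {
	newFriendCache := &cacheBuilder{}
	for _, userID := range ownerUserIDs {
		newFriendCache.AddKeys("FRIEND_IDS:" + userID)
	}

	return newFriendCache
}

func main() {
	fmt.Println(delFriendIDs("u1", "u2").keys)
}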
@@ -109,6 +109,7 @@ func NewGroupCacheRedis(
 	opts rockscache.Options,
 ) GroupCache {
 	rcClient := rockscache.NewClient(rdb, opts)
+
 	return &GroupCacheRedis{
 		rcClient: rcClient, expireTime: groupExpireTime,
 		groupDB: groupDB, groupMemberDB: groupMemberDB, groupRequestDB: groupRequestDB,
@@ -169,6 +170,7 @@ func (g *GroupCacheRedis) GetGroupIndex(group *relationtb.GroupModel, keys []str
 			return i, nil
 		}
 	}
+
 	return 0, errIndex
 }
@@ -179,6 +181,7 @@ func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *relationtb.GroupMembe
 			return i, nil
 		}
 	}
+
 	return 0, errIndex
 }
@@ -187,10 +190,11 @@ func (g *GroupCacheRedis) GetGroupsInfo(
 	ctx context.Context,
 	groupIDs []string,
 ) (groups []*relationtb.GroupModel, err error) {
-	var keys []string
+	keys := make([]string, 0, len(groupIDs))
 	for _, group := range groupIDs {
 		keys = append(keys, g.getGroupInfoKey(group))
 	}
+
 	return batchGetCache(
 		ctx,
 		g.rcClient,
@@ -216,13 +220,14 @@ func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (gro
 }

 func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) GroupCache {
-	new := g.NewCache()
-	var keys []string
+	newGroupCache := g.NewCache()
+	keys := make([]string, 0, len(groupIDs))
 	for _, groupID := range groupIDs {
 		keys = append(keys, g.getGroupInfoKey(groupID))
 	}
-	new.AddKeys(keys...)
-	return new
+	newGroupCache.AddKeys(keys...)
+
+	return newGroupCache
 }

 func (g *GroupCacheRedis) GetJoinedSuperGroupIDs(
@@ -239,6 +244,7 @@ func (g *GroupCacheRedis) GetJoinedSuperGroupIDs(
 		if err != nil {
 			return nil, err
 		}
+
 		return userGroup.GroupIDs, nil
 		},
 	)
@@ -248,10 +254,11 @@ func (g *GroupCacheRedis) GetSuperGroupMemberIDs(
 	ctx context.Context,
 	groupIDs ...string,
 ) (models []*unrelationtb.SuperGroupModel, err error) {
-	var keys []string
+	keys := make([]string, 0, len(groupIDs))
 	for _, group := range groupIDs {
 		keys = append(keys, g.getSuperGroupMemberIDsKey(group))
 	}
+
 	return batchGetCache(
 		ctx,
 		g.rcClient,
@@ -263,6 +270,7 @@ func (g *GroupCacheRedis) GetSuperGroupMemberIDs(
 			return i, nil
 			}
 		}
+
 		return 0, errIndex
 		},
 		func(ctx context.Context) ([]*unrelationtb.SuperGroupModel, error) {
@@ -273,23 +281,25 @@ func (g *GroupCacheRedis) GetSuperGroupMemberIDs(
 // userJoinSuperGroup.
 func (g *GroupCacheRedis) DelJoinedSuperGroupIDs(userIDs ...string) GroupCache {
-	new := g.NewCache()
-	var keys []string
+	newGroupCache := g.NewCache()
+	keys := make([]string, 0, len(userIDs))
 	for _, userID := range userIDs {
 		keys = append(keys, g.getJoinedSuperGroupsIDKey(userID))
 	}
-	new.AddKeys(keys...)
-	return new
+	newGroupCache.AddKeys(keys...)
+
+	return newGroupCache
 }

 func (g *GroupCacheRedis) DelSuperGroupMemberIDs(groupIDs ...string) GroupCache {
-	new := g.NewCache()
-	var keys []string
+	newGroupCache := g.NewCache()
+	keys := make([]string, 0, len(groupIDs))
 	for _, groupID := range groupIDs {
 		keys = append(keys, g.getSuperGroupMemberIDsKey(groupID))
 	}
-	new.AddKeys(keys...)
-	return new
+	newGroupCache.AddKeys(keys...)
+
+	return newGroupCache
 }

 // groupMembersHash.
@@ -368,12 +378,14 @@ func (g *GroupCacheRedis) GetGroupMemberHashMap(
 		}
 		res[groupID] = &relationtb.GroupSimpleUserID{Hash: hash, MemberNum: uint32(num)}
 	}
+
 	return res, nil
 }

 func (g *GroupCacheRedis) DelGroupMembersHash(groupID string) GroupCache {
 	cache := g.NewCache()
 	cache.AddKeys(g.getGroupMembersHashKey(groupID))
+
 	return cache
 }
@@ -399,12 +411,14 @@ func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []str
 		}
 		m[groupID] = userIDs
 	}
+
 	return m, nil
 }

 func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) GroupCache {
 	cache := g.NewCache()
 	cache.AddKeys(g.getGroupMemberIDsKey(groupID))
+
 	return cache
 }
@@ -421,12 +435,13 @@ func (g *GroupCacheRedis) GetJoinedGroupIDs(ctx context.Context, userID string)
 }

 func (g *GroupCacheRedis) DelJoinedGroupID(userIDs ...string) GroupCache {
-	var keys []string
+	keys := make([]string, 0, len(userIDs))
 	for _, userID := range userIDs {
 		keys = append(keys, g.getJoinedGroupsKey(userID))
 	}
 	cache := g.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }
@@ -450,10 +465,11 @@ func (g *GroupCacheRedis) GetGroupMembersInfo(
 	groupID string,
 	userIDs []string,
 ) ([]*relationtb.GroupMemberModel, error) {
-	var keys []string
+	keys := make([]string, 0, len(userIDs))
 	for _, userID := range userIDs {
 		keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
 	}
+
 	return batchGetCache(
 		ctx,
 		g.rcClient,
@@ -482,6 +498,7 @@ func (g *GroupCacheRedis) GetGroupMembersPage(
 		userIDs = groupMemberIDs
 	}
 	groupMembers, err = g.GetGroupMembersInfo(ctx, groupID, utils.Paginate(userIDs, int(showNumber), int(showNumber)))
+
 	return uint32(len(userIDs)), groupMembers, err
 }
@@ -493,6 +510,7 @@ func (g *GroupCacheRedis) GetAllGroupMembersInfo(
 	if err != nil {
 		return nil, err
 	}
+
 	return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
 }
@@ -504,10 +522,11 @@ func (g *GroupCacheRedis) GetAllGroupMemberInfo(
 	if err != nil {
 		return nil, err
 	}
-	var keys []string
+	keys := make([]string, 0, len(groupMemberIDs))
 	for _, groupMemberID := range groupMemberIDs {
 		keys = append(keys, g.getGroupMemberInfoKey(groupID, groupMemberID))
 	}
+
 	return batchGetCache(
 		ctx,
 		g.rcClient,
@@ -521,12 +540,13 @@ func (g *GroupCacheRedis) GetAllGroupMemberInfo(
 }

 func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) GroupCache {
-	var keys []string
+	keys := make([]string, 0, len(userIDs))
 	for _, userID := range userIDs {
 		keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
 	}
 	cache := g.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }
@@ -543,11 +563,12 @@ func (g *GroupCacheRedis) GetGroupMemberNum(ctx context.Context, groupID string)
 }

 func (g *GroupCacheRedis) DelGroupsMemberNum(groupID ...string) GroupCache {
-	var keys []string
+	keys := make([]string, 0, len(groupID))
 	for _, groupID := range groupID {
 		keys = append(keys, g.getGroupMemberNumKey(groupID))
 	}
 	cache := g.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }

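All of these Del* methods only collect stale keys on a fresh cache object; nothing is removed from Redis until the caller later executes the deletion (ExecDel in the next section). A hedged, self-contained sketch of that builder shape, with a plain map standing in for Redis/rockscache so it runs standalone (types and key names are illustrative, not the repository's interfaces):

package main

import (
	"context"
	"fmt"
)

// keyDeleter mimics the pattern in the diff: Del* calls record which cache
// keys became stale, and ExecDel removes them in one pass afterwards.
type keyDeleter struct {
	store map[string]string
	keys  []string
}

func (d *keyDeleter) AddKeys(keys ...string) { d.keys = append(d.keys, keys...) }

func (d *keyDeleter) ExecDel(ctx context.Context) error {
	for _, k := range d.keys {
		delete(d.store, k)
	}

	return nil
}

func main() {
	store := map[string]string{"GROUP_INFO:g1": "{}", "GROUP_INFO:g2": "{}"}
	d := &keyDeleter{store: store}
	d.AddKeys("GROUP_INFO:g1", "GROUP_INFO:g2")
	_ = d.ExecDel(context.Background())
	fmt.Println(len(store)) // 0
}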
@@ -72,6 +72,7 @@ func (m *metaCacheRedis) ExecDel(ctx context.Context) error {
 					),
 				)
 				log.ZWarn(ctx, "delete cache failed, please handle keys", err, "keys", m.keys)
+
 				return err
 			}
 			retryTimes++
@@ -80,6 +81,7 @@ func (m *metaCacheRedis) ExecDel(ctx context.Context) error {
 			}
 		}
 	}
+
 	return nil
 }
@@ -103,6 +105,7 @@ func GetDefaultOpt() rockscache.Options {
 	opts := rockscache.NewDefaultOptions()
 	opts.StrongConsistency = true
 	opts.RandomExpireAdjustment = 0.2
+
 	return opts
 }
@@ -125,6 +128,7 @@ func getCache[T any](
 			return "", utils.Wrap(err, "")
 		}
 		write = true
+
 		return string(bs), nil
 	})
 	if err != nil {
@@ -139,8 +143,10 @@ func getCache[T any](
 	err = json.Unmarshal([]byte(v), &t)
 	if err != nil {
 		log.ZError(ctx, "cache json.Unmarshal failed", err, "key", key, "value", v, "expire", expire)
+
 		return t, utils.Wrap(err, "")
 	}
+
 	return t, nil
 }
@@ -169,6 +175,7 @@ func batchGetCache[T any](
 			}
 			values[index] = string(bs)
 		}
+
 		return values, nil
 	})
 	if err != nil {
@@ -185,6 +192,7 @@ func batchGetCache[T any](
 			tArrays = append(tArrays, t)
 		}
 	}
+
 	return tArrays, nil
 }
@@ -213,6 +221,7 @@ func batchGetCacheMap[T any](
 			}
 			values[index] = string(bs)
 		}
+
 		return values, nil
 	})
 	if err != nil {
@@ -229,5 +238,6 @@ func batchGetCacheMap[T any](
 			tMap[originKeys[i]] = t
 		}
 	}
+
 	return tMap, nil
 }

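getCache and batchGetCache wrap rockscache: on a miss they call the supplied fetch function, JSON-encode the result into the cache, and decode it on the way out. A simplified generic read-through sketch of the same idea, using an in-memory map instead of rockscache so it runs standalone (no expiry or singleflight handling, unlike the real helpers):

package main

import (
	"context"
	"encoding/json"
	"fmt"
)

var memory = map[string]string{}

// getCache is a stripped-down version of the helper in this package: look the
// key up, otherwise call fn, store the JSON-encoded value, and return the decoded T.
func getCache[T any](ctx context.Context, key string, fn func(ctx context.Context) (T, error)) (T, error) {
	var t T
	if v, ok := memory[key]; ok {
		err := json.Unmarshal([]byte(v), &t)

		return t, err
	}

	t, err := fn(ctx)
	if err != nil {
		return t, err
	}

	bs, err := json.Marshal(t)
	if err != nil {
		return t, err
	}
	memory[key] = string(bs)

	return t, nil
}

func main() {
	v, _ := getCache(context.Background(), "USER_INFO:u1", func(ctx context.Context) (string, error) {
		return "loaded-from-db", nil
	})
	fmt.Println(v)
}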
@@ -16,13 +16,12 @@ package cache
 import (
 	"context"
+	"errors"
 	"strconv"
 	"time"

-	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
-	"github.com/dtm-labs/rockscache"

 	"github.com/OpenIMSDK/tools/errs"
 	"github.com/gogo/protobuf/jsonpb"
@@ -33,7 +32,6 @@ import (
 	"github.com/OpenIMSDK/tools/utils"

 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	unrelationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/unrelation"
 	"github.com/redis/go-redis/v9"
 )
@@ -140,10 +138,10 @@ func NewMsgCacheModel(client redis.UniversalClient) MsgModel {
 type msgCache struct {
 	metaCache
 	rdb redis.UniversalClient
-	expireTime     time.Duration
-	rcClient       *rockscache.Client
-	msgDocDatabase unrelationtb.MsgDocModelInterface
+	// expireTime time.Duration
+	// rcClient *rockscache.Client
+	// msgDocDatabase unrelationtb.MsgDocModelInterface
 }

 func (c *msgCache) getMaxSeqKey(conversationID string) string {
@@ -182,18 +180,20 @@ func (c *msgCache) getSeqs(
 ) (m map[string]int64, err error) {
 	pipe := c.rdb.Pipeline()
 	for _, v := range items {
-		if err := pipe.Get(ctx, getkey(v)).Err(); err != nil && err != redis.Nil {
-			return nil, errs.Wrap(err)
+		err2 := pipe.Get(ctx, getkey(v)).Err()
+		if err2 != nil && !errors.Is(err2, redis.Nil) {
+			return nil, errs.Wrap(err2)
 		}
 	}
+
 	result, err := pipe.Exec(ctx)
-	if err != nil && err != redis.Nil {
+	if err != nil && !errors.Is(err, redis.Nil) {
 		return nil, errs.Wrap(err)
 	}
 	m = make(map[string]int64, len(items))
 	for i, v := range result {
 		seq := v.(*redis.StringCmd)
-		if seq.Err() != nil && seq.Err() != redis.Nil {
+		if seq.Err() != nil && !errors.Is(seq.Err(), redis.Nil) {
 			return nil, errs.Wrap(v.Err())
 		}
 		val := utils.StringToInt64(seq.Val())
@@ -201,6 +201,7 @@ func (c *msgCache) getSeqs(
 			m[items[i]] = val
 		}
 	}
+
 	return m, nil
 }
@@ -229,6 +230,7 @@ func (c *msgCache) setSeqs(ctx context.Context, seqs map[string]int64, getkey fu
 		}
 	}
 	_, err := pipe.Exec(ctx)
+
 	return err
 }
@@ -319,6 +321,7 @@ func (c *msgCache) GetHasReadSeq(ctx context.Context, userID string, conversatio
 func (c *msgCache) AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
 	key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID)
+
 	return errs.Wrap(c.rdb.HSet(ctx, key, token, flag).Err())
 }
@@ -332,6 +335,7 @@ func (c *msgCache) GetTokensWithoutError(ctx context.Context, userID string, pla
 	for k, v := range m {
 		mm[k] = utils.StringToInt(v)
 	}
+
 	return mm, nil
 }
@@ -341,11 +345,13 @@ func (c *msgCache) SetTokenMapByUidPid(ctx context.Context, userID string, platf
 	for k, v := range m {
 		mm[k] = v
 	}
+
 	return errs.Wrap(c.rdb.HSet(ctx, key, mm).Err())
 }

 func (c *msgCache) DeleteTokenByUidPid(ctx context.Context, userID string, platform int, fields []string) error {
 	key := uidPidToken + userID + ":" + constant.PlatformIDToName(platform)
+
 	return errs.Wrap(c.rdb.HDel(ctx, key, fields...).Err())
 }
@@ -366,8 +372,9 @@ func (c *msgCache) GetMessagesBySeq(
 	for _, v := range seqs {
 		// MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1
 		key := c.getMessageCacheKey(conversationID, v)
-		if err := pipe.Get(ctx, key).Err(); err != nil && err != redis.Nil {
-			return nil, nil, err
+		err2 := pipe.Get(ctx, key).Err()
+		if err2 != nil && errors.Is(err2, redis.Nil) {
+			return nil, nil, err2
 		}
 	}
 	result, err := pipe.Exec(ctx)
@@ -381,6 +388,7 @@ func (c *msgCache) GetMessagesBySeq(
 		if err == nil {
 			if msg.Status != constant.MsgDeleted {
 				seqMsgs = append(seqMsgs, &msg)
+
 				continue
 			}
 		} else {
@@ -389,6 +397,7 @@ func (c *msgCache) GetMessagesBySeq(
 			failedSeqs = append(failedSeqs, seqs[i])
 		}
 	}
+
 	return seqMsgs, failedSeqs, err
 }
@@ -408,6 +417,7 @@ func (c *msgCache) SetMessageToCache(ctx context.Context, conversationID string,
 		}
 	}
 	_, err := pipe.Exec(ctx)
+
 	return len(failedMsgs), err
 }
@@ -440,6 +450,7 @@ func (c *msgCache) UserDeleteMsgs(ctx context.Context, conversationID string, se
 		}
 	}
 	_, err := pipe.Exec(ctx)
+
 	return errs.Wrap(err)
 }
@@ -452,6 +463,7 @@ func (c *msgCache) GetUserDelList(ctx context.Context, userID, conversationID st
 	for i, v := range result {
 		seqs[i] = utils.StringToInt64(v)
 	}
+
 	return seqs, nil
 }
@@ -460,6 +472,7 @@ func (c *msgCache) DelUserDeleteMsgsList(ctx context.Context, conversationID str
 		delUsers, err := c.rdb.SMembers(ctx, c.getMessageDelUserListKey(conversationID, seq)).Result()
 		if err != nil {
 			log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
+
 			continue
 		}
 		if len(delUsers) > 0 {
@@ -502,12 +515,13 @@ func (c *msgCache) DeleteMessages(ctx context.Context, conversationID string, se
 		}
 	}
 	_, err := pipe.Exec(ctx)
+
 	return errs.Wrap(err)
 }

 func (c *msgCache) CleanUpOneConversationAllMsg(ctx context.Context, conversationID string) error {
 	vals, err := c.rdb.Keys(ctx, c.allMessageCacheKey(conversationID)).Result()
-	if err == redis.Nil {
+	if errors.Is(err, redis.Nil) {
 		return nil
 	}
 	if err != nil {
@@ -515,11 +529,13 @@ func (c *msgCache) CleanUpOneConversationAllMsg(ctx context.Context, conversatio
 	}
 	pipe := c.rdb.Pipeline()
 	for _, v := range vals {
-		if err := pipe.Del(ctx, v).Err(); err != nil {
-			return errs.Wrap(err)
+		err2 := pipe.Del(ctx, v).Err()
+		if err2 != nil {
+			return errs.Wrap(err2)
 		}
 	}
 	_, err = pipe.Exec(ctx)
+
 	return errs.Wrap(err)
 }
@@ -528,13 +544,15 @@ func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []in
 		key := c.getMessageCacheKey(userID, seq)
 		result, err := c.rdb.Get(ctx, key).Result()
 		if err != nil {
-			if err == redis.Nil {
+			if errors.Is(err, redis.Nil) {
 				continue
 			}
+
 			return errs.Wrap(err)
 		}
 		var msg sdkws.MsgData
-		if err := jsonpb.UnmarshalString(result, &msg); err != nil {
+		err = jsonpb.UnmarshalString(result, &msg)
+		if err != nil {
 			return err
 		}
 		msg.Status = constant.MsgDeleted
@@ -546,6 +564,7 @@ func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []in
 			return errs.Wrap(err)
 		}
 	}
+
 	return nil
 }
@@ -571,6 +590,7 @@ func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32
 func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
 	result, err := c.rdb.Get(ctx, sendMsgFailedFlag+id).Int()
+
 	return int32(result), errs.Wrap(err)
 }
@@ -597,6 +617,7 @@ func (c *msgCache) DelFcmToken(ctx context.Context, account string, platformID i
 func (c *msgCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
 	seq, err := c.rdb.Incr(ctx, userBadgeUnreadCountSum+userID).Result()
+
 	return int(seq), errs.Wrap(err)
 }
@@ -610,11 +631,13 @@ func (c *msgCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string
 func (c *msgCache) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
 	key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
+
 	return errs.Wrap(c.rdb.SetNX(ctx, key, 1, time.Minute).Err())
 }

 func (c *msgCache) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
 	key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
+
 	return errs.Wrap(c.rdb.Del(ctx, key).Err())
 }
@@ -629,6 +652,7 @@ func (c *msgCache) getMessageReactionExPrefix(clientMsgID string, sessionType in
 	case constant.NotificationChatType:
 		return "EX_NOTIFICATION" + clientMsgID
 	}
+
 	return ""
 }
@@ -637,6 +661,7 @@ func (c *msgCache) JudgeMessageReactionExist(ctx context.Context, clientMsgID st
 	if err != nil {
 		return false, utils.Wrap(err, "")
 	}
+
 	return n > 0, nil
 }

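The msg.go changes replace comparisons like `err == redis.Nil` with `errors.Is(err, redis.Nil)`. The difference matters once errors get wrapped (with fmt.Errorf's %w or any wrapper that preserves the chain): `==` only matches the exact sentinel value, while errors.Is walks the chain. A standalone sketch using a local sentinel instead of the redis client, so it runs without a server:

package main

import (
	"errors"
	"fmt"
)

// errNil stands in for redis.Nil, the sentinel the real code checks for.
var errNil = errors.New("nil reply")

func getFromCache() error {
	// The sentinel comes back wrapped, as it often does once layers add context.
	return fmt.Errorf("get key MESSAGE_CACHE:c1_42: %w", errNil)
}

func main() {
	err := getFromCache()
	fmt.Println(err == errNil)          // false: the wrapped error is not the sentinel value
	fmt.Println(errors.Is(err, errNil)) // true: errors.Is unwraps the chain
}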
@@ -17,6 +17,7 @@ package cache
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"hash/crc32"
 	"strconv"
 	"time"
@@ -70,6 +71,7 @@ func NewUserCacheRedis(
 	options rockscache.Options,
 ) UserCache {
 	rcClient := rockscache.NewClient(rdb, options)
+
 	return &UserCacheRedis{
 		rdb:       rdb,
 		metaCache: NewMetaCacheRedis(rcClient),
@@ -97,10 +99,6 @@ func (u *UserCacheRedis) getUserGlobalRecvMsgOptKey(userID string) string {
 	return userGlobalRecvMsgOptKey + userID
 }

-func (u *UserCacheRedis) getUserStatusHashKey(userID string, Id int32) string {
-	return userID + "_" + string(Id) + platformID
-}
-
 func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userInfo *relationtb.UserModel, err error) {
 	return getCache(
 		ctx,
@@ -114,10 +112,11 @@ func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userIn
 }

 func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*relationtb.UserModel, error) {
-	var keys []string
+	keys := make([]string, 0, len(userIDs))
 	for _, userID := range userIDs {
 		keys = append(keys, u.getUserInfoKey(userID))
 	}
+
 	return batchGetCache(
 		ctx,
 		u.rcClient,
@@ -129,6 +128,7 @@ func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]
 			return i, nil
 			}
 		}
+
 		return 0, errIndex
 		},
 		func(ctx context.Context) ([]*relationtb.UserModel, error) {
@@ -138,12 +138,13 @@ func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]
 }

 func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) UserCache {
-	var keys []string
+	keys := make([]string, 0, len(userIDs))
 	for _, userID := range userIDs {
 		keys = append(keys, u.getUserInfoKey(userID))
 	}
 	cache := u.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }
@@ -160,22 +161,19 @@ func (u *UserCacheRedis) GetUserGlobalRecvMsgOpt(ctx context.Context, userID str
 }

 func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache {
-	var keys []string
+	keys := make([]string, 0, len(userIDs))
 	for _, userID := range userIDs {
 		keys = append(keys, u.getUserGlobalRecvMsgOptKey(userID))
 	}
 	cache := u.NewCache()
 	cache.AddKeys(keys...)
+
 	return cache
 }

-func (u *UserCacheRedis) getOnlineStatusKey(userID string) string {
-	return olineStatusKey + userID
-}
-
 // GetUserStatus get user status.
 func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
-	var res []*user.OnlineStatus
+	userStatus := make([]*user.OnlineStatus, 0, len(userIDs))
 	for _, userID := range userIDs {
 		UserIDNum := crc32.ChecksumIEEE([]byte(userID))
 		modKey := strconv.Itoa(int(UserIDNum % statusMod))
@@ -183,13 +181,14 @@ func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([
 		key := olineStatusKey + modKey
 		result, err := u.rdb.HGet(ctx, key, userID).Result()
 		if err != nil {
-			if err == redis.Nil {
+			if errors.Is(err, redis.Nil) {
 				// key or field does not exist
-				res = append(res, &user.OnlineStatus{
+				userStatus = append(userStatus, &user.OnlineStatus{
 					UserID:      userID,
 					Status:      constant.Offline,
 					PlatformIDs: nil,
 				})
+
 				continue
 			} else {
 				return nil, errs.Wrap(err)
@@ -201,9 +200,10 @@ func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([
 		}
 		onlineStatus.UserID = userID
 		onlineStatus.Status = constant.Online
-		res = append(res, &onlineStatus)
+		userStatus = append(userStatus, &onlineStatus)
 	}
-	return res, nil
+
+	return userStatus, nil
 }

 // SetUserStatus Set the user status and save it in redis.
@@ -224,15 +224,16 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
 			Status:      constant.Online,
 			PlatformIDs: []int32{platformID},
 		}
-		jsonData, err := json.Marshal(onlineStatus)
-		if err != nil {
-			return errs.Wrap(err)
+		jsonData, err2 := json.Marshal(&onlineStatus)
+		if err2 != nil {
+			return errs.Wrap(err2)
 		}
-		_, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
-		if err != nil {
-			return errs.Wrap(err)
+		_, err2 = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
+		if err2 != nil {
+			return errs.Wrap(err2)
 		}
 		u.rdb.Expire(ctx, key, userOlineStatusExpireTime)
+
 		return nil
 	}
 }
@@ -240,7 +241,7 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
 	isNil := false
 	result, err := u.rdb.HGet(ctx, key, userID).Result()
 	if err != nil {
-		if err == redis.Nil {
+		if errors.Is(err, redis.Nil) {
 			isNil = true
 		} else {
 			return errs.Wrap(err)
@@ -248,51 +249,45 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
 	}

 	if status == constant.Offline {
-		if isNil {
-			log.ZWarn(ctx, "this user not online,maybe trigger order not right",
-				err, "userStatus", status)
-			return nil
-		}
-		var onlineStatus user.OnlineStatus
-		err = json.Unmarshal([]byte(result), &onlineStatus)
-		if err != nil {
-			return errs.Wrap(err)
-		}
-		var newPlatformIDs []int32
-		for _, val := range onlineStatus.PlatformIDs {
-			if val != platformID {
-				newPlatformIDs = append(newPlatformIDs, val)
-			}
-		}
-		if newPlatformIDs == nil {
-			_, err = u.rdb.HDel(ctx, key, userID).Result()
-			if err != nil {
-				return errs.Wrap(err)
-			}
-		} else {
-			onlineStatus.PlatformIDs = newPlatformIDs
-			newjsonData, err := json.Marshal(&onlineStatus)
-			if err != nil {
-				return errs.Wrap(err)
-			}
-			_, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
-			if err != nil {
-				return errs.Wrap(err)
-			}
-		}
-	} else {
-		var onlineStatus user.OnlineStatus
-		if !isNil {
-			err = json.Unmarshal([]byte(result), &onlineStatus)
-			if err != nil {
-				return errs.Wrap(err)
-			}
-			onlineStatus.PlatformIDs = RemoveRepeatedElementsInList(append(onlineStatus.PlatformIDs, platformID))
-		} else {
-			onlineStatus.PlatformIDs = append(onlineStatus.PlatformIDs, platformID)
-		}
-		onlineStatus.Status = constant.Online
-		onlineStatus.UserID = userID
+		err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key)
+		if err != nil {
+			return err
+		}
+	} else {
+		err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key)
+		if err != nil {
+			return errs.Wrap(err)
+		}
+	}
+
+	return nil
+}
+
+func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error {
+	if isNil {
+		log.ZWarn(ctx, "this user not online,maybe trigger order not right",
+			err, "userStatus", status)
+
+		return nil
+	}
+
+	var onlineStatus user.OnlineStatus
+	err = json.Unmarshal([]byte(result), &onlineStatus)
+	if err != nil {
+		return errs.Wrap(err)
+	}
+
+	var newPlatformIDs []int32
+	for _, val := range onlineStatus.PlatformIDs {
+		if val != platformID {
+			newPlatformIDs = append(newPlatformIDs, val)
+		}
+	}
+
+	if newPlatformIDs == nil {
+		_, err = u.rdb.HDel(ctx, key, userID).Result()
+		if err != nil {
+			return errs.Wrap(err)
+		}
+	} else {
+		onlineStatus.PlatformIDs = newPlatformIDs
 		newjsonData, err := json.Marshal(&onlineStatus)
 		if err != nil {
 			return errs.Wrap(err)
@@ -301,7 +296,31 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
 		if err != nil {
 			return errs.Wrap(err)
 		}
 	}
+
+	return nil
+}
+
+func (u *UserCacheRedis) refreshStatusOnline(ctx context.Context, userID string, platformID int32, isNil bool, err error, result, key string) error {
+	var onlineStatus user.OnlineStatus
+	if !isNil {
+		err2 := json.Unmarshal([]byte(result), &onlineStatus)
+		if err != nil {
+			return errs.Wrap(err2)
+		}
+		onlineStatus.PlatformIDs = RemoveRepeatedElementsInList(append(onlineStatus.PlatformIDs, platformID))
+	} else {
+		onlineStatus.PlatformIDs = append(onlineStatus.PlatformIDs, platformID)
+	}
+	onlineStatus.Status = constant.Online
+	onlineStatus.UserID = userID
+	newjsonData, err := json.Marshal(&onlineStatus)
+	if err != nil {
+		return errs.Wrap(err)
+	}
+	_, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
+	if err != nil {
+		return errs.Wrap(err)
+	}
+
 	return nil
 }

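GetUserStatus and SetUserStatus shard the online-status data by CRC32 of the user ID modulo statusMod, then store one JSON document per user as a field of that Redis hash. A small sketch of the key derivation; the statusMod value and the prefix's literal value are illustrative assumptions, only the shape of the computation comes from the diff:

package main

import (
	"fmt"
	"hash/crc32"
	"strconv"
)

const (
	olineStatusKey = "ONLINE_STATUS:" // constant name from the diff; its value is assumed here
	statusMod      = 500              // illustrative shard count, not taken from the diff
)

// statusShardKey reproduces the derivation used by GetUserStatus/SetUserStatus:
// users are spread across statusMod Redis hashes, and each user is a field in its hash.
func statusShardKey(userID string) (key, field string) {
	userIDNum := crc32.ChecksumIEEE([]byte(userID))
	modKey := strconv.Itoa(int(userIDNum % statusMod))

	return olineStatusKey + modKey, userID
}

func main() {
	key, field := statusShardKey("user-123")
	fmt.Println(key, field)
}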