feat: new features merged (#2409)
* fix: GroupApplicationAcceptedNotification
* fix: NotificationUserInfoUpdate
* cicd: robot automated change
* fix: component
* fix: getConversationInfo
* feat: cron task
* fix: minio config url recognition error
* new mongo
* friend incremental sync
* mage
* optimize version log
* group sync
* sync option
* refactor: replace `friend` package with `relation`.
* refactor: update latest commit to relation.
* go.mod
* update: go mod
* refactor: change incremental to full
* feat: get full friend user ids
* feat: api and config
* seq
* group version
* merge
* fix: sort by id to avoid unstable friend sort.
* user version
* seq user
* user online
* implement minio expire delete logic.
* config
* online cache
* feat: implement scheduled delete of outdated objects in minio.
* update gomake version
* implement FindExpires pagination.
* remove unnecessary incr.
* fix incorrect args call.
* online push
* resolve conflicts
* test
* api prommetrics
* rpc prommetrics
* online status
* sub
* conversation version incremental
* merge seq
* merge online
* GetOwnerConversation
* fix: change incremental syncer router name.
* rockscache batch get
* rockscache seq batch get
* fix: GetMsgDocModelByIndex bug
* update go.mod
* feat: prometheus

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>
Co-authored-by: Monet Lee <monet_lee@163.com>
Co-authored-by: OpenIM-Gordon <46924906+FGadvancer@users.noreply.github.com>
Co-authored-by: icey-yu <1186114839@qq.com>
parent 5f52fa19bd
commit 4aaf496086
@@ -0,0 +1,25 @@
global:
  resolve_timeout: 5m
  smtp_from: alert@openim.io
  smtp_smarthost: smtp.163.com:465
  smtp_auth_username: alert@openim.io
  smtp_auth_password: YOURAUTHPASSWORD
  smtp_require_tls: false
  smtp_hello: xxx

templates:
  - /etc/alertmanager/email.tmpl

route:
  group_by: ['alertname']
  group_wait: 5s
  group_interval: 5s
  repeat_interval: 5m
  receiver: email
receivers:
  - name: email
    email_configs:
      - to: 'alert@example.com'
        html: '{{ template "email.to.html" . }}'
        headers: { Subject: "[OPENIM-SERVER]Alarm" }
        send_resolved: true
@@ -0,0 +1,16 @@
{{ define "email.to.html" }}
{{ range .Alerts }}
<!-- Begin of OpenIM Alert -->
<div style="border:1px solid #ccc; padding:10px; margin-bottom:10px;">
<h3>OpenIM Alert</h3>
<p><strong>Alert Program:</strong> Prometheus Alert</p>
<p><strong>Severity Level:</strong> {{ .Labels.severity }}</p>
<p><strong>Alert Type:</strong> {{ .Labels.alertname }}</p>
<p><strong>Affected Host:</strong> {{ .Labels.instance }}</p>
<p><strong>Affected Service:</strong> {{ .Labels.job }}</p>
<p><strong>Alert Subject:</strong> {{ .Annotations.summary }}</p>
<p><strong>Trigger Time:</strong> {{ .StartsAt.Format "2006-01-02 15:04:05" }}</p>
</div>
<!-- End of OpenIM Alert -->
{{ end }}
{{ end }}
@@ -0,0 +1,22 @@
groups:
  - name: instance_down
    rules:
      - alert: InstanceDown
        expr: up == 0
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Instance {{ $labels.instance }} down"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minute."

  - name: database_insert_failure_alerts
    rules:
      - alert: DatabaseInsertFailed
        expr: (increase(msg_insert_redis_failed_total[5m]) > 0) or (increase(msg_insert_mongo_failed_total[5m]) > 0)
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Increase in MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter detected"
          description: "Either MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter has increased in the last 5 minutes, indicating failed message inserts into Redis or MongoDB; Redis or MongoDB may be down."
@@ -1,2 +1,3 @@
-chatRecordsClearTime: "0 2 * * *"
+cronExecuteTime: "0 2 * * *"
 retainChatRecords: 365
+fileExpireTime: 90
@@ -0,0 +1,83 @@
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['192.168.2.22:19093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "instance-down-rules.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label "job=<job_name>" to any timeseries scraped from this config.
  # Monitored information captured by prometheus

  # prometheus fetches application services
  - job_name: 'node_exporter'
    static_configs:
      - targets: [ '192.168.2.22:20114' ]
  - job_name: 'openimserver-openim-api'
    static_configs:
      - targets: [ '192.168.2.22:20113' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-msggateway'
    static_configs:
      - targets: [ '192.168.2.22:20112' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-msgtransfer'
    static_configs:
      - targets: [ '192.168.2.22:20111', '192.168.2.22:20110', '192.168.2.22:20109', '192.168.2.22:20108' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-push'
    static_configs:
      - targets: [ '192.168.2.22:20107' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-auth'
    static_configs:
      - targets: [ '192.168.2.22:20106' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-conversation'
    static_configs:
      - targets: [ '192.168.2.22:20105' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-friend'
    static_configs:
      - targets: [ '192.168.2.22:20104' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-group'
    static_configs:
      - targets: [ '192.168.2.22:20103' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-msg'
    static_configs:
      - targets: [ '192.168.2.22:20102' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-third'
    static_configs:
      - targets: [ '192.168.2.22:20101' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-user'
    static_configs:
      - targets: [ '192.168.2.22:20100' ]
        labels:
          namespace: 'default'
@@ -0,0 +1,112 @@
package msggateway

import (
	"context"
	"crypto/md5"
	"encoding/binary"
	"math/rand"
	"strconv"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	pbuser "github.com/openimsdk/protocol/user"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mcontext"
	"github.com/openimsdk/tools/utils/datautil"
)

func (ws *WsServer) ChangeOnlineStatus(concurrent int) {
	if concurrent < 1 {
		concurrent = 1
	}
	const renewalTime = cachekey.OnlineExpire / 3
	//const renewalTime = time.Second * 10
	renewalTicker := time.NewTicker(renewalTime)

	requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent)
	changeStatus := make([][]UserState, concurrent)

	for i := 0; i < concurrent; i++ {
		requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64)
		changeStatus[i] = make([]UserState, 0, 100)
	}

	mergeTicker := time.NewTicker(time.Second)

	local2pb := func(u UserState) *pbuser.UserOnlineStatus {
		return &pbuser.UserOnlineStatus{
			UserID:  u.UserID,
			Online:  u.Online,
			Offline: u.Offline,
		}
	}

	rNum := rand.Uint64()
	pushUserState := func(us ...UserState) {
		for _, u := range us {
			sum := md5.Sum([]byte(u.UserID))
			i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent)
			changeStatus[i] = append(changeStatus[i], u)
			status := changeStatus[i]
			if len(status) == cap(status) {
				req := &pbuser.SetUserOnlineStatusReq{
					Status: datautil.Slice(status, local2pb),
				}
				changeStatus[i] = status[:0]
				select {
				case requestChs[i] <- req:
				default:
					log.ZError(context.Background(), "user online processing is too slow", nil)
				}
			}
		}
	}

	pushAllUserState := func() {
		for i, status := range changeStatus {
			if len(status) == 0 {
				continue
			}
			req := &pbuser.SetUserOnlineStatusReq{
				Status: datautil.Slice(status, local2pb),
			}
			changeStatus[i] = status[:0]
			select {
			case requestChs[i] <- req:
			default:
				log.ZError(context.Background(), "user online processing is too slow", nil)
			}
		}
	}

	opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10))
	doRequest := func(req *pbuser.SetUserOnlineStatusReq) {
		ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5)
		defer cancel()
		if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil {
			log.ZError(ctx, "update user online status", err)
		}
	}

	for i := 0; i < concurrent; i++ {
		go func(ch <-chan *pbuser.SetUserOnlineStatusReq) {
			for req := range ch {
				doRequest(req)
			}
		}(requestChs[i])
	}

	for {
		select {
		case <-mergeTicker.C:
			pushAllUserState()
		case now := <-renewalTicker.C:
			deadline := now.Add(-cachekey.OnlineExpire / 3)
			users := ws.clients.GetAllUserStatus(deadline, now)
			log.ZDebug(context.Background(), "renewal ticker", "deadline", deadline, "nowtime", now, "num", len(users))
			pushUserState(users...)
		case state := <-ws.clients.UserState():
			log.ZDebug(context.Background(), "OnlineCache user online change", "userID", state.UserID, "online", state.Online, "offline", state.Offline)
			pushUserState(state)
		}
	}
}
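A note on the fan-out in ChangeOnlineStatus above: each state change is routed to a fixed worker by hashing the userID, so updates for one user stay ordered on one goroutine while different users spread across the concurrent channels. A minimal, self-contained sketch of that routing (the random offset rNum from the original is omitted; the names here are illustrative, not part of the commit):

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// shardIndex mirrors the routing above: hash the userID with md5, read the
// first 8 bytes as a big-endian integer, and reduce it modulo the worker
// count. The same userID always maps to the same worker.
func shardIndex(userID string, workers int) int {
	sum := md5.Sum([]byte(userID))
	return int(binary.BigEndian.Uint64(sum[:]) % uint64(workers))
}

func main() {
	for _, id := range []string{"u1", "u2", "u3"} {
		fmt.Println(id, "->", shardIndex(id, 4))
	}
}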
@@ -0,0 +1,181 @@
package msggateway

import (
	"context"
	"encoding/json"
	"sync"
	"time"

	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/openimsdk/tools/utils/idutil"
	"google.golang.org/protobuf/proto"
)

func (ws *WsServer) subscriberUserOnlineStatusChanges(ctx context.Context, userID string, platformIDs []int32) {
	if ws.clients.RecvSubChange(userID, platformIDs) {
		log.ZDebug(ctx, "gateway receive subscription message and go back online", "userID", userID, "platformIDs", platformIDs)
	} else {
		log.ZDebug(ctx, "gateway ignore user online status changes", "userID", userID, "platformIDs", platformIDs)
	}
	ws.pushUserIDOnlineStatus(ctx, userID, platformIDs)
}

func (ws *WsServer) SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) {
	var sub sdkws.SubUserOnlineStatus
	if err := proto.Unmarshal(data.Data, &sub); err != nil {
		return nil, err
	}
	ws.subscription.Sub(client, sub.SubscribeUserID, sub.UnsubscribeUserID)
	var resp sdkws.SubUserOnlineStatusTips
	if len(sub.SubscribeUserID) > 0 {
		resp.Subscribers = make([]*sdkws.SubUserOnlineStatusElem, 0, len(sub.SubscribeUserID))
		for _, userID := range sub.SubscribeUserID {
			platformIDs, err := ws.online.GetUserOnlinePlatform(ctx, userID)
			if err != nil {
				return nil, err
			}
			resp.Subscribers = append(resp.Subscribers, &sdkws.SubUserOnlineStatusElem{
				UserID:            userID,
				OnlinePlatformIDs: platformIDs,
			})
		}
	}
	return proto.Marshal(&resp)
}

type subClient struct {
	clients map[string]*Client
}

func newSubscription() *Subscription {
	return &Subscription{
		userIDs: make(map[string]*subClient),
	}
}

type Subscription struct {
	lock    sync.RWMutex
	userIDs map[string]*subClient
}

func (s *Subscription) GetClient(userID string) []*Client {
	s.lock.RLock()
	defer s.lock.RUnlock()
	cs, ok := s.userIDs[userID]
	if !ok {
		return nil
	}
	clients := make([]*Client, 0, len(cs.clients))
	for _, client := range cs.clients {
		clients = append(clients, client)
	}
	return clients
}

func (s *Subscription) DelClient(client *Client) {
	client.subLock.Lock()
	userIDs := datautil.Keys(client.subUserIDs)
	for _, userID := range userIDs {
		delete(client.subUserIDs, userID)
	}
	client.subLock.Unlock()
	if len(userIDs) == 0 {
		return
	}
	addr := client.ctx.GetRemoteAddr()
	s.lock.Lock()
	defer s.lock.Unlock()
	for _, userID := range userIDs {
		sub, ok := s.userIDs[userID]
		if !ok {
			continue
		}
		delete(sub.clients, addr)
		if len(sub.clients) == 0 {
			delete(s.userIDs, userID)
		}
	}
}

func (s *Subscription) Sub(client *Client, addUserIDs, delUserIDs []string) {
	if len(addUserIDs)+len(delUserIDs) == 0 {
		return
	}
	var (
		del = make(map[string]struct{})
		add = make(map[string]struct{})
	)
	client.subLock.Lock()
	for _, userID := range delUserIDs {
		if _, ok := client.subUserIDs[userID]; !ok {
			continue
		}
		del[userID] = struct{}{}
		delete(client.subUserIDs, userID)
	}
	for _, userID := range addUserIDs {
		delete(del, userID)
		if _, ok := client.subUserIDs[userID]; ok {
			continue
		}
		client.subUserIDs[userID] = struct{}{}
		add[userID] = struct{}{} // record the new subscription so it is registered below
	}
	client.subLock.Unlock()
	if len(del)+len(add) == 0 {
		return
	}
	addr := client.ctx.GetRemoteAddr()
	s.lock.Lock()
	defer s.lock.Unlock()
	for userID := range del {
		sub, ok := s.userIDs[userID]
		if !ok {
			continue
		}
		delete(sub.clients, addr)
		if len(sub.clients) == 0 {
			delete(s.userIDs, userID)
		}
	}
	for userID := range add {
		sub, ok := s.userIDs[userID]
		if !ok {
			sub = &subClient{clients: make(map[string]*Client)}
			s.userIDs[userID] = sub
		}
		sub.clients[addr] = client
	}
}

func (ws *WsServer) pushUserIDOnlineStatus(ctx context.Context, userID string, platformIDs []int32) {
	clients := ws.subscription.GetClient(userID)
	if len(clients) == 0 {
		return
	}
	msgContent, err := json.Marshal(platformIDs)
	if err != nil {
		log.ZError(ctx, "pushUserIDOnlineStatus json.Marshal", err)
		return
	}
	now := time.Now().UnixMilli()
	msgID := idutil.GetMsgIDByMD5(userID)
	msg := &sdkws.MsgData{
		SendID:           userID,
		ClientMsgID:      msgID,
		ServerMsgID:      msgID,
		SenderPlatformID: constant.AdminPlatformID,
		SessionType:      constant.NotificationChatType,
		ContentType:      constant.UserSubscribeOnlineStatusNotification,
		Content:          msgContent,
		SendTime:         now,
		CreateTime:       now,
	}
	for _, client := range clients {
		msg.RecvID = client.UserID
		if err := client.PushMessage(ctx, msg); err != nil {
			log.ZError(ctx, "UserSubscribeOnlineStatusNotification push failed", err, "userID", client.UserID, "platformID", client.PlatformID, "changeUserID", userID, "content", msgContent)
		}
	}
}
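The subscription bookkeeping above is two-sided: each Client records the userIDs it watches (client.subUserIDs), and Subscription records, per watched userID, the watching clients keyed by remote address; Sub and DelClient keep both maps consistent and drop empty buckets. A simplified, self-contained model of that invariant (watcher and registry are stand-in types, not part of the commit):

package main

import "fmt"

// watcher stands in for *Client: it knows which user IDs it watches.
type watcher struct {
	addr string
	subs map[string]struct{}
}

// registry stands in for Subscription: watched userID -> watcher addr -> watcher.
type registry map[string]map[string]*watcher

func (r registry) sub(w *watcher, userID string) {
	w.subs[userID] = struct{}{}
	m, ok := r[userID]
	if !ok {
		m = make(map[string]*watcher)
		r[userID] = m
	}
	m[w.addr] = w
}

func (r registry) unsub(w *watcher, userID string) {
	delete(w.subs, userID)
	if m, ok := r[userID]; ok {
		delete(m, w.addr)
		if len(m) == 0 {
			delete(r, userID) // drop empty buckets, as DelClient does
		}
	}
}

func main() {
	r := registry{}
	w := &watcher{addr: "10.0.0.1:9000", subs: map[string]struct{}{}}
	r.sub(w, "u1")
	fmt.Println(len(r["u1"])) // 1
	r.unsub(w, "u1")
	fmt.Println(len(r["u1"])) // 0
}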
@@ -1,135 +1,185 @@
 // Copyright © 2023 OpenIM. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.

 package msggateway

 import (
-	"context"
-	"sync"
-
-	"github.com/openimsdk/tools/log"
 	"github.com/openimsdk/tools/utils/datautil"
+	"sync"
+	"time"
 )

-type UserMap struct {
-	m sync.Map
-}
+type UserMap interface {
+	GetAll(userID string) ([]*Client, bool)
+	Get(userID string, platformID int) ([]*Client, bool, bool)
+	Set(userID string, v *Client)
+	DeleteClients(userID string, clients []*Client) (isDeleteUser bool)
+	UserState() <-chan UserState
+	GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState
+	RecvSubChange(userID string, platformIDs []int32) bool
+}

-func newUserMap() *UserMap {
-	return &UserMap{}
-}
+type UserState struct {
+	UserID  string
+	Online  []int32
+	Offline []int32
+}

-func (u *UserMap) GetAll(key string) ([]*Client, bool) {
-	allClients, ok := u.m.Load(key)
-	if ok {
-		return allClients.([]*Client), ok
-	}
-	return nil, ok
-}
+type UserPlatform struct {
+	Time    time.Time
+	Clients []*Client
+}

-func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) {
-	allClients, userExisted := u.m.Load(key)
-	if userExisted {
-		var clients []*Client
-		for _, client := range allClients.([]*Client) {
-			if client.PlatformID == platformID {
-				clients = append(clients, client)
-			}
-		}
-		if len(clients) > 0 {
-			return clients, userExisted, true
-		}
-		return clients, userExisted, false
-	}
-	return nil, userExisted, false
-}
+func (u *UserPlatform) PlatformIDs() []int32 {
+	if len(u.Clients) == 0 {
+		return nil
+	}
+	platformIDs := make([]int32, 0, len(u.Clients))
+	for _, client := range u.Clients {
+		platformIDs = append(platformIDs, int32(client.PlatformID))
+	}
+	return platformIDs
+}

-// Set adds a client to the map.
-func (u *UserMap) Set(key string, v *Client) {
-	allClients, existed := u.m.Load(key)
-	if existed {
-		log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID)
-		oldClients := allClients.([]*Client)
-		oldClients = append(oldClients, v)
-		u.m.Store(key, oldClients)
-	} else {
-		log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID)
-
-		var clients []*Client
-		clients = append(clients, v)
-		u.m.Store(key, clients)
-	}
-}
+func (u *UserPlatform) PlatformIDSet() map[int32]struct{} {
+	if len(u.Clients) == 0 {
+		return nil
+	}
+	platformIDs := make(map[int32]struct{})
+	for _, client := range u.Clients {
+		platformIDs[int32(client.PlatformID)] = struct{}{}
+	}
+	return platformIDs
+}

-func (u *UserMap) delete(key string, connRemoteAddr string) (isDeleteUser bool) {
-	// Attempt to load the clients associated with the key.
-	allClients, existed := u.m.Load(key)
-	if !existed {
-		// Return false immediately if the key does not exist.
-		return false
-	}
-
-	// Convert allClients to a slice of *Client.
-	oldClients := allClients.([]*Client)
-	var remainingClients []*Client
-	for _, client := range oldClients {
-		// Keep clients that do not match the connRemoteAddr.
-		if client.ctx.GetRemoteAddr() != connRemoteAddr {
-			remainingClients = append(remainingClients, client)
-		}
-	}
-
-	// If no clients remain after filtering, delete the key from the map.
-	if len(remainingClients) == 0 {
-		u.m.Delete(key)
-		return true
-	}
-
-	// Otherwise, update the key with the remaining clients.
-	u.m.Store(key, remainingClients)
-	return false
-}
+func newUserMap() UserMap {
+	return &userMap{
+		data: make(map[string]*UserPlatform),
+		ch:   make(chan UserState, 10000),
+	}
+}

+type userMap struct {
+	lock sync.RWMutex
+	data map[string]*UserPlatform
+	ch   chan UserState
+}

+func (u *userMap) RecvSubChange(userID string, platformIDs []int32) bool {
+	u.lock.RLock()
+	defer u.lock.RUnlock()
+	result, ok := u.data[userID]
+	if !ok {
+		return false
+	}
+	localPlatformIDs := result.PlatformIDSet()
+	for _, platformID := range platformIDs {
+		delete(localPlatformIDs, platformID)
+	}
+	if len(localPlatformIDs) == 0 {
+		return false
+	}
+	u.push(userID, result, nil)
+	return true
+}

+func (u *userMap) push(userID string, userPlatform *UserPlatform, offline []int32) bool {
+	select {
+	case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}:
+		userPlatform.Time = time.Now()
+		return true
+	default:
+		return false
+	}
+}

-func (u *UserMap) deleteClients(key string, clients []*Client) (isDeleteUser bool) {
-	m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) {
-		return c.ctx.GetRemoteAddr(), struct{}{}
-	})
-	allClients, existed := u.m.Load(key)
-	if !existed {
-		// If the key doesn't exist, return false.
-		return false
-	}
-
-	// Filter out clients that are in the deleteMap.
-	oldClients := allClients.([]*Client)
-	var remainingClients []*Client
-	for _, client := range oldClients {
-		if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted {
-			remainingClients = append(remainingClients, client)
-		}
-	}
-
-	// Update or delete the key based on the remaining clients.
-	if len(remainingClients) == 0 {
-		u.m.Delete(key)
-		return true
-	}
-
-	u.m.Store(key, remainingClients)
-	return false
-}
+func (u *userMap) GetAll(userID string) ([]*Client, bool) {
+	u.lock.RLock()
+	defer u.lock.RUnlock()
+	result, ok := u.data[userID]
+	if !ok {
+		return nil, false
+	}
+	return result.Clients, true
+}

+func (u *userMap) Get(userID string, platformID int) ([]*Client, bool, bool) {
+	u.lock.RLock()
+	defer u.lock.RUnlock()
+	result, ok := u.data[userID]
+	if !ok {
+		return nil, false, false
+	}
+	var clients []*Client
+	for _, client := range result.Clients {
+		if client.PlatformID == platformID {
+			clients = append(clients, client)
+		}
+	}
+	return clients, true, len(clients) > 0
+}

+func (u *userMap) Set(userID string, client *Client) {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	result, ok := u.data[userID]
+	if ok {
+		result.Clients = append(result.Clients, client)
+	} else {
+		result = &UserPlatform{
+			Clients: []*Client{client},
+		}
+		u.data[userID] = result
+	}
+	u.push(client.UserID, result, nil)
+}

+func (u *userMap) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) {
+	if len(clients) == 0 {
+		return false
+	}
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	result, ok := u.data[userID]
+	if !ok {
+		return false
+	}
+	offline := make([]int32, 0, len(clients))
+	deleteAddr := datautil.SliceSetAny(clients, func(client *Client) string {
+		return client.ctx.GetRemoteAddr()
+	})
+	tmp := result.Clients
+	result.Clients = result.Clients[:0]
+	for _, client := range tmp {
+		if _, delCli := deleteAddr[client.ctx.GetRemoteAddr()]; delCli {
+			offline = append(offline, int32(client.PlatformID))
+		} else {
+			result.Clients = append(result.Clients, client)
+		}
+	}
+	defer u.push(userID, result, offline)
+	if len(result.Clients) > 0 {
+		return false
+	}
+	delete(u.data, userID)
+	return true
+}

-func (u *UserMap) DeleteAll(key string) {
-	u.m.Delete(key)
-}
+func (u *userMap) GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState {
+	u.lock.RLock()
+	defer u.lock.RUnlock()
+	result := make([]UserState, 0, len(u.data))
+	for userID, userPlatform := range u.data {
+		if userPlatform.Time.Before(deadline) {
+			continue
+		}
+		userPlatform.Time = nowtime
+		online := make([]int32, 0, len(userPlatform.Clients))
+		for _, client := range userPlatform.Clients {
+			online = append(online, int32(client.PlatformID))
+		}
+		result = append(result, UserState{UserID: userID, Online: online})
+	}
+	return result
+}

+func (u *userMap) UserState() <-chan UserState {
+	return u.ch
+}
@@ -0,0 +1,122 @@
package user

import (
	"context"

	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/protocol/sdkws"
	pbuser "github.com/openimsdk/protocol/user"
)

func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) {
	platformIDs, err := s.online.GetOnline(ctx, userID)
	if err != nil {
		return nil, err
	}
	status := pbuser.OnlineStatus{
		UserID:      userID,
		PlatformIDs: platformIDs,
	}
	if len(platformIDs) > 0 {
		status.Status = constant.Online
	} else {
		status.Status = constant.Offline
	}
	return &status, nil
}

func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) {
	res := make([]*pbuser.OnlineStatus, 0, len(userIDs))
	for _, userID := range userIDs {
		status, err := s.getUserOnlineStatus(ctx, userID)
		if err != nil {
			return nil, err
		}
		res = append(res, status)
	}
	return res, nil
}

// SubscribeOrCancelUsersStatus subscribes to, or cancels subscriptions for, users' online status.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) {
	if req.Genre == constant.SubscriberUser {
		err := s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
		if err != nil {
			return nil, err
		}
		var status []*pbuser.OnlineStatus
		status, err = s.getUsersOnlineStatus(ctx, req.UserIDs)
		if err != nil {
			return nil, err
		}
		return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
	} else if req.Genre == constant.Unsubscribe {
		err := s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
		if err != nil {
			return nil, err
		}
	}
	return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}

// GetUserStatus gets the online status of the given users.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) {
	res, err := s.getUsersOnlineStatus(ctx, req.UserIDs)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetUserStatusResp{StatusList: res}, nil
}

// SetUserStatus synchronizes a user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) {
	var (
		online  []int32
		offline []int32
	)
	switch req.Status {
	case constant.Online:
		online = []int32{req.PlatformID}
	case constant.Offline:
		offline = []int32{req.PlatformID}
	}
	if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil {
		return nil, err
	}
	list, err := s.db.GetSubscribedList(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	for _, userID := range list {
		tips := &sdkws.UserStatusChangeTips{
			FromUserID: req.UserID,
			ToUserID:   userID,
			Status:     req.Status,
			PlatformID: req.PlatformID,
		}
		s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
	}

	return &pbuser.SetUserStatusResp{}, nil
}

// GetSubscribeUsersStatus gets the online status of subscribed users.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
	userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	onlineStatusList, err := s.getUsersOnlineStatus(ctx, userList)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
}

func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
	for _, status := range req.Status {
		if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil {
			return nil, err
		}
	}
	return &pbuser.SetUserOnlineStatusResp{}, nil
}
@@ -0,0 +1,48 @@
package prommetrics

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	apiCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "api_count",
			Help: "Total number of API calls",
		},
		[]string{"path", "method", "code"},
	)
	httpCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_count",
			Help: "Total number of HTTP calls",
		},
		[]string{"path", "method", "status"},
	)
)

func ApiInit(prometheusPort int) error {
	apiRegistry := prometheus.NewRegistry()
	cs := append(
		baseCollector,
		apiCounter,
		httpCounter,
	)
	return Init(apiRegistry, prometheusPort, commonPath, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}), cs...)
}

func APICall(path string, method string, apiCode int) {
	apiCounter.With(prometheus.Labels{"path": path, "method": method, "code": strconv.Itoa(apiCode)}).Inc()
}

func HttpCall(path string, method string, status int) {
	httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc()
}

//func ApiHandler() http.Handler {
//	return promhttp.InstrumentMetricHandler(
//		apiRegistry, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}),
//	)
//}
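APICall and HttpCall are meant to be fired once per finished request from the HTTP layer, labeled with path, method, and a status code. A hypothetical net/http middleware (not part of this commit) showing the intended call shape; the hardcoded 0 stands for an application-level success code, which is an assumption:

package main

import (
	"net/http"

	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
)

// metricsMiddleware is an illustrative wrapper: it lets the request finish,
// then records one api_count sample for it.
func metricsMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(w, r)
		prommetrics.APICall(r.URL.Path, r.Method, 0) // 0 = assumed success code
	})
}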
@@ -1,30 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prommetrics

import ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"

/*
labels := prometheus.Labels{"label_one": "any", "label_two": "value"}
ApiCustomCnt.MetricCollector.(*prometheus.CounterVec).With(labels).Inc().
*/
var (
	ApiCustomCnt = &ginprom.Metric{
		Name:        "custom_total",
		Description: "Custom counter events.",
		Type:        "counter_vec",
		Args:        []string{"label_one", "label_two"},
	}
)
@@ -0,0 +1,58 @@
package prommetrics

import (
	"strconv"

	gp "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

const rpcPath = commonPath

var (
	grpcMetrics *gp.ServerMetrics
	rpcCounter  = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "rpc_count",
			Help: "Total number of RPC calls",
		},
		[]string{"name", "path", "code"},
	)
)

func RpcInit(cs []prometheus.Collector, prometheusPort int) error {
	reg := prometheus.NewRegistry()
	cs = append(append(
		baseCollector,
		rpcCounter,
	), cs...)
	return Init(reg, prometheusPort, rpcPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
}

func RPCCall(name string, path string, code int) {
	rpcCounter.With(prometheus.Labels{"name": name, "path": path, "code": strconv.Itoa(code)}).Inc()
}

func GetGrpcServerMetrics() *gp.ServerMetrics {
	if grpcMetrics == nil {
		grpcMetrics = gp.NewServerMetrics()
		grpcMetrics.EnableHandlingTimeHistogram()
	}
	return grpcMetrics
}

func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Collector {
	switch registerName {
	case share.RpcRegisterName.MessageGateway:
		return []prometheus.Collector{OnlineUserGauge}
	case share.RpcRegisterName.Msg:
		return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
	case share.RpcRegisterName.Push:
		return []prometheus.Collector{MsgOfflinePushFailedCounter}
	case share.RpcRegisterName.Auth:
		return []prometheus.Collector{UserLoginCounter}
	default:
		return nil
	}
}
@@ -0,0 +1,13 @@
package cachekey

import "time"

const (
	OnlineKey     = "ONLINE:"
	OnlineChannel = "online_change"
	OnlineExpire  = time.Hour / 2
)

func GetOnlineKey(userID string) string {
	return OnlineKey + userID
}
@@ -1,38 +1,30 @@
 // Copyright © 2024 OpenIM. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.

 package cachekey

 const (
-	maxSeq                 = "MAX_SEQ:"
-	minSeq                 = "MIN_SEQ:"
-	conversationUserMinSeq = "CON_USER_MIN_SEQ:"
-	hasReadSeq             = "HAS_READ_SEQ:"
+	MallocSeq        = "MALLOC_SEQ:"
+	MallocMinSeqLock = "MALLOC_MIN_SEQ:"
+
+	SeqUserMaxSeq  = "SEQ_USER_MAX:"
+	SeqUserMinSeq  = "SEQ_USER_MIN:"
+	SeqUserReadSeq = "SEQ_USER_READ:"
 )

-func GetMaxSeqKey(conversationID string) string {
-	return maxSeq + conversationID
-}
+func GetMallocSeqKey(conversationID string) string {
+	return MallocSeq + conversationID
+}

+func GetMallocMinSeqKey(conversationID string) string {
+	return MallocMinSeqLock + conversationID
+}

-func GetMinSeqKey(conversationID string) string {
-	return minSeq + conversationID
-}
+func GetSeqUserMaxSeqKey(conversationID string, userID string) string {
+	return SeqUserMaxSeq + conversationID + ":" + userID
+}

-func GetHasReadSeqKey(conversationID string, userID string) string {
-	return hasReadSeq + userID + ":" + conversationID
-}
+func GetSeqUserMinSeqKey(conversationID string, userID string) string {
+	return SeqUserMinSeq + conversationID + ":" + userID
+}

-func GetConversationUserMinSeqKey(conversationID, userID string) string {
-	return conversationUserMinSeq + conversationID + "u:" + userID
-}
+func GetSeqUserReadSeqKey(conversationID string, userID string) string {
+	return SeqUserReadSeq + conversationID + ":" + userID
+}
@@ -0,0 +1,8 @@
package cache

import "context"

type OnlineCache interface {
	GetOnline(ctx context.Context, userID string) ([]int32, error)
	SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
}
@@ -0,0 +1,94 @@
package redis

import (
	"context"
	"encoding/json"
	"time"
	"unsafe"

	"github.com/dtm-labs/rockscache"
	"github.com/redis/go-redis/v9"
	"golang.org/x/sync/singleflight"
)

// getRocksCacheRedisClient recovers the underlying redis client from a
// rockscache.Client by mirroring its unexported field layout; this relies on
// the field order of the pinned rockscache version staying unchanged.
func getRocksCacheRedisClient(cli *rockscache.Client) redis.UniversalClient {
	type Client struct {
		rdb redis.UniversalClient
		_   rockscache.Options
		_   singleflight.Group
	}
	return (*Client)(unsafe.Pointer(cli)).rdb
}

func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscache.Client, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	findKeys := make([]string, 0, len(ids))
	keyId := make(map[string]K)
	for _, id := range ids {
		key := idKey(id)
		if _, ok := keyId[key]; ok {
			continue
		}
		keyId[key] = id
		findKeys = append(findKeys, key)
	}
	slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), findKeys)
	if err != nil {
		return nil, err
	}
	result := make([]*V, 0, len(findKeys))
	for _, keys := range slotKeys {
		indexCache, err := rcClient.FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
			queryIds := make([]K, 0, len(idx))
			idIndex := make(map[K]int)
			for _, index := range idx {
				id := keyId[keys[index]]
				idIndex[id] = index
				queryIds = append(queryIds, id)
			}
			values, err := fn(ctx, queryIds)
			if err != nil {
				return nil, err
			}
			if len(values) == 0 {
				return map[int]string{}, nil
			}
			cacheIndex := make(map[int]string)
			for _, value := range values {
				id := vId(value)
				index, ok := idIndex[id]
				if !ok {
					continue
				}
				bs, err := json.Marshal(value)
				if err != nil {
					return nil, err
				}
				cacheIndex[index] = string(bs)
			}
			return cacheIndex, nil
		})
		if err != nil {
			return nil, err
		}
		for index, data := range indexCache {
			if data == "" {
				continue
			}
			var value V
			if err := json.Unmarshal([]byte(data), &value); err != nil {
				return nil, err
			}
			if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
				cb.BatchCache(keyId[keys[index]])
			}
			result = append(result, &value)
		}
	}
	return result, nil
}

type BatchCacheCallback[K comparable] interface {
	BatchCache(id K)
}
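batchGetCache2 is a generic read-through batch cache: the caller supplies a key builder, a way to map a loaded value back to its ID, and the underlying batch loader; cached entries come from Redis and misses are loaded, backfilled, and returned. Since the function is unexported, a caller would sit in this same package. A hedged usage sketch where the userInfo type, the "USER_INFO:" prefix, and loadUsers are illustrative stand-ins, not part of the commit:

// Hypothetical caller in the same package.
type userInfo struct {
	UserID string `json:"userID"`
	Name   string `json:"name"`
}

// loadUsers stands in for a batch database read on cache miss.
func loadUsers(ctx context.Context, ids []string) ([]*userInfo, error) {
	res := make([]*userInfo, 0, len(ids))
	for _, id := range ids {
		res = append(res, &userInfo{UserID: id, Name: "user-" + id})
	}
	return res, nil
}

func getUsers(ctx context.Context, rc *rockscache.Client, ids []string) ([]*userInfo, error) {
	return batchGetCache2(ctx, rc, time.Hour, ids,
		func(id string) string { return "USER_INFO:" + id }, // id -> cache key
		func(v *userInfo) string { return v.UserID },        // cached value -> id
		loadUsers, // miss path: batch load, marshal, and backfill
	)
}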
@@ -0,0 +1,55 @@
package redis

import (
	"context"
	"testing"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
	"github.com/openimsdk/tools/db/mongoutil"
	"github.com/openimsdk/tools/db/redisutil"
)

func TestName(t *testing.T) {
	//var rocks rockscache.Client
	//rdb := getRocksCacheRedisClient(&rocks)
	//t.Log(rdb == nil)

	ctx := context.Background()
	rdb, err := redisutil.NewRedisClient(ctx, (&config.Redis{
		Address:  []string{"172.16.8.48:16379"},
		Password: "openIM123",
		DB:       3,
	}).Build())
	if err != nil {
		panic(err)
	}
	mgocli, err := mongoutil.NewMongoDB(ctx, (&config.Mongo{
		Address:     []string{"172.16.8.48:37017"},
		Database:    "openim_v3",
		Username:    "openIM",
		Password:    "openIM123",
		MaxPoolSize: 100,
		MaxRetry:    1,
	}).Build())
	if err != nil {
		panic(err)
	}
	//userMgo, err := mgo.NewUserMongo(mgocli.GetDB())
	//if err != nil {
	//	panic(err)
	//}
	//rock := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
	mgoSeqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
	if err != nil {
		panic(err)
	}
	seqUser := NewSeqUserCacheRedis(rdb, mgoSeqUser)

	res, err := seqUser.GetReadSeqs(ctx, "2110910952", []string{"sg_2920732023", "sg_345762580"})
	if err != nil {
		panic(err)
	}

	t.Log(res)
}
@@ -0,0 +1,89 @@
package redis

import (
	"context"
	"strconv"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
)

func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
	return &userOnline{
		rdb:         rdb,
		expire:      cachekey.OnlineExpire,
		channelName: cachekey.OnlineChannel,
	}
}

type userOnline struct {
	rdb         redis.UniversalClient
	expire      time.Duration
	channelName string
}

func (s *userOnline) getUserOnlineKey(userID string) string {
	return cachekey.GetOnlineKey(userID)
}

func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) {
	members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{
		Min: strconv.FormatInt(time.Now().Unix(), 10),
		Max: "+inf",
	}).Result()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	platformIDs := make([]int32, 0, len(members))
	for _, member := range members {
		val, err := strconv.Atoi(member)
		if err != nil {
			return nil, errs.Wrap(err)
		}
		platformIDs = append(platformIDs, int32(val))
	}
	return platformIDs, nil
}

func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
	script := `
	local key = KEYS[1]
	local score = ARGV[3]
	local num1 = redis.call("ZCARD", key)
	redis.call("ZREMRANGEBYSCORE", key, "-inf", ARGV[2])
	for i = 5, tonumber(ARGV[4])+4 do
		redis.call("ZREM", key, ARGV[i])
	end
	local num2 = redis.call("ZCARD", key)
	for i = 5+tonumber(ARGV[4]), #ARGV do
		redis.call("ZADD", key, score, ARGV[i])
	end
	redis.call("EXPIRE", key, ARGV[1])
	local num3 = redis.call("ZCARD", key)
	local change = (num1 ~= num2) or (num2 ~= num3)
	if change then
		local members = redis.call("ZRANGE", key, 0, -1)
		table.insert(members, KEYS[2])
		redis.call("PUBLISH", KEYS[3], table.concat(members, ":"))
		return 1
	else
		return 0
	end
`
	now := time.Now()
	argv := make([]any, 0, 2+len(online)+len(offline))
	argv = append(argv, int32(s.expire/time.Second), now.Unix(), now.Add(s.expire).Unix(), int32(len(offline)))
	for _, platformID := range offline {
		argv = append(argv, platformID)
	}
	for _, platformID := range online {
		argv = append(argv, platformID)
	}
	keys := []string{s.getUserOnlineKey(userID), userID, s.channelName}
	if err := s.rdb.Eval(ctx, script, keys, argv).Err(); err != nil {
		return err
	}
	return nil
}
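The Lua script's arguments are positional: ARGV[1] is the key TTL in seconds, ARGV[2] the purge cutoff (now, so members whose expiry score has passed are removed), ARGV[3] the score stamped on newly added members (now + expire), ARGV[4] the count of offline platform IDs, followed by the offline IDs and then the online IDs. A worked example of one call, with arbitrary user and platform IDs:

// Illustrative call, assuming expire = 30m and now = 1700000000:
//
//	cache := NewUserOnline(rdb)
//	err := cache.SetUserOnline(ctx, "u100", []int32{1}, []int32{2})
//
// evaluates the script with
//
//	KEYS = ["ONLINE:u100", "u100", "online_change"]
//	ARGV = [1800, 1700000000, 1700001800, 1, 2, 1]
//
// i.e. drop members scored at or below 1700000000, ZREM platform 2, ZADD
// platform 1 with score 1700001800, refresh the 1800s TTL, and, because
// membership changed, publish "1:u100" on the online_change channel.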
@@ -1,200 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis

import (
	"context"
	"errors"
	"sync"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/utils/stringutil"
	"github.com/redis/go-redis/v9"
)

func NewSeqCache(rdb redis.UniversalClient) cache.SeqCache {
	return &seqCache{rdb: rdb}
}

type seqCache struct {
	rdb redis.UniversalClient
}

func (c *seqCache) getMaxSeqKey(conversationID string) string {
	return cachekey.GetMaxSeqKey(conversationID)
}

func (c *seqCache) getMinSeqKey(conversationID string) string {
	return cachekey.GetMinSeqKey(conversationID)
}

func (c *seqCache) getHasReadSeqKey(conversationID string, userID string) string {
	return cachekey.GetHasReadSeqKey(conversationID, userID)
}

func (c *seqCache) getConversationUserMinSeqKey(conversationID, userID string) string {
	return cachekey.GetConversationUserMinSeqKey(conversationID, userID)
}

func (c *seqCache) setSeq(ctx context.Context, conversationID string, seq int64, getkey func(conversationID string) string) error {
	return errs.Wrap(c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err())
}

func (c *seqCache) getSeq(ctx context.Context, conversationID string, getkey func(conversationID string) string) (int64, error) {
	val, err := c.rdb.Get(ctx, getkey(conversationID)).Int64()
	if err != nil {
		return 0, errs.Wrap(err)
	}
	return val, nil
}

func (c *seqCache) getSeqs(ctx context.Context, items []string, getkey func(s string) string) (m map[string]int64, err error) {
	m = make(map[string]int64, len(items))
	var (
		reverseMap = make(map[string]string, len(items))
		keys       = make([]string, len(items))
		lock       sync.Mutex
	)

	for i, v := range items {
		keys[i] = getkey(v)
		reverseMap[getkey(v)] = v
	}

	manager := NewRedisShardManager(c.rdb)
	if err = manager.ProcessKeysBySlot(ctx, keys, func(ctx context.Context, _ int64, keys []string) error {
		res, err := c.rdb.MGet(ctx, keys...).Result()
		if err != nil && !errors.Is(err, redis.Nil) {
			return errs.Wrap(err)
		}

		// len(res) <= len(items)
		for i := range res {
			strRes, ok := res[i].(string)
			if !ok {
				continue
			}
			val := stringutil.StringToInt64(strRes)
			if val != 0 {
				lock.Lock()
				m[reverseMap[keys[i]]] = val
				lock.Unlock()
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}

	return m, nil
}

func (c *seqCache) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
	return c.setSeq(ctx, conversationID, maxSeq, c.getMaxSeqKey)
}

func (c *seqCache) GetMaxSeqs(ctx context.Context, conversationIDs []string) (m map[string]int64, err error) {
	return c.getSeqs(ctx, conversationIDs, c.getMaxSeqKey)
}

func (c *seqCache) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return c.getSeq(ctx, conversationID, c.getMaxSeqKey)
}

func (c *seqCache) SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error {
	return c.setSeq(ctx, conversationID, minSeq, c.getMinSeqKey)
}

func (c *seqCache) setSeqs(ctx context.Context, seqs map[string]int64, getkey func(key string) string) error {
	for conversationID, seq := range seqs {
		if err := c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err(); err != nil {
			return errs.Wrap(err)
		}
	}
	return nil
}

func (c *seqCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	return c.setSeqs(ctx, seqs, c.getMinSeqKey)
}

func (c *seqCache) GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	return c.getSeqs(ctx, conversationIDs, c.getMinSeqKey)
}

func (c *seqCache) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	return c.getSeq(ctx, conversationID, c.getMinSeqKey)
}

func (c *seqCache) GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	val, err := c.rdb.Get(ctx, c.getConversationUserMinSeqKey(conversationID, userID)).Int64()
	if err != nil {
		return 0, errs.Wrap(err)
	}
	return val, nil
}

func (c *seqCache) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (m map[string]int64, err error) {
	return c.getSeqs(ctx, userIDs, func(userID string) string {
		return c.getConversationUserMinSeqKey(conversationID, userID)
	})
}

func (c *seqCache) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
	return errs.Wrap(c.rdb.Set(ctx, c.getConversationUserMinSeqKey(conversationID, userID), minSeq, 0).Err())
}

func (c *seqCache) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
	return c.setSeqs(ctx, seqs, func(userID string) string {
		return c.getConversationUserMinSeqKey(conversationID, userID)
	})
}

func (c *seqCache) SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error) {
	return c.setSeqs(ctx, seqs, func(conversationID string) string {
		return c.getConversationUserMinSeqKey(conversationID, userID)
	})
}

func (c *seqCache) SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
	return errs.Wrap(c.rdb.Set(ctx, c.getHasReadSeqKey(conversationID, userID), hasReadSeq, 0).Err())
}

func (c *seqCache) SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error {
	return c.setSeqs(ctx, hasReadSeqs, func(userID string) string {
		return c.getHasReadSeqKey(conversationID, userID)
	})
}

func (c *seqCache) UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error {
	return c.setSeqs(ctx, hasReadSeqs, func(conversationID string) string {
		return c.getHasReadSeqKey(conversationID, userID)
	})
}

func (c *seqCache) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	return c.getSeqs(ctx, conversationIDs, func(conversationID string) string {
		return c.getHasReadSeqKey(conversationID, userID)
	})
}

func (c *seqCache) GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error) {
	val, err := c.rdb.Get(ctx, c.getHasReadSeqKey(conversationID, userID)).Int64()
	if err != nil {
		return 0, err
	}
	return val, nil
}
@ -0,0 +1,333 @@
package redis

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
)

func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache {
	return &seqConversationCacheRedis{
		rdb:              rdb,
		mgo:              mgo,
		lockTime:         time.Second * 3,
		dataTime:         time.Hour * 24 * 365,
		minSeqExpireTime: time.Hour,
		rocks:            rockscache.NewClient(rdb, *GetRocksCacheOptions()),
	}
}

type seqConversationCacheRedis struct {
	rdb              redis.UniversalClient
	mgo              database.SeqConversation
	rocks            *rockscache.Client
	lockTime         time.Duration
	dataTime         time.Duration
	minSeqExpireTime time.Duration
}

func (s *seqConversationCacheRedis) getMinSeqKey(conversationID string) string {
	return cachekey.GetMallocMinSeqKey(conversationID)
}

func (s *seqConversationCacheRedis) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
	return s.SetMinSeqs(ctx, map[string]int64{conversationID: seq})
}

func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMinSeq(ctx, conversationID)
	})
}

func (s *seqConversationCacheRedis) getSingleMaxSeq(ctx context.Context, conversationID string) (map[string]int64, error) {
	seq, err := s.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return nil, err
	}
	return map[string]int64{conversationID: seq}, nil
}

func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error {
	result := make([]*redis.StringCmd, len(keys))
	pipe := s.rdb.Pipeline()
	for i, key := range keys {
		result[i] = pipe.HGet(ctx, key, "CURR")
	}
	// HGet on a missing key makes Exec return redis.Nil; that case is handled
	// per command below, so only real errors abort here.
	if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
		return errs.Wrap(err)
	}
	var notFoundKey []string
	for i, r := range result {
		req, err := r.Int64()
		if err == nil {
			seqs[keyConversationID[keys[i]]] = req
		} else if errors.Is(err, redis.Nil) {
			notFoundKey = append(notFoundKey, keys[i])
		} else {
			return errs.Wrap(err)
		}
	}
	for _, key := range notFoundKey {
		conversationID := keyConversationID[key]
		seq, err := s.GetMaxSeq(ctx, conversationID)
		if err != nil {
			return err
		}
		seqs[conversationID] = seq
	}
	return nil
}

func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	switch len(conversationIDs) {
	case 0:
		return map[string]int64{}, nil
	case 1:
		return s.getSingleMaxSeq(ctx, conversationIDs[0])
	}
	keys := make([]string, 0, len(conversationIDs))
	keyConversationID := make(map[string]string, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		key := s.getSeqMallocKey(conversationID)
		if _, ok := keyConversationID[key]; ok {
			continue
		}
		keys = append(keys, key)
		keyConversationID[key] = conversationID
	}
	if len(keys) == 1 {
		return s.getSingleMaxSeq(ctx, conversationIDs[0])
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
	if err != nil {
		return nil, err
	}
	seqs := make(map[string]int64, len(conversationIDs))
	for _, keys := range slotKeys {
		if err := s.batchGetMaxSeq(ctx, keys, keyConversationID, seqs); err != nil {
			return nil, err
		}
	}
	return seqs, nil
}

func (s *seqConversationCacheRedis) getSeqMallocKey(conversationID string) string {
	return cachekey.GetMallocSeqKey(conversationID)
}

func (s *seqConversationCacheRedis) setSeq(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) (int64, error) {
	if lastSeq < currSeq {
		return 0, errs.New("lastSeq must not be less than currSeq")
	}
	// 0: success, the lock we held was released and the seqs were written
	// 1: the lock had expired (key missing), so the seqs were written fresh
	// 2: the lock is held by someone else; nothing was written
	script := `
local key = KEYS[1]
local lockValue = ARGV[1]
local dataSecond = ARGV[2]
local curr_seq = tonumber(ARGV[3])
local last_seq = tonumber(ARGV[4])
if redis.call("EXISTS", key) == 0 then
	redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq)
	redis.call("EXPIRE", key, dataSecond)
	return 1
end
if redis.call("HGET", key, "LOCK") ~= lockValue then
	return 2
end
redis.call("HDEL", key, "LOCK")
redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq)
redis.call("EXPIRE", key, dataSecond)
return 0
`
	result, err := s.rdb.Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq).Int64()
	if err != nil {
		return 0, errs.Wrap(err)
	}
	return result, nil
}

// malloc: size == 0 only reads the current seq; size > 0 allocates a block of
// size seqs.
func (s *seqConversationCacheRedis) malloc(ctx context.Context, key string, size int64) ([]int64, error) {
	// 0: success
	// 1: not in cache; the caller must fetch from the DB while holding the lock
	// 2: locked by another caller; retry later
	// 3: the allocation exceeded the cached max value; locked for a DB refill
	script := `
local key = KEYS[1]
local size = tonumber(ARGV[1])
local lockSecond = ARGV[2]
local dataSecond = ARGV[3]
local result = {}
if redis.call("EXISTS", key) == 0 then
	local lockValue = math.random(0, 999999999)
	redis.call("HSET", key, "LOCK", lockValue)
	redis.call("EXPIRE", key, lockSecond)
	table.insert(result, 1)
	table.insert(result, lockValue)
	return result
end
if redis.call("HEXISTS", key, "LOCK") == 1 then
	table.insert(result, 2)
	return result
end
local curr_seq = tonumber(redis.call("HGET", key, "CURR"))
local last_seq = tonumber(redis.call("HGET", key, "LAST"))
if size == 0 then
	redis.call("EXPIRE", key, dataSecond)
	table.insert(result, 0)
	table.insert(result, curr_seq)
	table.insert(result, last_seq)
	return result
end
local max_seq = curr_seq + size
if max_seq > last_seq then
	local lockValue = math.random(0, 999999999)
	redis.call("HSET", key, "LOCK", lockValue)
	redis.call("HSET", key, "CURR", last_seq)
	redis.call("EXPIRE", key, lockSecond)
	table.insert(result, 3)
	table.insert(result, curr_seq)
	table.insert(result, last_seq)
	table.insert(result, lockValue)
	return result
end
redis.call("HSET", key, "CURR", max_seq)
redis.call("EXPIRE", key, dataSecond)
table.insert(result, 0)
table.insert(result, curr_seq)
table.insert(result, last_seq)
return result
`
	result, err := s.rdb.Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second)).Int64Slice()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	return result, nil
}

func (s *seqConversationCacheRedis) wait(ctx context.Context) error {
	timer := time.NewTimer(time.Second / 4)
	defer timer.Stop()
	select {
	case <-timer.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (s *seqConversationCacheRedis) setSeqRetry(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) {
	for i := 0; i < 10; i++ {
		state, err := s.setSeq(ctx, key, owner, currSeq, lastSeq)
		if err != nil {
			log.ZError(ctx, "set seq cache failed", err, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq, "count", i+1)
			if err := s.wait(ctx); err != nil {
				return
			}
			continue
		}
		switch state {
		case 0: // ideal state
		case 1:
			log.ZWarn(ctx, "set seq cache lock not found", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		case 2:
			log.ZWarn(ctx, "set seq cache lock held by someone else", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		default:
			log.ZError(ctx, "set seq cache lock unknown state", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		}
		return
	}
	log.ZError(ctx, "set seq cache retrying still failed", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
}

func (s *seqConversationCacheRedis) getMallocSize(conversationID string, size int64) int64 {
	if size == 0 {
		return 0
	}
	var basicSize int64
	if msgprocessor.IsGroupConversationID(conversationID) {
		basicSize = 100
	} else {
		basicSize = 50
	}
	basicSize += size
	return basicSize
}

func (s *seqConversationCacheRedis) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
	if size < 0 {
		return 0, errs.New("size must not be negative")
	}
	key := s.getSeqMallocKey(conversationID)
	for i := 0; i < 10; i++ {
		states, err := s.malloc(ctx, key, size)
		if err != nil {
			return 0, err
		}
		switch states[0] {
		case 0: // success
			return states[1], nil
		case 1: // not in cache
			mallocSize := s.getMallocSize(conversationID, size)
			seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
			if err != nil {
				return 0, err
			}
			s.setSeqRetry(ctx, key, states[1], seq+size, seq+mallocSize)
			return seq, nil
		case 2: // locked
			if err := s.wait(ctx); err != nil {
				return 0, err
			}
			continue
		case 3: // exceeded the cached max value
			currSeq := states[1]
			lastSeq := states[2]
			mallocSize := s.getMallocSize(conversationID, size)
			seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
			if err != nil {
				return 0, err
			}
			if lastSeq == seq {
				s.setSeqRetry(ctx, key, states[3], currSeq+size, seq+mallocSize)
				return currSeq, nil
			} else {
				log.ZWarn(ctx, "malloc seq not equal cache last seq", nil, "conversationID", conversationID, "currSeq", currSeq, "lastSeq", lastSeq, "mallocSeq", seq)
				s.setSeqRetry(ctx, key, states[3], seq+size, seq+mallocSize)
				return seq, nil
			}
		default:
			log.ZError(ctx, "malloc seq unknown state", nil, "state", states[0], "conversationID", conversationID, "size", size)
			return 0, errs.New(fmt.Sprintf("unknown state: %d", states[0]))
		}
	}
	log.ZError(ctx, "malloc seq retrying still failed", nil, "conversationID", conversationID, "size", size)
	return 0, errs.New("malloc seq waiting for lock timeout", "conversationID", conversationID, "size", size)
}

func (s *seqConversationCacheRedis) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return s.Malloc(ctx, conversationID, 0)
}

func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	for conversationID, seq := range seqs {
		keys = append(keys, s.getMinSeqKey(conversationID))
		if err := s.mgo.SetMinSeq(ctx, conversationID, seq); err != nil {
			return err
		}
	}
	return DeleteCacheBySlot(ctx, s.rocks, keys)
}
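To make the allocation contract concrete: Malloc returns the seq immediately before the allocated block, so a call with size n hands the caller the range (first, first+n]. A minimal sketch of a producer stamping a batch of messages (the caller name is hypothetical):

// Sketch: reserve n contiguous seqs for one conversation. With first as the
// return value, the reserved seqs are first+1 .. first+n.
func stampBatch(ctx context.Context, c cache.SeqConversationCache, conversationID string, n int64) ([]int64, error) {
	first, err := c.Malloc(ctx, conversationID, n)
	if err != nil {
		return nil, err
	}
	seqs := make([]int64, 0, n)
	for i := int64(1); i <= n; i++ {
		seqs = append(seqs, first+i)
	}
	return seqs, nil
}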
@ -0,0 +1,109 @@
package redis

import (
	"context"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
	"github.com/redis/go-redis/v9"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func newTestSeq() *seqConversationCacheRedis {
	mgocli, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))
	if err != nil {
		panic(err)
	}
	model, err := mgo.NewSeqConversationMongo(mgocli.Database("openim_v3"))
	if err != nil {
		panic(err)
	}
	opt := &redis.Options{
		Addr:     "172.16.8.48:16379",
		Password: "openIM123",
		DB:       1,
	}
	rdb := redis.NewClient(opt)
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	return NewSeqConversationCacheRedis(rdb, model).(*seqConversationCacheRedis)
}

func TestSeq(t *testing.T) {
	ts := newTestSeq()
	var (
		wg    sync.WaitGroup
		speed atomic.Int64
	)

	const count = 128
	wg.Add(count)
	for i := 0; i < count; i++ {
		index := i + 1
		go func() {
			defer wg.Done()
			var size int64 = 10
			cID := strconv.Itoa(index)
			for i := 1; ; i++ {
				//first, err := ts.mgo.Malloc(context.Background(), cID, size) // mongo
				first, err := ts.Malloc(context.Background(), cID, size) // redis
				if err != nil {
					t.Logf("[%d-%d] %s %s", index, i, cID, err)
					return
				}
				speed.Add(size)
				_ = first
				//t.Logf("[%d] %d -> %d", i, first+1, first+size)
			}
		}()
	}

	done := make(chan struct{})

	go func() {
		wg.Wait()
		close(done)
	}()

	ticker := time.NewTicker(time.Second)

	for {
		select {
		case <-done:
			ticker.Stop()
			return
		case <-ticker.C:
			value := speed.Swap(0)
			t.Logf("speed: %d/s", value)
		}
	}
}

func TestDel(t *testing.T) {
	ts := newTestSeq()
	for i := 1; i < 100; i++ {
		var size int64 = 100
		first, err := ts.Malloc(context.Background(), "100", size)
		if err != nil {
			t.Logf("[%d] %s", i, err)
			return
		}
		t.Logf("[%d] %d -> %d", i, first+1, first+size)
		time.Sleep(time.Second)
	}
}

func TestSeqMalloc(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMaxSeq(context.Background(), "100"))
}

func TestMinSeq(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMinSeq(context.Background(), "10000000"))
}
@ -0,0 +1,185 @@
package redis

import (
	"context"
	"strconv"
	"time"

	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
)

func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser {
	return &seqUserCacheRedis{
		rdb:               rdb,
		mgo:               mgo,
		readSeqWriteRatio: 100,
		expireTime:        time.Hour * 24 * 7,
		readExpireTime:    time.Hour * 24 * 30,
		rocks:             rockscache.NewClient(rdb, *GetRocksCacheOptions()),
	}
}

type seqUserCacheRedis struct {
	rdb               redis.UniversalClient
	mgo               database.SeqUser
	rocks             *rockscache.Client
	expireTime        time.Duration
	readExpireTime    time.Duration
	readSeqWriteRatio int64
}

func (s *seqUserCacheRedis) getSeqUserMaxSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserMaxSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) getSeqUserMinSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserMinSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) getSeqUserReadSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserReadSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserMaxSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMaxSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	if err := s.mgo.SetMaxSeq(ctx, conversationID, userID, seq); err != nil {
		return err
	}
	return s.rocks.TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
}

func (s *seqUserCacheRedis) GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserMinSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMinSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.SetMinSeqs(ctx, userID, map[string]int64{conversationID: seq})
}

func (s *seqUserCacheRedis) GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserReadSeqKey(conversationID, userID), s.readExpireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetReadSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	// Only every readSeqWriteRatio-th seq is checkpointed to Mongo; Redis
	// absorbs the rest.
	if seq%s.readSeqWriteRatio == 0 {
		if err := s.mgo.SetReadSeq(ctx, conversationID, userID, seq); err != nil {
			return err
		}
	}
	if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.FormatInt(seq, 10), s.readExpireTime); err != nil {
		return errs.Wrap(err)
	}
	return nil
}

func (s *seqUserCacheRedis) SetMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	for conversationID, seq := range seqs {
		if err := s.mgo.SetMinSeq(ctx, conversationID, userID, seq); err != nil {
			return err
		}
		keys = append(keys, s.getSeqUserMinSeqKey(conversationID, userID))
	}
	return DeleteCacheBySlot(ctx, s.rocks, keys)
}

func (s *seqUserCacheRedis) setRedisReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	keySeq := make(map[string]int64)
	for conversationID, seq := range seqs {
		key := s.getSeqUserReadSeqKey(conversationID, userID)
		keys = append(keys, key)
		keySeq[key] = seq
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
	if err != nil {
		return err
	}
	for _, keys := range slotKeys {
		pipe := s.rdb.Pipeline()
		for _, key := range keys {
			pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10))
			pipe.Expire(ctx, key, s.readExpireTime)
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return err
		}
	}
	return nil
}

func (s *seqUserCacheRedis) SetReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	if len(seqs) == 0 {
		return nil
	}
	if err := s.setRedisReadSeqs(ctx, userID, seqs); err != nil {
		return err
	}
	for conversationID, seq := range seqs {
		if seq%s.readSeqWriteRatio == 0 {
			if err := s.mgo.SetReadSeq(ctx, conversationID, userID, seq); err != nil {
				return err
			}
		}
	}
	return nil
}

func (s *seqUserCacheRedis) GetReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	res, err := batchGetCache2(ctx, s.rocks, s.readExpireTime, conversationIDs, func(conversationID string) string {
		return s.getSeqUserReadSeqKey(conversationID, userID)
	}, func(v *readSeqModel) string {
		return v.ConversationID
	}, func(ctx context.Context, conversationIDs []string) ([]*readSeqModel, error) {
		seqs, err := s.mgo.GetReadSeqs(ctx, userID, conversationIDs)
		if err != nil {
			return nil, err
		}
		res := make([]*readSeqModel, 0, len(seqs))
		for conversationID, seq := range seqs {
			res = append(res, &readSeqModel{ConversationID: conversationID, Seq: seq})
		}
		return res, nil
	})
	if err != nil {
		return nil, err
	}
	data := make(map[string]int64)
	for _, v := range res {
		data[v.ConversationID] = v.Seq
	}
	return data, nil
}

var _ BatchCacheCallback[string] = (*readSeqModel)(nil)

type readSeqModel struct {
	ConversationID string
	Seq            int64
}

func (r *readSeqModel) BatchCache(conversationID string) {
	r.ConversationID = conversationID
}

func (r *readSeqModel) UnmarshalJSON(bytes []byte) (err error) {
	r.Seq, err = strconv.ParseInt(string(bytes), 10, 64)
	return
}

func (r *readSeqModel) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatInt(r.Seq, 10)), nil
}
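Note the write-back policy implied by readSeqWriteRatio above: Redis sees every read-seq update, Mongo only every 100th, so up to 99 trailing updates may exist only in Redis at any moment. A trivial sketch of the gate (helper name is made up):

// Sketch: which read-seq writes also reach Mongo when the ratio is 100.
// seq 1..99 stay in Redis; seq 100, 200, ... are persisted as checkpoints,
// and a cache miss falls back to the last persisted checkpoint.
func persistedToMongo(seq, ratio int64) bool {
	return seq%ratio == 0
}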
@ -0,0 +1,79 @@
package redis

import (
	"context"
	"fmt"
	"log"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/redis/go-redis/v9"
)

func newTestOnline() *userOnline {
	opt := &redis.Options{
		Addr:     "172.16.8.48:16379",
		Password: "openIM123",
		DB:       0,
	}
	rdb := redis.NewClient(opt)
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	return &userOnline{rdb: rdb, expire: time.Hour, channelName: "user_online"}
}

func TestOnline(t *testing.T) {
	ts := newTestOnline()
	var count atomic.Int64
	for i := 0; i < 64; i++ {
		go func(userID string) {
			var err error
			for i := 0; ; i++ {
				if i%2 == 0 {
					err = ts.SetUserOnline(context.Background(), userID, []int32{5, 6}, []int32{7, 8, 9})
				} else {
					err = ts.SetUserOnline(context.Background(), userID, []int32{1, 2, 3}, []int32{4, 5, 6})
				}
				if err != nil {
					panic(err)
				}
				count.Add(1)
			}
		}(strconv.Itoa(10000 + i))
	}

	ticker := time.NewTicker(time.Second)
	for range ticker.C {
		t.Log(count.Swap(0))
	}
}

func TestGetOnline(t *testing.T) {
	ts := newTestOnline()
	ctx := context.Background()
	pIDs, err := ts.GetOnline(ctx, "10000")
	if err != nil {
		panic(err)
	}
	t.Log(pIDs)
}

func TestRecvOnline(t *testing.T) {
	ts := newTestOnline()
	ctx := context.Background()
	pubsub := ts.rdb.Subscribe(ctx, cachekey.OnlineChannel)

	_, err := pubsub.Receive(ctx)
	if err != nil {
		log.Fatalf("Could not subscribe: %v", err)
	}

	ch := pubsub.Channel()

	for msg := range ch {
		fmt.Printf("Received message from channel %s: %s\n", msg.Channel, msg.Payload)
	}
}
@ -1,30 +0,0 @@
package cache

import (
	"context"
)

type SeqCache interface {
	SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
	GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
	SetMinSeqs(ctx context.Context, seqs map[string]int64) error
	GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	GetMinSeq(ctx context.Context, conversationID string) (int64, error)
	GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
	SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
	// seqs map: key is userID, value is minSeq
	SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
	// seqs map: key is conversationID, value is minSeq
	SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
	// has read seq
	SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
	// k: userID, v: seq
	SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error
	// k: conversationID, v: seq
	UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
	GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
	GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
}
@ -0,0 +1,12 @@
package cache

import "context"

type SeqConversationCache interface {
	Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string) (int64, error)
	GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	SetMinSeqs(ctx context.Context, seqs map[string]int64) error
}
@ -0,0 +1,15 @@
package cache

import "context"

type SeqUser interface {
	GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	SetMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
	SetReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error
	GetReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
}
@ -0,0 +1,103 @@
package mgo

import (
	"context"
	"errors"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/mongoutil"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func NewSeqConversationMongo(db *mongo.Database) (database.SeqConversation, error) {
	coll := db.Collection(database.SeqConversationName)
	_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
		Keys: bson.D{
			{Key: "conversation_id", Value: 1},
		},
	})
	if err != nil {
		return nil, err
	}
	return &seqConversationMongo{coll: coll}, nil
}

type seqConversationMongo struct {
	coll *mongo.Collection
}

func (s *seqConversationMongo) setSeq(ctx context.Context, conversationID string, seq int64, field string) error {
	filter := map[string]any{
		"conversation_id": conversationID,
	}
	insert := bson.M{
		"conversation_id": conversationID,
		"min_seq":         0,
		"max_seq":         0,
	}
	// The field being $set must not also appear in $setOnInsert, or the
	// upsert would conflict on that path.
	delete(insert, field)
	update := map[string]any{
		"$set": bson.M{
			field: seq,
		},
		"$setOnInsert": insert,
	}
	opt := options.Update().SetUpsert(true)
	return mongoutil.UpdateOne(ctx, s.coll, filter, update, false, opt)
}

func (s *seqConversationMongo) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
	if size < 0 {
		return 0, errors.New("size must not be negative")
	}
	if size == 0 {
		return s.GetMaxSeq(ctx, conversationID)
	}
	filter := map[string]any{"conversation_id": conversationID}
	update := map[string]any{
		"$inc": map[string]any{"max_seq": size},
		"$set": map[string]any{"min_seq": int64(0)},
	}
	opt := options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After).SetProjection(map[string]any{"_id": 0, "max_seq": 1})
	lastSeq, err := mongoutil.FindOneAndUpdate[int64](ctx, s.coll, filter, update, opt)
	if err != nil {
		return 0, err
	}
	return lastSeq - size, nil
}

func (s *seqConversationMongo) SetMaxSeq(ctx context.Context, conversationID string, seq int64) error {
	return s.setSeq(ctx, conversationID, seq, "max_seq")
}

func (s *seqConversationMongo) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	seq, err := mongoutil.FindOne[int64](ctx, s.coll, bson.M{"conversation_id": conversationID}, options.FindOne().SetProjection(map[string]any{"_id": 0, "max_seq": 1}))
	if err == nil {
		return seq, nil
	} else if IsNotFound(err) {
		return 0, nil
	} else {
		return 0, err
	}
}

func (s *seqConversationMongo) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	seq, err := mongoutil.FindOne[int64](ctx, s.coll, bson.M{"conversation_id": conversationID}, options.FindOne().SetProjection(map[string]any{"_id": 0, "min_seq": 1}))
	if err == nil {
		return seq, nil
	} else if IsNotFound(err) {
		return 0, nil
	} else {
		return 0, err
	}
}

func (s *seqConversationMongo) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
	return s.setSeq(ctx, conversationID, seq, "min_seq")
}

func (s *seqConversationMongo) GetConversation(ctx context.Context, conversationID string) (*model.SeqConversation, error) {
	return mongoutil.FindOne[*model.SeqConversation](ctx, s.coll, bson.M{"conversation_id": conversationID})
}
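Because Malloc above is a single atomic FindOneAndUpdate with $inc, concurrent callers always receive disjoint ranges. A hedged sketch of that property (hypothetical harness; assumes fmt is imported and a constructed database.SeqConversation):

// Sketch: two sequential Malloc calls yield non-overlapping blocks; the same
// holds for concurrent callers because $inc is atomic per document.
func demoDisjoint(ctx context.Context, s database.SeqConversation) error {
	a, err := s.Malloc(ctx, "demo_conv", 10) // reserves (a, a+10]
	if err != nil {
		return err
	}
	b, err := s.Malloc(ctx, "demo_conv", 10) // reserves (b, b+10], with b >= a+10
	if err != nil {
		return err
	}
	if b < a+10 {
		return fmt.Errorf("ranges overlap: %d and %d", a, b)
	}
	return nil
}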
@ -0,0 +1,37 @@
package mgo

import (
	"context"
	"testing"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func Result[V any](val V, err error) V {
	if err != nil {
		panic(err)
	}
	return val
}

func Mongodb() *mongo.Database {
	return Result(
		mongo.Connect(context.Background(),
			options.Client().
				ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").
				SetConnectTimeout(5*time.Second)),
	).Database("openim_v3")
}

func TestUserSeq(t *testing.T) {
	uSeq := Result(NewSeqUserMongo(Mongodb())).(*seqUserMongo)
	t.Log(uSeq.SetMinSeq(context.Background(), "1000", "2000", 4))
}

func TestConversationSeq(t *testing.T) {
	cSeq := Result(NewSeqConversationMongo(Mongodb())).(*seqConversationMongo)
	t.Log(cSeq.SetMaxSeq(context.Background(), "2000", 10))
	t.Log(cSeq.Malloc(context.Background(), "2000", 10))
	t.Log(cSeq.GetMaxSeq(context.Background(), "2000"))
}
@ -0,0 +1,110 @@
package mgo

import (
	"context"
	"errors"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/mongoutil"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func NewSeqUserMongo(db *mongo.Database) (database.SeqUser, error) {
	coll := db.Collection(database.SeqUserName)
	_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
		Keys: bson.D{
			{Key: "user_id", Value: 1},
			{Key: "conversation_id", Value: 1},
		},
	})
	if err != nil {
		return nil, err
	}
	return &seqUserMongo{coll: coll}, nil
}

type seqUserMongo struct {
	coll *mongo.Collection
}

func (s *seqUserMongo) setSeq(ctx context.Context, conversationID string, userID string, seq int64, field string) error {
	filter := map[string]any{
		"user_id":         userID,
		"conversation_id": conversationID,
	}
	insert := bson.M{
		"user_id":         userID,
		"conversation_id": conversationID,
		"min_seq":         0,
		"max_seq":         0,
		"read_seq":        0,
	}
	delete(insert, field)
	update := map[string]any{
		"$set": bson.M{
			field: seq,
		},
		"$setOnInsert": insert,
	}
	opt := options.Update().SetUpsert(true)
	return mongoutil.UpdateOne(ctx, s.coll, filter, update, false, opt)
}

func (s *seqUserMongo) getSeq(ctx context.Context, conversationID string, userID string, field string) (int64, error) {
	filter := map[string]any{
		"user_id":         userID,
		"conversation_id": conversationID,
	}
	opt := options.FindOne().SetProjection(bson.M{"_id": 0, field: 1})
	seq, err := mongoutil.FindOne[int64](ctx, s.coll, filter, opt)
	if err == nil {
		return seq, nil
	} else if errors.Is(err, mongo.ErrNoDocuments) {
		return 0, nil
	} else {
		return 0, err
	}
}

func (s *seqUserMongo) GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return s.getSeq(ctx, conversationID, userID, "max_seq")
}

func (s *seqUserMongo) SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.setSeq(ctx, conversationID, userID, seq, "max_seq")
}

func (s *seqUserMongo) GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return s.getSeq(ctx, conversationID, userID, "min_seq")
}

func (s *seqUserMongo) SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.setSeq(ctx, conversationID, userID, seq, "min_seq")
}

func (s *seqUserMongo) GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return s.getSeq(ctx, conversationID, userID, "read_seq")
}

func (s *seqUserMongo) GetReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	if len(conversationIDs) == 0 {
		return map[string]int64{}, nil
	}
	filter := bson.M{"user_id": userID, "conversation_id": bson.M{"$in": conversationIDs}}
	opt := options.Find().SetProjection(bson.M{"_id": 0, "conversation_id": 1, "read_seq": 1})
	seqs, err := mongoutil.Find[*model.SeqUser](ctx, s.coll, filter, opt)
	if err != nil {
		return nil, err
	}
	res := make(map[string]int64)
	for _, seq := range seqs {
		res[seq.ConversationID] = seq.ReadSeq
	}
	return res, nil
}

func (s *seqUserMongo) SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.setSeq(ctx, conversationID, userID, seq, "read_seq")
}
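One detail worth spelling out from setSeq above: Mongo rejects an upsert whose $set and $setOnInsert target the same path, which is why the field being written is deleted from the insert defaults first. A sketch of the update document produced for "read_seq" (values hypothetical):

// Sketch: what setSeq builds when field == "read_seq". Note that "read_seq"
// is absent from $setOnInsert, avoiding a path conflict on upsert.
func exampleSetSeqUpdate() bson.M {
	return bson.M{
		"$set":         bson.M{"read_seq": int64(7)},
		"$setOnInsert": bson.M{"user_id": "u1", "conversation_id": "c1", "min_seq": 0, "max_seq": 0},
	}
}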
@ -0,0 +1,11 @@
package database

import "context"

type SeqConversation interface {
	Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetMaxSeq(ctx context.Context, conversationID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, seq int64) error
}
@ -0,0 +1,13 @@
package database

import "context"

type SeqUser interface {
	GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
}
@ -0,0 +1,7 @@
package model

type SeqConversation struct {
	ConversationID string `bson:"conversation_id"`
	MaxSeq         int64  `bson:"max_seq"`
	MinSeq         int64  `bson:"min_seq"`
}
@ -0,0 +1,9 @@
package model

type SeqUser struct {
	UserID         string `bson:"user_id"`
	ConversationID string `bson:"conversation_id"`
	MinSeq         int64  `bson:"min_seq"`
	MaxSeq         int64  `bson:"max_seq"`
	ReadSeq        int64  `bson:"read_seq"`
}
@ -0,0 +1,100 @@
package rpccache

import (
	"context"
	"math/rand"
	"strconv"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/localcache"
	"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
	"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
	"github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mcontext"
	"github.com/redis/go-redis/v9"
)

func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache {
	x := &OnlineCache{
		user:  user,
		group: group,
		local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
			return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
		}),
	}
	go func() {
		ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10))
		for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() {
			userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
			if err != nil {
				log.ZError(ctx, "OnlineCache redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
				continue
			}
			storageCache := x.setUserOnline(userID, platformIDs)
			log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
			if fn != nil {
				fn(ctx, userID, platformIDs)
			}
		}
	}()
	return x
}

type OnlineCache struct {
	user  rpcclient.UserRpcClient
	group *GroupLocalCache
	local lru.LRU[string, []int32]
}

func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
	return o.local.Get(userID, func() ([]int32, error) {
		return o.user.GetUserOnlinePlatform(ctx, userID)
	})
}

func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
	platformIDs, err := o.GetUserOnlinePlatform(ctx, userID)
	if err != nil {
		return false, err
	}
	return len(platformIDs) > 0, nil
}

func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
	onlineUserIDs := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		online, err := o.GetUserOnline(ctx, userID)
		if err != nil {
			return nil, err
		}
		if online {
			onlineUserIDs = append(onlineUserIDs, userID)
		}
	}
	log.ZDebug(ctx, "OnlineCache GetUsersOnline", "userIDs", userIDs, "onlineUserIDs", onlineUserIDs)
	return onlineUserIDs, nil
}

func (o *OnlineCache) GetGroupOnline(ctx context.Context, groupID string) ([]string, error) {
	userIDs, err := o.group.GetGroupMemberIDs(ctx, groupID)
	if err != nil {
		return nil, err
	}
	var onlineUserIDs []string
	for _, userID := range userIDs {
		online, err := o.GetUserOnline(ctx, userID)
		if err != nil {
			return nil, err
		}
		if online {
			onlineUserIDs = append(onlineUserIDs, userID)
		}
	}
	log.ZDebug(ctx, "OnlineCache GetGroupOnline", "groupID", groupID, "onlineUserIDs", onlineUserIDs, "allUserID", userIDs)
	return onlineUserIDs, nil
}

func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool {
	return o.local.SetHas(userID, platformIDs)
}
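A hedged sketch of wiring NewOnlineCache above; the clients passed in and the callback body are placeholders, not part of this commit:

// Sketch: build the cache and observe status changes. userClient and
// groupCache are assumed to be constructed elsewhere.
func buildOnlineCache(userClient rpcclient.UserRpcClient, groupCache *GroupLocalCache, rdb redis.UniversalClient) *OnlineCache {
	return NewOnlineCache(userClient, groupCache, rdb, func(ctx context.Context, userID string, platformIDs []int32) {
		// Hypothetical hook: an empty platform list means the user went offline.
		log.ZDebug(ctx, "online change", "userID", userID, "platformIDs", platformIDs)
	})
}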
@ -0,0 +1,27 @@
package useronline

import (
	"errors"
	"strconv"
	"strings"
)

func ParseUserOnlineStatus(payload string) (string, []int32, error) {
	arr := strings.Split(payload, ":")
	if len(arr) == 0 {
		return "", nil, errors.New("invalid data")
	}
	userID := arr[len(arr)-1]
	if userID == "" {
		return "", nil, errors.New("userID is empty")
	}
	platformIDs := make([]int32, len(arr)-1)
	for i := range platformIDs {
		platformID, err := strconv.Atoi(arr[i])
		if err != nil {
			return "", nil, err
		}
		platformIDs[i] = int32(platformID)
	}
	return userID, platformIDs, nil
}
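The payload format here is "platformID(:platformID)*:userID": the user ID is always the last colon-separated segment, and every segment before it is a platform ID. A small sketch (made-up payload; assumes fmt is imported):

// Sketch: "1:3:user123" -> userID "user123", platformIDs [1 3]. A payload
// with no leading platform IDs ("user123") parses to an empty platform list.
func exampleParse() {
	userID, platformIDs, err := ParseUserOnlineStatus("1:3:user123")
	if err != nil {
		panic(err)
	}
	fmt.Println(userID, platformIDs) // user123 [1 3]
}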
@ -0,0 +1,331 @@
package internal

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
	"github.com/openimsdk/tools/db/mongoutil"
	"github.com/openimsdk/tools/db/redisutil"
	"github.com/redis/go-redis/v9"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"gopkg.in/yaml.v3"
)

const (
	MaxSeq                 = "MAX_SEQ:"
	MinSeq                 = "MIN_SEQ:"
	ConversationUserMinSeq = "CON_USER_MIN_SEQ:"
	HasReadSeq             = "HAS_READ_SEQ:"
)

const (
	batchSize             = 100
	dataVersionCollection = "data_version"
	seqKey                = "seq"
	seqVersion            = 38
)

func readConfig[T any](dir string, name string) (*T, error) {
	data, err := os.ReadFile(filepath.Join(dir, name))
	if err != nil {
		return nil, err
	}
	var conf T
	if err := yaml.Unmarshal(data, &conf); err != nil {
		return nil, err
	}
	return &conf, nil
}

func Main(conf string, del time.Duration) error {
	redisConfig, err := readConfig[config.Redis](conf, cmd.RedisConfigFileName)
	if err != nil {
		return err
	}
	mongodbConfig, err := readConfig[config.Mongo](conf, cmd.MongodbConfigFileName)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	rdb, err := redisutil.NewRedisClient(ctx, redisConfig.Build())
	if err != nil {
		return err
	}
	mgocli, err := mongoutil.NewMongoDB(ctx, mongodbConfig.Build())
	if err != nil {
		return err
	}
	versionColl := mgocli.GetDB().Collection(dataVersionCollection)
	converted, err := CheckVersion(versionColl, seqKey, seqVersion)
	if err != nil {
		return err
	}
	if converted {
		fmt.Println("[seq] seq data has been converted")
		return nil
	}
	cSeq, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	uSeq, err := mgo.NewSeqUserMongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	uSplitHasReadSeq := func(id string) (conversationID string, userID string, err error) {
		// full key: HasReadSeq + userID + ":" + conversationID
		arr := strings.Split(id, ":")
		if len(arr) != 2 || arr[0] == "" || arr[1] == "" {
			return "", "", fmt.Errorf("invalid has read seq id %s", id)
		}
		userID = arr[0]
		conversationID = arr[1]
		return
	}
	uSplitConversationUserMinSeq := func(id string) (conversationID string, userID string, err error) {
		// full key: ConversationUserMinSeq + conversationID + "u:" + userID
		arr := strings.Split(id, "u:")
		if len(arr) != 2 || arr[0] == "" || arr[1] == "" {
			return "", "", fmt.Errorf("invalid conversation user min seq id %s", id)
		}
		conversationID = arr[0]
		userID = arr[1]
		return
	}

	ts := []*taskSeq{
		{
			Prefix: MaxSeq,
			GetSeq: cSeq.GetMaxSeq,
			SetSeq: cSeq.SetMaxSeq,
		},
		{
			Prefix: MinSeq,
			GetSeq: cSeq.GetMinSeq,
			SetSeq: cSeq.SetMinSeq,
		},
		{
			Prefix: HasReadSeq,
			GetSeq: func(ctx context.Context, id string) (int64, error) {
				conversationID, userID, err := uSplitHasReadSeq(id)
				if err != nil {
					return 0, err
				}
				return uSeq.GetReadSeq(ctx, conversationID, userID)
			},
			SetSeq: func(ctx context.Context, id string, seq int64) error {
				conversationID, userID, err := uSplitHasReadSeq(id)
				if err != nil {
					return err
				}
				return uSeq.SetReadSeq(ctx, conversationID, userID, seq)
			},
		},
		{
			Prefix: ConversationUserMinSeq,
			GetSeq: func(ctx context.Context, id string) (int64, error) {
				conversationID, userID, err := uSplitConversationUserMinSeq(id)
				if err != nil {
					return 0, err
				}
				return uSeq.GetMinSeq(ctx, conversationID, userID)
			},
			SetSeq: func(ctx context.Context, id string, seq int64) error {
				conversationID, userID, err := uSplitConversationUserMinSeq(id)
				if err != nil {
					return err
				}
				return uSeq.SetMinSeq(ctx, conversationID, userID, seq)
			},
		},
	}

	cancel()
	ctx = context.Background()

	var wg sync.WaitGroup
	wg.Add(len(ts))

	for i := range ts {
		go func(task *taskSeq) {
			defer wg.Done()
			err := seqRedisToMongo(ctx, rdb, task.GetSeq, task.SetSeq, task.Prefix, del, &task.Count)
			task.End = time.Now()
			task.Error = err
		}(ts[i])
	}
	start := time.Now()
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	var buf bytes.Buffer

	printTaskInfo := func(now time.Time) {
		buf.Reset()
		buf.WriteString(now.Format(time.DateTime))
		buf.WriteString("\n")
		for i := range ts {
			task := ts[i]
			if task.Error == nil {
				if task.End.IsZero() {
					buf.WriteString(fmt.Sprintf("[%s] converting %s* count %d", now.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count)))
				} else {
					buf.WriteString(fmt.Sprintf("[%s] success %s* count %d", task.End.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count)))
				}
			} else {
				buf.WriteString(fmt.Sprintf("[%s] failed %s* count %d error %s", task.End.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count), task.Error))
			}
			buf.WriteString("\n")
		}
		fmt.Println(buf.String())
	}

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case s := <-sigs:
			return fmt.Errorf("exit by signal %s", s)
		case <-done:
			errs := make([]error, 0, len(ts))
			for i := range ts {
				task := ts[i]
				if task.Error != nil {
					errs = append(errs, fmt.Errorf("seq %s failed %w", task.Prefix, task.Error))
				}
			}
			if len(errs) > 0 {
				return errors.Join(errs...)
			}
			printTaskInfo(time.Now())
			if err := SetVersion(versionColl, seqKey, seqVersion); err != nil {
				return fmt.Errorf("set mongodb seq version %w", err)
			}
			return nil
		case now := <-ticker.C:
			printTaskInfo(now)
		}
	}
}

type taskSeq struct {
	Prefix string
	Count  int64
	Error  error
	End    time.Time
	GetSeq func(ctx context.Context, id string) (int64, error)
	SetSeq func(ctx context.Context, id string, seq int64) error
}

func seqRedisToMongo(ctx context.Context, rdb redis.UniversalClient, getSeq func(ctx context.Context, id string) (int64, error), setSeq func(ctx context.Context, id string, seq int64) error, prefix string, delAfter time.Duration, count *int64) error {
	var (
		cursor uint64
		keys   []string
		err    error
	)
	for {
		keys, cursor, err = rdb.Scan(ctx, cursor, prefix+"*", batchSize).Result()
		if err != nil {
			return err
		}
		if len(keys) > 0 {
			for _, key := range keys {
				seqStr, err := rdb.Get(ctx, key).Result()
				if err != nil {
					return fmt.Errorf("redis get %s failed %w", key, err)
				}
				seq, err := strconv.Atoi(seqStr)
				if err != nil {
					return fmt.Errorf("invalid %s seq %s", key, seqStr)
				}
				if seq < 0 {
					return fmt.Errorf("invalid %s seq %s", key, seqStr)
				}
				id := strings.TrimPrefix(key, prefix)
				redisSeq := int64(seq)
				mongoSeq, err := getSeq(ctx, id)
				if err != nil {
					return fmt.Errorf("get mongo seq %s failed %w", key, err)
				}
				if mongoSeq < redisSeq {
					if err := setSeq(ctx, id, redisSeq); err != nil {
						return fmt.Errorf("set mongo seq %s failed %w", key, err)
					}
				}
				if delAfter > 0 {
					if err := rdb.Expire(ctx, key, delAfter).Err(); err != nil {
						return fmt.Errorf("redis expire key %s failed %w", key, err)
					}
				} else {
					if err := rdb.Del(ctx, key).Err(); err != nil {
						return fmt.Errorf("redis del key %s failed %w", key, err)
					}
				}
				atomic.AddInt64(count, 1)
			}
		}
		if cursor == 0 {
			return nil
		}
	}
}

func CheckVersion(coll *mongo.Collection, key string, currentVersion int) (converted bool, err error) {
	type VersionTable struct {
		Key   string `bson:"key"`
		Value string `bson:"value"`
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	res, err := mongoutil.FindOne[VersionTable](ctx, coll, bson.M{"key": key})
	if err == nil {
		ver, err := strconv.Atoi(res.Value)
		if err != nil {
			return false, fmt.Errorf("version %s parse error %w", res.Value, err)
		}
		if ver >= currentVersion {
			return true, nil
		}
		return false, nil
	} else if errors.Is(err, mongo.ErrNoDocuments) {
		return false, nil
	} else {
		return false, err
	}
}

func SetVersion(coll *mongo.Collection, key string, version int) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	option := options.Update().SetUpsert(true)
	filter := bson.M{"key": key, "value": strconv.Itoa(version)}
	update := bson.M{"$set": bson.M{"key": key, "value": strconv.Itoa(version)}}
	return mongoutil.UpdateOne(ctx, coll, filter, update, false, option)
}
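For clarity, the version gate above makes the conversion idempotent; a hedged sketch of the intended call pattern (the wrapper is hypothetical, the collection and constants are the ones defined in this file):

// Sketch: run a migration step at most once per seqVersion.
func runOnce(coll *mongo.Collection, migrate func() error) error {
	converted, err := CheckVersion(coll, seqKey, seqVersion)
	if err != nil || converted {
		return err // err is nil when the data was already converted
	}
	if err := migrate(); err != nil {
		return err
	}
	return SetVersion(coll, seqKey, seqVersion)
}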
@ -0,0 +1,22 @@
package main

import (
	"flag"
	"fmt"
	"time"

	"github.com/openimsdk/open-im-server/v3/tools/seq/internal"
)

func main() {
	var (
		config string
		second int
	)
	flag.StringVar(&config, "c", "/Users/chao/Desktop/project/open-im-server/config", "config directory")
	flag.IntVar(&second, "sec", 3600*24, "delay (in seconds) before the original seq keys are deleted after conversion")
	flag.Parse()
	if err := internal.Main(config, time.Duration(second)*time.Second); err != nil {
		fmt.Println("seq task", err)
		return
	}
	fmt.Println("seq task success!")
}