feat: new features merged (#2409)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* fix: minio config url recognition error

* new mongo

* friend incr sync

* mage

* optimize version log

* sync

* group sync

* sync option

* refactor: replace `friend` package with `relation`.

* refactor: update latest commit to relation.

* sync option

* sync

* go.mod

* seq

* update: go mod

* refactor: change incremental to full

* feat: get full friend user ids

* feat: api and config

* seq

* group version

* merge

* seq

* fix: sort by ID to avoid unstable sorting of friends.

* group

* user version

* seq

* seq user

* user online

* implement minio expire delete.

* user online

* config

* fix

* implement minio expire delete logic.

* online cache

* feat: implement scheduled deletion of outdated objects in MinIO.

* update gomake version

* implement FindExpires pagination.

* remove unnecessary incr.

* fix incorrect args call.

* online push

* resolving conflicts

* test

* api prommetrics

* rpc prommetrics

* online status

* sub

* conversation version incremental

* merge seq

* merge online

* GetOwnerConversation

* fix: change incremental syncer router name.

* rockscache batch get

* rockscache seq batch get

* fix: GetMsgDocModelByIndex bug

* update go.mod

* merge

* feat: prometheus

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>
Co-authored-by: Monet Lee <monet_lee@163.com>
Co-authored-by: OpenIM-Gordon <46924906+FGadvancer@users.noreply.github.com>
Co-authored-by: icey-yu <1186114839@qq.com>

@@ -5,6 +5,9 @@ ZOOKEEPER_IMAGE=bitnami/zookeeper:3.8
KAFKA_IMAGE=bitnami/kafka:3.5.1
MINIO_IMAGE=minio/minio:RELEASE.2024-01-11T07-46-16Z
ETCD_IMAGE=quay.io/coreos/etcd:v3.5.13
PROMETHEUS_IMAGE=prom/prometheus:v2.45.6
ALERTMANAGER_IMAGE=prom/alertmanager:v0.27.0
GRAFANA_IMAGE=grafana/grafana:11.0.1
OPENIM_WEB_FRONT_IMAGE=openim/openim-web-front:release-v3.5.1
OPENIM_ADMIN_FRONT_IMAGE=openim/openim-admin-front:release-v1.7

.gitignore

@@ -34,11 +34,7 @@ deployments/charts/generated-configs/
### OpenIM Config ###
.env
config/config.yaml
config/alertmanager.yml
config/prometheus.yml
config/email.tmpl
config/notification.yaml
config/instance-down-rules.yml
### OpenIM deploy ###
deployments/openim-server/charts

@@ -25,5 +25,4 @@ func main() {
if err := cmd.NewApiCmd().Exec(); err != nil {
program.ExitWithError(err)
}
}

@@ -0,0 +1,25 @@
global:
resolve_timeout: 5m
smtp_from: alert@openim.io
smtp_smarthost: smtp.163.com:465
smtp_auth_username: alert@openim.io
smtp_auth_password: YOURAUTHPASSWORD
smtp_require_tls: false
smtp_hello: xxx
templates:
- /etc/alertmanager/email.tmpl
route:
group_by: ['alertname']
group_wait: 5s
group_interval: 5s
repeat_interval: 5m
receiver: email
receivers:
- name: email
email_configs:
- to: 'alert@example.com'
html: '{{ template "email.to.html" . }}'
headers: { Subject: "[OPENIM-SERVER]Alarm" }
send_resolved: true

@@ -0,0 +1,16 @@
{{ define "email.to.html" }}
{{ range .Alerts }}
<!-- Begin of OpenIM Alert -->
<div style="border:1px solid #ccc; padding:10px; margin-bottom:10px;">
<h3>OpenIM Alert</h3>
<p><strong>Alert Program:</strong> Prometheus Alert</p>
<p><strong>Severity Level:</strong> {{ .Labels.severity }}</p>
<p><strong>Alert Type:</strong> {{ .Labels.alertname }}</p>
<p><strong>Affected Host:</strong> {{ .Labels.instance }}</p>
<p><strong>Affected Service:</strong> {{ .Labels.job }}</p>
<p><strong>Alert Subject:</strong> {{ .Annotations.summary }}</p>
<p><strong>Trigger Time:</strong> {{ .StartsAt.Format "2006-01-02 15:04:05" }}</p>
</div>
<!-- End of OpenIM Alert -->
{{ end }}
{{ end }}

@@ -0,0 +1,22 @@
groups:
- name: instance_down
rules:
- alert: InstanceDown
expr: up == 0
for: 1m
labels:
severity: critical
annotations:
summary: "Instance {{ $labels.instance }} down"
description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minutes."
- name: database_insert_failure_alerts
rules:
- alert: DatabaseInsertFailed
expr: (increase(msg_insert_redis_failed_total[5m]) > 0) or (increase(msg_insert_mongo_failed_total[5m]) > 0)
for: 1m
labels:
severity: critical
annotations:
summary: "Increase in MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter detected"
description: "Either MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter has increased in the last 5 minutes, indicating failures in message insert operations to Redis or MongoDB,maybe the redis or mongodb is crash."

@@ -1,2 +1,3 @@
chatRecordsClearTime: "0 2 * * *"
cronExecuteTime: "0 2 * * *"
retainChatRecords: 365
fileExpireTime: 90
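
Both jobs are now scheduled off the shared cronExecuteTime spec. A minimal sketch, assuming the robfig/cron/v3 parser behind the cron task's crontab, of how the 5-field spec resolves to the next 02:00 run:

package main

import (
    "fmt"
    "time"

    "github.com/robfig/cron/v3"
)

func main() {
    // "0 2 * * *" = minute 0, hour 2, every day: both cron jobs fire at 02:00.
    sched, err := cron.ParseStandard("0 2 * * *")
    if err != nil {
        panic(err)
    }
    fmt.Println("next run:", sched.Next(time.Now()))
}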

@@ -0,0 +1,83 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets: ['192.168.2.22:19093']
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
- "instance-down-rules.yml"
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label "job='job_name'" to any timeseries scraped from this config.
# Targets monitored by Prometheus.
# Prometheus scrapes the application services below.
- job_name: 'node_exporter'
static_configs:
- targets: [ '192.168.2.22:20114' ]
- job_name: 'openimserver-openim-api'
static_configs:
- targets: [ '192.168.2.22:20113' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-msggateway'
static_configs:
- targets: [ '192.168.2.22:20112' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-msgtransfer'
static_configs:
- targets: [ 192.168.2.22:20111, 192.168.2.22:20110, 192.168.2.22:20109, 192.168.2.22:20108 ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-push'
static_configs:
- targets: [ '192.168.2.22:20107' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-rpc-auth'
static_configs:
- targets: [ '192.168.2.22:20106' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-rpc-conversation'
static_configs:
- targets: [ '192.168.2.22:20105' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-rpc-friend'
static_configs:
- targets: [ '192.168.2.22:20104' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-rpc-group'
static_configs:
- targets: [ '192.168.2.22:20103' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-rpc-msg'
static_configs:
- targets: [ '192.168.2.22:20102' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-rpc-third'
static_configs:
- targets: [ '192.168.2.22:20101' ]
labels:
namespace: 'default'
- job_name: 'openimserver-openim-rpc-user'
static_configs:
- targets: [ '192.168.2.22:20100' ]
labels:
namespace: 'default'
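
Every target listed above is scraped over HTTP, so each service must serve plain-text metrics on /metrics at its Prometheus port. A minimal sketch of such an exposer with client_golang (already a dependency in go.mod below); the port and registry setup are illustrative:

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    // Go runtime metrics, as the old msgtransfer code registered by hand.
    reg.MustRegister(collectors.NewGoCollector())
    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
    // Port is illustrative; in the deployment each service uses its own prometheusPort.
    _ = http.ListenAndServe(":20114", nil)
}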

@@ -140,5 +140,50 @@ services:
networks:
- openim
prometheus:
image: ${PROMETHEUS_IMAGE}
container_name: prometheus
restart: always
volumes:
- ./config/prometheus.yml:/etc/prometheus/prometheus.yml
- ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
- ${DATA_DIR}/components/prometheus/data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
ports:
- "19091:9090"
networks:
- openim
alertmanager:
image: ${ALERTMANAGER_IMAGE}
container_name: alertmanager
restart: always
volumes:
- ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
- ./config/email.tmpl:/etc/alertmanager/email.tmpl
ports:
- "19093:9093"
networks:
- openim
grafana:
image: ${GRAFANA_IMAGE}
container_name: grafana
user: root
restart: always
environment:
- GF_SECURITY_ALLOW_EMBEDDING=true
- GF_SESSION_COOKIE_SAMESITE=none
- GF_SESSION_COOKIE_SECURE=true
- GF_AUTH_ANONYMOUS_ENABLED=true
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
ports:
- "13000:3000"
volumes:
- ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
networks:
- openim

@@ -13,7 +13,7 @@ require (
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/mitchellh/mapstructure v1.5.0
github.com/openimsdk/protocol v0.0.69-alpha.30
github.com/openimsdk/tools v0.0.49-alpha.49
github.com/openimsdk/tools v0.0.49-alpha.50
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.18.0
github.com/stretchr/testify v1.9.0
@@ -194,5 +194,3 @@ require (
golang.org/x/crypto v0.21.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
)
//replace github.com/openimsdk/protocol => /Users/chao/Desktop/project/protocol

@@ -313,8 +313,8 @@ github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCF
github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
github.com/openimsdk/protocol v0.0.69-alpha.30 h1:OXzCIpDpIY/GI6h1SDYWN51OS9Xv/BcHaOwq8whPKqI=
github.com/openimsdk/protocol v0.0.69-alpha.30/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
github.com/openimsdk/tools v0.0.49-alpha.49 h1:bkVxlEDM9rJxo5a98mk2ETR0xbEv+5mjeVVVFQfARAQ=
github.com/openimsdk/tools v0.0.49-alpha.49/go.mod h1:HtSRjPTL8PsuZ+PhR5noqzrYBF0sdwW3/O/sWVucWg8=
github.com/openimsdk/tools v0.0.49-alpha.50 h1:7CaYLVtsBU5kyiTetUOuOkO5FFFmMvSzBEfh2tfCn90=
github.com/openimsdk/tools v0.0.49-alpha.50/go.mod h1:HtSRjPTL8PsuZ+PhR5noqzrYBF0sdwW3/O/sWVucWg8=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=

@@ -58,7 +58,6 @@ func (o *FriendApi) GetFriendList(c *gin.Context) {
func (o *FriendApi) GetDesignatedFriends(c *gin.Context) {
a2r.Call(relation.FriendClient.GetDesignatedFriends, o.Client, c)
//a2r.Call(relation.FriendClient.GetDesignatedFriends, o.Client, c, a2r.NewNilReplaceOption(relation.FriendClient.GetDesignatedFriends))
}
func (o *FriendApi) SetFriendRemark(c *gin.Context) {

@@ -29,7 +29,6 @@ import (
"time"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
@@ -72,10 +71,8 @@ func Start(ctx context.Context, index int, config *Config) error {
netDone <- struct{}{}
return
}
p := ginprom.NewPrometheus("app", prommetrics.GetGinCusMetrics("Api"))
p.SetListenAddress(fmt.Sprintf(":%d", prometheusPort))
if err = p.Use(router); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, fmt.Sprintf("prometheus start err: %d", prometheusPort))
if err := prommetrics.ApiInit(prometheusPort); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, fmt.Sprintf("api prometheus start err: %d", prometheusPort))
netDone <- struct{}{}
}
}()

@@ -2,15 +2,13 @@ package api
import (
"fmt"
"net/http"
"strings"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/go-playground/validator/v10"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
@@ -18,8 +16,25 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"net/http"
"strings"
)
func prommetricsGin() gin.HandlerFunc {
return func(c *gin.Context) {
c.Next()
path := c.FullPath()
if c.Writer.Status() == http.StatusNotFound {
prommetrics.HttpCall("<404>", c.Request.Method, c.Writer.Status())
} else {
prommetrics.HttpCall(path, c.Request.Method, c.Writer.Status())
}
if resp := apiresp.GetGinApiResponse(c); resp != nil {
prommetrics.APICall(path, c.Request.Method, resp.ErrCode)
}
}
}
func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.Engine {
disCov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
@@ -38,7 +53,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth)
thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL)
r.Use(gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
u := NewUserApi(*userRpc)
m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID)
userRouterGroup := r.Group("/user")

@@ -75,6 +75,8 @@ type Client struct {
token string
hbCtx context.Context
hbCancel context.CancelFunc
subLock sync.Mutex
subUserIDs map[string]struct{}
}
// ResetClient updates the client's state with new connection and context information.
@@ -216,6 +218,8 @@ func (c *Client) handleMessage(message []byte) error {
resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq)
case WsSetBackgroundStatus:
resp, messageErr = c.setAppBackgroundStatus(ctx, binaryReq)
case WsSubUserOnlineStatus:
resp, messageErr = c.longConnServer.SubUserOnlineStatus(ctx, c, binaryReq)
default:
return fmt.Errorf(
"ReqIdentifier failed,sendID:%s,msgIncr:%s,reqIdentifier:%d",

@@ -16,10 +16,10 @@ package msggateway
import (
"crypto/rand"
"github.com/stretchr/testify/assert"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"unsafe"
)
func mockRandom() []byte {
@@ -132,3 +132,8 @@ func BenchmarkDecompressWithSyncPool(b *testing.B) {
assert.Equal(b, nil, err)
}
}
func TestName(t *testing.T) {
t.Log(unsafe.Sizeof(Client{}))
}

@@ -43,6 +43,7 @@ const (
WSKickOnlineMsg = 2002
WsLogoutMsg = 2003
WsSetBackgroundStatus = 2004
WsSubUserOnlineStatus = 2005
WSDataError = 3001
)

@@ -19,6 +19,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway"
"github.com/openimsdk/tools/discovery"
@@ -31,6 +32,10 @@ import (
func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
s.LongConnServer.SetDiscoveryRegistry(disCov, config)
msggateway.RegisterMsgGatewayServer(server, s)
s.userRcp = rpcclient.NewUserRpcClient(disCov, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
if s.ready != nil {
return s.ready(s)
}
return nil
}
@@ -50,18 +55,21 @@ type Server struct {
LongConnServer LongConnServer
config *Config
pushTerminal map[int]struct{}
ready func(srv *Server) error
userRcp rpcclient.UserRpcClient
}
func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
s.LongConnServer = LongConnServer
}
func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config) *Server {
func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config, ready func(srv *Server) error) *Server {
s := &Server{
rpcPort: rpcPort,
LongConnServer: longConnServer,
pushTerminal: make(map[int]struct{}),
config: conf,
ready: ready,
}
s.pushTerminal[constant.IOSPlatformID] = struct{}{}
s.pushTerminal[constant.AndroidPlatformID] = struct{}{}

@@ -17,6 +17,8 @@ package msggateway
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil"
"time"
@@ -26,6 +28,7 @@ import (
type Config struct {
MsgGateway config.MsgGateway
Share config.Share
RedisConfig config.Redis
WebhooksConfig config.Webhooks
Discovery config.Discovery
}
@@ -42,18 +45,25 @@ func Start(ctx context.Context, index int, conf *Config) error {
if err != nil {
return err
}
longServer, err := NewWsServer(
rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
if err != nil {
return err
}
longServer := NewWsServer(
conf,
WithPort(wsPort),
WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)),
WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second),
WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen),
)
if err != nil {
return err
}
hubServer := NewServer(rpcPort, longServer, conf)
hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges)
return nil
})
go longServer.ChangeOnlineStatus(4)
netDone := make(chan error)
go func() {
err = hubServer.Start(ctx, index, conf)
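
NewServer now takes a ready callback because the OnlineCache depends on the user RPC client, which only exists after InitServer runs; the callback defers that wiring until then. A stripped-down sketch of the pattern, with illustrative types rather than the gateway's real ones:

package main

import "fmt"

type userRpc struct{ name string }

type server struct {
    userRpc *userRpc
    ready   func(*server) error
}

func newServer(ready func(*server) error) *server {
    return &server{ready: ready}
}

func (s *server) initServer() error {
    s.userRpc = &userRpc{name: "user"} // the dependency becomes available only here
    if s.ready != nil {
        return s.ready(s) // safe point to build caches that need userRpc
    }
    return nil
}

func main() {
    s := newServer(func(srv *server) error {
        fmt.Println("wiring online cache with", srv.userRpc.name)
        return nil
    })
    _ = s.initServer()
}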

@@ -0,0 +1,112 @@
package msggateway
import (
"context"
"crypto/md5"
"encoding/binary"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
pbuser "github.com/openimsdk/protocol/user"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/utils/datautil"
"math/rand"
"strconv"
"time"
)
func (ws *WsServer) ChangeOnlineStatus(concurrent int) {
if concurrent < 1 {
concurrent = 1
}
const renewalTime = cachekey.OnlineExpire / 3
//const renewalTime = time.Second * 10
renewalTicker := time.NewTicker(renewalTime)
requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent)
changeStatus := make([][]UserState, concurrent)
for i := 0; i < concurrent; i++ {
requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64)
changeStatus[i] = make([]UserState, 0, 100)
}
mergeTicker := time.NewTicker(time.Second)
local2pb := func(u UserState) *pbuser.UserOnlineStatus {
return &pbuser.UserOnlineStatus{
UserID: u.UserID,
Online: u.Online,
Offline: u.Offline,
}
}
rNum := rand.Uint64()
pushUserState := func(us ...UserState) {
for _, u := range us {
sum := md5.Sum([]byte(u.UserID))
i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent)
changeStatus[i] = append(changeStatus[i], u)
status := changeStatus[i]
if len(status) == cap(status) {
req := &pbuser.SetUserOnlineStatusReq{
Status: datautil.Slice(status, local2pb),
}
changeStatus[i] = status[:0]
select {
case requestChs[i] <- req:
default:
log.ZError(context.Background(), "user online processing is too slow", nil)
}
}
}
}
pushAllUserState := func() {
for i, status := range changeStatus {
if len(status) == 0 {
continue
}
req := &pbuser.SetUserOnlineStatusReq{
Status: datautil.Slice(status, local2pb),
}
changeStatus[i] = status[:0]
select {
case requestChs[i] <- req:
default:
log.ZError(context.Background(), "user online processing is too slow", nil)
}
}
}
opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10))
doRequest := func(req *pbuser.SetUserOnlineStatusReq) {
ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5)
defer cancel()
if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil {
log.ZError(ctx, "update user online status", err)
}
}
for i := 0; i < concurrent; i++ {
go func(ch <-chan *pbuser.SetUserOnlineStatusReq) {
for req := range ch {
doRequest(req)
}
}(requestChs[i])
}
for {
select {
case <-mergeTicker.C:
pushAllUserState()
case now := <-renewalTicker.C:
deadline := now.Add(-cachekey.OnlineExpire / 3)
users := ws.clients.GetAllUserStatus(deadline, now)
log.ZDebug(context.Background(), "renewal ticker", "deadline", deadline, "nowtime", now, "num", len(users))
pushUserState(users...)
case state := <-ws.clients.UserState():
log.ZDebug(context.Background(), "OnlineCache user online change", "userID", state.UserID, "online", state.Online, "offline", state.Offline)
pushUserState(state)
}
}
}
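
The worker index above is md5(userID) plus a per-process random offset, modulo the worker count, so one user's online/offline events are always ordered on the same goroutine while users spread across workers. The routing in isolation:

package main

import (
    "crypto/md5"
    "encoding/binary"
    "fmt"
    "math/rand"
)

func main() {
    const concurrent = 4
    rNum := rand.Uint64() // fixed for the process lifetime, as in ChangeOnlineStatus
    for _, userID := range []string{"u1", "u2", "u1"} {
        sum := md5.Sum([]byte(userID))
        i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent)
        fmt.Printf("%s -> worker %d\n", userID, i) // u1 hits the same worker both times
    }
}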

@@ -0,0 +1,181 @@
package msggateway
import (
"context"
"encoding/json"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/idutil"
"google.golang.org/protobuf/proto"
"sync"
"time"
)
func (ws *WsServer) subscriberUserOnlineStatusChanges(ctx context.Context, userID string, platformIDs []int32) {
if ws.clients.RecvSubChange(userID, platformIDs) {
log.ZDebug(ctx, "gateway receive subscription message and go back online", "userID", userID, "platformIDs", platformIDs)
} else {
log.ZDebug(ctx, "gateway ignore user online status changes", "userID", userID, "platformIDs", platformIDs)
}
ws.pushUserIDOnlineStatus(ctx, userID, platformIDs)
}
func (ws *WsServer) SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) {
var sub sdkws.SubUserOnlineStatus
if err := proto.Unmarshal(data.Data, &sub); err != nil {
return nil, err
}
ws.subscription.Sub(client, sub.SubscribeUserID, sub.UnsubscribeUserID)
var resp sdkws.SubUserOnlineStatusTips
if len(sub.SubscribeUserID) > 0 {
resp.Subscribers = make([]*sdkws.SubUserOnlineStatusElem, 0, len(sub.SubscribeUserID))
for _, userID := range sub.SubscribeUserID {
platformIDs, err := ws.online.GetUserOnlinePlatform(ctx, userID)
if err != nil {
return nil, err
}
resp.Subscribers = append(resp.Subscribers, &sdkws.SubUserOnlineStatusElem{
UserID: userID,
OnlinePlatformIDs: platformIDs,
})
}
}
return proto.Marshal(&resp)
}
type subClient struct {
clients map[string]*Client
}
func newSubscription() *Subscription {
return &Subscription{
userIDs: make(map[string]*subClient),
}
}
type Subscription struct {
lock sync.RWMutex
userIDs map[string]*subClient
}
func (s *Subscription) GetClient(userID string) []*Client {
s.lock.RLock()
defer s.lock.RUnlock()
cs, ok := s.userIDs[userID]
if !ok {
return nil
}
clients := make([]*Client, 0, len(cs.clients))
for _, client := range cs.clients {
clients = append(clients, client)
}
return clients
}
func (s *Subscription) DelClient(client *Client) {
client.subLock.Lock()
userIDs := datautil.Keys(client.subUserIDs)
for _, userID := range userIDs {
delete(client.subUserIDs, userID)
}
client.subLock.Unlock()
if len(userIDs) == 0 {
return
}
addr := client.ctx.GetRemoteAddr()
s.lock.Lock()
defer s.lock.Unlock()
for _, userID := range userIDs {
sub, ok := s.userIDs[userID]
if !ok {
continue
}
delete(sub.clients, addr)
if len(sub.clients) == 0 {
delete(s.userIDs, userID)
}
}
}
func (s *Subscription) Sub(client *Client, addUserIDs, delUserIDs []string) {
if len(addUserIDs)+len(delUserIDs) == 0 {
return
}
var (
del = make(map[string]struct{})
add = make(map[string]struct{})
)
client.subLock.Lock()
for _, userID := range delUserIDs {
if _, ok := client.subUserIDs[userID]; !ok {
continue
}
del[userID] = struct{}{}
delete(client.subUserIDs, userID)
}
for _, userID := range addUserIDs {
delete(del, userID)
if _, ok := client.subUserIDs[userID]; ok {
continue
}
client.subUserIDs[userID] = struct{}{}
}
client.subLock.Unlock()
if len(del)+len(add) == 0 {
return
}
addr := client.ctx.GetRemoteAddr()
s.lock.Lock()
defer s.lock.Unlock()
for userID := range del {
sub, ok := s.userIDs[userID]
if !ok {
continue
}
delete(sub.clients, addr)
if len(sub.clients) == 0 {
delete(s.userIDs, userID)
}
}
for userID := range add {
sub, ok := s.userIDs[userID]
if !ok {
sub = &subClient{clients: make(map[string]*Client)}
s.userIDs[userID] = sub
}
sub.clients[addr] = client
}
}
func (ws *WsServer) pushUserIDOnlineStatus(ctx context.Context, userID string, platformIDs []int32) {
clients := ws.subscription.GetClient(userID)
if len(clients) == 0 {
return
}
msgContent, err := json.Marshal(platformIDs)
if err != nil {
log.ZError(ctx, "pushUserIDOnlineStatus json.Marshal", err)
return
}
now := time.Now().UnixMilli()
msgID := idutil.GetMsgIDByMD5(userID)
msg := &sdkws.MsgData{
SendID: userID,
ClientMsgID: msgID,
ServerMsgID: msgID,
SenderPlatformID: constant.AdminPlatformID,
SessionType: constant.NotificationChatType,
ContentType: constant.UserSubscribeOnlineStatusNotification,
Content: msgContent,
SendTime: now,
CreateTime: now,
}
for _, client := range clients {
msg.RecvID = client.UserID
if err := client.PushMessage(ctx, msg); err != nil {
log.ZError(ctx, "UserSubscribeOnlineStatusNotification push failed", err, "userID", client.UserID, "platformID", client.PlatformID, "changeUserID", userID, "content", msgContent)
}
}
}
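
Subscription is a two-level index, watched userID → subscriber remote address → client, mirrored by each client's subUserIDs set so either side can be cleaned up first. A reduced, single-goroutine model of the forward index (no locks, plain string addresses):

package main

import "fmt"

// Reduced model: watched userID -> set of subscriber connection addresses.
type subscription struct {
    userIDs map[string]map[string]struct{}
}

func (s *subscription) sub(addr string, userIDs ...string) {
    for _, id := range userIDs {
        if s.userIDs[id] == nil {
            s.userIDs[id] = make(map[string]struct{})
        }
        s.userIDs[id][addr] = struct{}{}
    }
}

func (s *subscription) unsub(addr string, userIDs ...string) {
    for _, id := range userIDs {
        if set := s.userIDs[id]; set != nil {
            delete(set, addr)
            if len(set) == 0 {
                delete(s.userIDs, id) // drop empty entries, as DelClient does
            }
        }
    }
}

func main() {
    s := &subscription{userIDs: map[string]map[string]struct{}{}}
    s.sub("10.0.0.1:50001", "alice")
    s.sub("10.0.0.2:50002", "alice")
    s.unsub("10.0.0.1:50001", "alice")
    fmt.Println(len(s.userIDs["alice"])) // 1 subscriber left
}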

@@ -1,135 +1,185 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package msggateway
import (
"context"
"sync"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"sync"
"time"
)
type UserMap struct {
m sync.Map
type UserMap interface {
GetAll(userID string) ([]*Client, bool)
Get(userID string, platformID int) ([]*Client, bool, bool)
Set(userID string, v *Client)
DeleteClients(userID string, clients []*Client) (isDeleteUser bool)
UserState() <-chan UserState
GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState
RecvSubChange(userID string, platformIDs []int32) bool
}
func newUserMap() *UserMap {
return &UserMap{}
type UserState struct {
UserID string
Online []int32
Offline []int32
}
func (u *UserMap) GetAll(key string) ([]*Client, bool) {
allClients, ok := u.m.Load(key)
if ok {
return allClients.([]*Client), ok
}
return nil, ok
type UserPlatform struct {
Time time.Time
Clients []*Client
}
func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) {
allClients, userExisted := u.m.Load(key)
if userExisted {
var clients []*Client
for _, client := range allClients.([]*Client) {
if client.PlatformID == platformID {
clients = append(clients, client)
}
}
if len(clients) > 0 {
return clients, userExisted, true
}
return clients, userExisted, false
func (u *UserPlatform) PlatformIDs() []int32 {
if len(u.Clients) == 0 {
return nil
}
platformIDs := make([]int32, 0, len(u.Clients))
for _, client := range u.Clients {
platformIDs = append(platformIDs, int32(client.PlatformID))
}
return nil, userExisted, false
return platformIDs
}
// Set adds a client to the map.
func (u *UserMap) Set(key string, v *Client) {
allClients, existed := u.m.Load(key)
if existed {
log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID)
oldClients := allClients.([]*Client)
oldClients = append(oldClients, v)
u.m.Store(key, oldClients)
} else {
log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID)
func (u *UserPlatform) PlatformIDSet() map[int32]struct{} {
if len(u.Clients) == 0 {
return nil
}
platformIDs := make(map[int32]struct{})
for _, client := range u.Clients {
platformIDs[int32(client.PlatformID)] = struct{}{}
}
return platformIDs
}
var clients []*Client
clients = append(clients, v)
u.m.Store(key, clients)
func newUserMap() UserMap {
return &userMap{
data: make(map[string]*UserPlatform),
ch: make(chan UserState, 10000),
}
}
func (u *UserMap) delete(key string, connRemoteAddr string) (isDeleteUser bool) {
// Attempt to load the clients associated with the key.
allClients, existed := u.m.Load(key)
if !existed {
// Return false immediately if the key does not exist.
type userMap struct {
lock sync.RWMutex
data map[string]*UserPlatform
ch chan UserState
}
func (u *userMap) RecvSubChange(userID string, platformIDs []int32) bool {
u.lock.RLock()
defer u.lock.RUnlock()
result, ok := u.data[userID]
if !ok {
return false
}
// Convert allClients to a slice of *Client.
oldClients := allClients.([]*Client)
var remainingClients []*Client
for _, client := range oldClients {
// Keep clients that do not match the connRemoteAddr.
if client.ctx.GetRemoteAddr() != connRemoteAddr {
remainingClients = append(remainingClients, client)
}
localPlatformIDs := result.PlatformIDSet()
for _, platformID := range platformIDs {
delete(localPlatformIDs, platformID)
}
if len(localPlatformIDs) == 0 {
return false
}
u.push(userID, result, nil)
return true
}
// If no clients remain after filtering, delete the key from the map.
if len(remainingClients) == 0 {
u.m.Delete(key)
func (u *userMap) push(userID string, userPlatform *UserPlatform, offline []int32) bool {
select {
case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}:
userPlatform.Time = time.Now()
return true
default:
return false
}
}
// Otherwise, update the key with the remaining clients.
u.m.Store(key, remainingClients)
return false
func (u *userMap) GetAll(userID string) ([]*Client, bool) {
u.lock.RLock()
defer u.lock.RUnlock()
result, ok := u.data[userID]
if !ok {
return nil, false
}
return result.Clients, true
}
func (u *UserMap) deleteClients(key string, clients []*Client) (isDeleteUser bool) {
m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) {
return c.ctx.GetRemoteAddr(), struct{}{}
})
allClients, existed := u.m.Load(key)
if !existed {
// If the key doesn't exist, return false.
return false
func (u *userMap) Get(userID string, platformID int) ([]*Client, bool, bool) {
u.lock.RLock()
defer u.lock.RUnlock()
result, ok := u.data[userID]
if !ok {
return nil, false, false
}
var clients []*Client
for _, client := range result.Clients {
if client.PlatformID == platformID {
clients = append(clients, client)
}
}
return clients, true, len(clients) > 0
}
// Filter out clients that are in the deleteMap.
oldClients := allClients.([]*Client)
var remainingClients []*Client
for _, client := range oldClients {
if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted {
remainingClients = append(remainingClients, client)
func (u *userMap) Set(userID string, client *Client) {
u.lock.Lock()
defer u.lock.Unlock()
result, ok := u.data[userID]
if ok {
result.Clients = append(result.Clients, client)
} else {
result = &UserPlatform{
Clients: []*Client{client},
}
u.data[userID] = result
}
u.push(client.UserID, result, nil)
}
// Update or delete the key based on the remaining clients.
if len(remainingClients) == 0 {
u.m.Delete(key)
return true
func (u *userMap) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) {
if len(clients) == 0 {
return false
}
u.lock.Lock()
defer u.lock.Unlock()
result, ok := u.data[userID]
if !ok {
return false
}
offline := make([]int32, 0, len(clients))
deleteAddr := datautil.SliceSetAny(clients, func(client *Client) string {
return client.ctx.GetRemoteAddr()
})
tmp := result.Clients
result.Clients = result.Clients[:0]
for _, client := range tmp {
if _, delCli := deleteAddr[client.ctx.GetRemoteAddr()]; delCli {
offline = append(offline, int32(client.PlatformID))
} else {
result.Clients = append(result.Clients, client)
}
}
defer u.push(userID, result, offline)
if len(result.Clients) > 0 {
return false
}
delete(u.data, userID)
return true
}
u.m.Store(key, remainingClients)
return false
func (u *userMap) GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState {
u.lock.RLock()
defer u.lock.RUnlock()
result := make([]UserState, 0, len(u.data))
for userID, userPlatform := range u.data {
if userPlatform.Time.Before(deadline) {
continue
}
userPlatform.Time = nowtime
online := make([]int32, 0, len(userPlatform.Clients))
for _, client := range userPlatform.Clients {
online = append(online, int32(client.PlatformID))
}
result = append(result, UserState{UserID: userID, Online: online})
}
return result
}
func (u *UserMap) DeleteAll(key string) {
u.m.Delete(key)
func (u *userMap) UserState() <-chan UserState {
return u.ch
}
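
userMap reports state transitions through a buffered channel and sheds updates rather than blocking while the map lock is held. The select/default idiom in isolation:

package main

import "fmt"

type UserState struct{ UserID string }

func main() {
    ch := make(chan UserState, 2) // userMap uses a 10000-slot buffer
    push := func(s UserState) bool {
        select {
        case ch <- s:
            return true
        default: // consumer too slow: drop the update instead of blocking the lock path
            return false
        }
    }
    for _, id := range []string{"a", "b", "c"} {
        fmt.Println(id, "queued:", push(UserState{UserID: id})) // "c" is shed
    }
}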

@@ -18,6 +18,7 @@ import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
pbAuth "github.com/openimsdk/protocol/auth"
"github.com/openimsdk/tools/mcontext"
"net/http"
@@ -48,6 +49,7 @@ type LongConnServer interface {
KickUserConn(client *Client) error
UnRegister(c *Client)
SetKickHandlerInfo(i *kickHandler)
SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error)
Compressor
Encoder
MessageHandler
@@ -60,7 +62,9 @@ type WsServer struct {
registerChan chan *Client
unregisterChan chan *Client
kickHandlerChan chan *kickHandler
clients *UserMap
clients UserMap
online *rpccache.OnlineCache
subscription *Subscription
clientPool sync.Pool
onlineUserNum atomic.Int64
onlineUserConnNum atomic.Int64
@@ -90,18 +94,18 @@ func (ws *WsServer) SetDiscoveryRegistry(disCov discovery.SvcDiscoveryRegistry,
ws.disCov = disCov
}
func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
if err != nil {
log.ZWarn(ctx, "SetUserStatus err", err)
}
switch status {
case constant.Online:
ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
case constant.Offline:
ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
}
}
//func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
// err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
// if err != nil {
// log.ZWarn(ctx, "SetUserStatus err", err)
// }
// switch status {
// case constant.Online:
// ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
// case constant.Offline:
// ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
// }
//}
func (ws *WsServer) UnRegister(c *Client) {
ws.unregisterChan <- c
@@ -119,11 +123,13 @@ func (ws *WsServer) GetUserPlatformCons(userID string, platform int) ([]*Client,
return ws.clients.Get(userID, platform)
}
func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer {
var config configs
for _, o := range opts {
o(&config)
}
//userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
v := validator.New()
return &WsServer{
msgGatewayConfig: msgGatewayConfig,
@@ -141,10 +147,11 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
kickHandlerChan: make(chan *kickHandler, 1000),
validate: v,
clients: newUserMap(),
subscription: newSubscription(),
Compressor: NewGzipCompressor(),
Encoder: NewGobEncoder(),
webhookClient: webhook.NewWebhookClient(msgGatewayConfig.WebhooksConfig.URL),
}, nil
}
}
func (ws *WsServer) Run(done chan error) error {
@@ -278,11 +285,11 @@ func (ws *WsServer) registerClient(client *Client) {
}()
}
wg.Add(1)
go func() {
defer wg.Done()
ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
}()
//wg.Add(1)
//go func() {
// defer wg.Done()
// ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
//}()
wg.Wait()
@@ -309,7 +316,7 @@ func getRemoteAdders(client []*Client) string {
}
func (ws *WsServer) KickUserConn(client *Client) error {
ws.clients.deleteClients(client.UserID, []*Client{client})
ws.clients.DeleteClients(client.UserID, []*Client{client})
return client.KickOnlineMessage()
}
@@ -325,7 +332,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
if !clientOK {
return
}
ws.clients.deleteClients(newClient.UserID, oldClients)
ws.clients.DeleteClients(newClient.UserID, oldClients)
for _, c := range oldClients {
err := c.KickOnlineMessage()
if err != nil {
@@ -345,13 +352,16 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
func (ws *WsServer) unregisterClient(client *Client) {
defer ws.clientPool.Put(client)
isDeleteUser := ws.clients.delete(client.UserID, client.ctx.GetRemoteAddr())
isDeleteUser := ws.clients.DeleteClients(client.UserID, []*Client{client})
if isDeleteUser {
ws.onlineUserNum.Add(-1)
prommetrics.OnlineUserGauge.Dec()
}
ws.onlineUserConnNum.Add(-1)
ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
client.subLock.Lock()
clear(client.subUserIDs)
client.subLock.Unlock()
//ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
ws.onlineUserNum.Load(), "online user conn Num",
ws.onlineUserConnNum.Load(),

@@ -17,6 +17,7 @@ package msgtransfer
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil"
@@ -29,16 +30,12 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
@@ -82,12 +79,21 @@ func Start(ctx context.Context, index int, config *Config) error {
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
msgModel := redis.NewMsgCache(rdb)
seqModel := redis.NewSeqCache(rdb)
msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
if err != nil {
return err
}
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqModel, &config.KafkaConfig)
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil {
return err
}
seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
if err != nil {
return err
}
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}
@@ -130,14 +136,8 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
netDone <- struct{}{}
return
}
proreg := prometheus.NewRegistry()
proreg.MustRegister(
collectors.NewGoCollector(),
)
proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer", &config.Share)...)
http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
err = http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
if err != nil && err != http.ErrServerClosed {
if err := prommetrics.TransferInit(prometheusPort); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
netDone <- struct{}{}
}

@@ -28,6 +28,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant"
pbchat "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/msggateway"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
@@ -45,6 +46,7 @@ type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher
onlineCache *rpccache.OnlineCache
groupLocalCache *rpccache.GroupLocalCache
conversationLocalCache *rpccache.ConversationLocalCache
msgRpcClient rpcclient.MessageRpcClient
@@ -63,16 +65,17 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
if err != nil {
return nil, err
}
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config)
consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.msgRpcClient = rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
consumerHandler.conversationRpcClient = rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient,
&config.LocalCacheConfig, rdb)
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
consumerHandler.config = config
consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
return &consumerHandler, nil
}
@@ -125,12 +128,12 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s
}
// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) error {
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
return err
}
wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, userIDs)
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
if err != nil {
return err
}
@@ -179,6 +182,38 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
return true
}
func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
var (
onlineUserIDs []string
offlineUserIDs []string
)
for _, userID := range pushToUserIDs {
online, err := c.onlineCache.GetUserOnline(ctx, userID)
if err != nil {
return nil, err
}
if online {
onlineUserIDs = append(onlineUserIDs, userID)
} else {
offlineUserIDs = append(offlineUserIDs, userID)
}
}
var result []*msggateway.SingleMsgToUserResults
if len(onlineUserIDs) > 0 {
var err error
result, err = c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil {
return nil, err
}
}
for _, userID := range offlineUserIDs {
result = append(result, &msggateway.SingleMsgToUserResults{
UserID: userID,
})
}
return result, nil
}
func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
var pushToUserIDs []string
@@ -192,7 +227,7 @@ func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *s
return err
}
wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil {
return err
}
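
GetConnsAndOnlinePush first splits recipients via the local online cache, so only online users trigger a gateway fan-out and offline users just get empty result entries. A toy partition, with a plain map standing in for rpccache.OnlineCache:

package main

import "fmt"

func main() {
    // Stand-in for rpccache.OnlineCache lookups.
    online := map[string]bool{"u1": true, "u3": true}
    var onlineIDs, offlineIDs []string
    for _, id := range []string{"u1", "u2", "u3"} {
        if online[id] {
            onlineIDs = append(onlineIDs, id)
        } else {
            offlineIDs = append(offlineIDs, id)
        }
    }
    fmt.Println(onlineIDs, offlineIDs) // only onlineIDs need the gateway round trip
}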

@@ -86,12 +86,21 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err
}
msgModel := redis.NewMsgCache(rdb)
seqModel := redis.NewSeqCache(rdb)
conversationClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
friendRpcClient := rpcclient.NewFriendRpcClient(client, config.Share.RpcRegisterName.Friend)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqModel, &config.KafkaConfig)
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil {
return err
}
seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
if err != nil {
return err
}
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}

@@ -19,13 +19,17 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"path"
"strconv"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"go.mongodb.org/mongo-driver/mongo"
"github.com/google/uuid"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
@@ -283,6 +287,52 @@ func (t *thirdServer) apiAddress(prefix, name string) string {
return prefix + name
}
func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) {
var conf config.Third
expireTime := time.UnixMilli(req.ExpireTime)
findPagination := &sdkws.RequestPagination{
PageNumber: 1,
ShowNumber: 1000,
}
for {
total, models, err := t.s3dataBase.FindByExpires(ctx, expireTime, findPagination)
if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments {
return nil, errs.Wrap(err)
}
needDelObjectKeys := make([]string, 0)
for _, model := range models {
needDelObjectKeys = append(needDelObjectKeys, model.Key)
}
needDelObjectKeys = datautil.Distinct(needDelObjectKeys)
for _, key := range needDelObjectKeys {
count, err := t.s3dataBase.FindNotDelByS3(ctx, key, expireTime)
if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments {
return nil, errs.Wrap(err)
}
if int(count) < 1 && t.minio != nil {
thumbnailKey, err := t.getMinioImageThumbnailKey(ctx, key)
if err != nil {
return nil, errs.Wrap(err)
}
t.s3dataBase.DeleteObject(ctx, thumbnailKey)
t.s3dataBase.DelS3Key(ctx, conf.Object.Enable, needDelObjectKeys...)
t.s3dataBase.DeleteObject(ctx, key)
}
}
for _, model := range models {
err := t.s3dataBase.DeleteSpecifiedData(ctx, model.Engine, model.Name)
if err != nil {
return nil, errs.Wrap(err)
}
}
if total < int64(findPagination.ShowNumber) {
break
}
}
return &third.DeleteOutdatedDataResp{}, nil
}
type FormDataMate struct {
Name string `json:"name"`
Size int64 `json:"size"`
@@ -290,8 +340,3 @@ type FormDataMate struct {
Group string `json:"group"`
Key string `json:"key"`
}
func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) {
//TODO implement me
panic("implement me")
}
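
DeleteOutdatedData always re-reads page 1: each pass deletes the rows it just processed, and the loop exits once a fetch reports fewer matches than ShowNumber. The same termination logic in isolation:

package main

import "fmt"

func main() {
    pending := []string{"k1", "k2", "k3", "k4", "k5"} // expired objects
    const show = 2                                    // ShowNumber
    for {
        total := len(pending) // FindByExpires reports the total match count
        n := show
        if n > total {
            n = total
        }
        fmt.Println("deleting", pending[:n]) // always "page 1"
        pending = pending[n:]                // DeleteSpecifiedData removes processed rows
        if total < show {                    // exit once a fetch is smaller than a page
            break
        }
    }
}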

@@ -31,9 +31,9 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cos"
"github.com/openimsdk/tools/s3/kodo"
"github.com/openimsdk/tools/s3/minio"
"github.com/openimsdk/tools/s3/oss"
"github.com/openimsdk/tools/s3/kodo"
"google.golang.org/grpc"
)
@@ -43,6 +43,7 @@ type thirdServer struct {
userRpcClient rpcclient.UserRpcClient
defaultExpire time.Duration
config *Config
minio *minio.Minio
}
type Config struct {
@@ -75,10 +76,14 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
}
// Select the oss method according to the profile policy
enable := config.RpcConfig.Object.Enable
var o s3.Interface
var (
o s3.Interface
minioCli *minio.Minio
)
switch enable {
case "minio":
o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
minioCli, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
o = minioCli
case "cos":
o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
case "oss":
@@ -98,10 +103,15 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
s3dataBase: controller.NewS3Database(rdb, o, s3db),
defaultExpire: time.Hour * 24 * 7,
config: config,
minio: minioCli,
})
return nil
}
func (t *thirdServer) getMinioImageThumbnailKey(ctx context.Context, name string) (string, error) {
return t.minio.GetImageThumbnailKey(ctx, name)
}
func (t *thirdServer) FcmUpdateToken(ctx context.Context, req *third.FcmUpdateTokenReq) (resp *third.FcmUpdateTokenResp, err error) {
err = t.thirdDatabase.FcmUpdateToken(ctx, req.Account, int(req.PlatformID), req.FcmToken, req.ExpireTime)
if err != nil {

@@ -0,0 +1,122 @@
package user
import (
"context"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
pbuser "github.com/openimsdk/protocol/user"
)
func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) {
platformIDs, err := s.online.GetOnline(ctx, userID)
if err != nil {
return nil, err
}
status := pbuser.OnlineStatus{
UserID: userID,
PlatformIDs: platformIDs,
}
if len(platformIDs) > 0 {
status.Status = constant.Online
} else {
status.Status = constant.Offline
}
return &status, nil
}
func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) {
res := make([]*pbuser.OnlineStatus, 0, len(userIDs))
for _, userID := range userIDs {
status, err := s.getUserOnlineStatus(ctx, userID)
if err != nil {
return nil, err
}
res = append(res, status)
}
return res, nil
}
// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) {
if req.Genre == constant.SubscriberUser {
err := s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
if err != nil {
return nil, err
}
var status []*pbuser.OnlineStatus
status, err = s.getUsersOnlineStatus(ctx, req.UserIDs)
if err != nil {
return nil, err
}
return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
} else if req.Genre == constant.Unsubscribe {
err := s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
if err != nil {
return nil, err
}
}
return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}
// GetUserStatus Get the online status of the user.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) {
res, err := s.getUsersOnlineStatus(ctx, req.UserIDs)
if err != nil {
return nil, err
}
return &pbuser.GetUserStatusResp{StatusList: res}, nil
}
// SetUserStatus Synchronize user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) {
var (
online []int32
offline []int32
)
switch req.Status {
case constant.Online:
online = []int32{req.PlatformID}
case constant.Offline:
offline = []int32{req.PlatformID}
}
if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil {
return nil, err
}
list, err := s.db.GetSubscribedList(ctx, req.UserID)
if err != nil {
return nil, err
}
for _, userID := range list {
tips := &sdkws.UserStatusChangeTips{
FromUserID: req.UserID,
ToUserID: userID,
Status: req.Status,
PlatformID: req.PlatformID,
}
s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
}
return &pbuser.SetUserStatusResp{}, nil
}
// GetSubscribeUsersStatus Get the online status of subscribers.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
if err != nil {
return nil, err
}
onlineStatusList, err := s.getUsersOnlineStatus(ctx, userList)
if err != nil {
return nil, err
}
return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
}
func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
for _, status := range req.Status {
if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil {
return nil, err
}
}
return &pbuser.SetUserOnlineStatusResp{}, nil
}

@@ -19,6 +19,7 @@ import (
"errors"
"github.com/openimsdk/open-im-server/v3/internal/rpc/friend"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@@ -50,6 +51,7 @@ )
)
type userServer struct {
online cache.OnlineCache
db controller.UserDatabase
friendNotificationSender *friend.FriendNotificationSender
userNotificationSender *UserNotificationSender
@@ -98,6 +100,7 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi
msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
localcache.InitLocalCache(&config.LocalCacheConfig)
u := &userServer{
online: redis.NewUserOnline(rdb),
db: database,
RegisterCenter: client,
friendRpcClient: &friendRpcClient,
@@ -329,76 +332,6 @@ func (s *userServer) GetAllUserID(ctx context.Context, req *pbuser.GetAllUserIDR
return &pbuser.GetAllUserIDResp{Total: int32(total), UserIDs: userIDs}, nil
}
// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (resp *pbuser.SubscribeOrCancelUsersStatusResp, err error) {
if req.Genre == constant.SubscriberUser {
err = s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
if err != nil {
return nil, err
}
var status []*pbuser.OnlineStatus
status, err = s.db.GetUserStatus(ctx, req.UserIDs)
if err != nil {
return nil, err
}
return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
} else if req.Genre == constant.Unsubscribe {
err = s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
if err != nil {
return nil, err
}
}
return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}
// GetUserStatus Get the online status of the user.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (resp *pbuser.GetUserStatusResp,
err error) {
onlineStatusList, err := s.db.GetUserStatus(ctx, req.UserIDs)
if err != nil {
return nil, err
}
return &pbuser.GetUserStatusResp{StatusList: onlineStatusList}, nil
}
// SetUserStatus Synchronize user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (resp *pbuser.SetUserStatusResp,
err error) {
err = s.db.SetUserStatus(ctx, req.UserID, req.Status, req.PlatformID)
if err != nil {
return nil, err
}
list, err := s.db.GetSubscribedList(ctx, req.UserID)
if err != nil {
return nil, err
}
for _, userID := range list {
tips := &sdkws.UserStatusChangeTips{
FromUserID: req.UserID,
ToUserID: userID,
Status: req.Status,
PlatformID: req.PlatformID,
}
s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
}
return &pbuser.SetUserStatusResp{}, nil
}
// GetSubscribeUsersStatus Get the online status of subscribers.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context,
req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
if err != nil {
return nil, err
}
onlineStatusList, err := s.db.GetUserStatus(ctx, userList)
if err != nil {
return nil, err
}
return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
}
// ProcessUserCommandAdd user general function add.
func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.ProcessUserCommandAddReq) (*pbuser.ProcessUserCommandAddResp, error) {
err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID)
@ -736,8 +669,3 @@ func (s *userServer) SortQuery(ctx context.Context, req *pbuser.SortQueryReq) (*
}
return &pbuser.SortQueryResp{Users: convert.UsersDB2Pb(users)}, nil
}
func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
//TODO implement me
panic("implement me")
}

@ -20,6 +20,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mw"
"google.golang.org/grpc"
@ -39,7 +40,7 @@ type CronTaskConfig struct {
}
func Start(ctx context.Context, config *CronTaskConfig) error {
log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime, "msgDestructTime", config.CronTask.RetainChatRecords)
log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.CronExecuteTime, "msgDestructTime", config.CronTask.RetainChatRecords)
if config.CronTask.RetainChatRecords < 1 {
	return errs.New("msg destruct time must be at least 1").Wrap()
}
@ -66,10 +67,31 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
}
log.ZInfo(ctx, "cron clear chat records success", "delTime", deltime, "cost", time.Since(now))
}
if _, err := crontab.AddFunc(config.CronTask.ChatRecordsClearTime, clearFunc); err != nil {
if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearFunc); err != nil {
return errs.Wrap(err)
}
log.ZInfo(ctx, "start cron task", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime)
tConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third)
if err != nil {
return err
}
thirdClient := third.NewThirdClient(tConn)
deleteFunc := func() {
	now := time.Now()
	deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime))
	ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli()))
	log.ZInfo(ctx, "deleteOutdatedData", "deleteTime", deleteTime, "timestamp", deleteTime.UnixMilli())
	if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil {
		log.ZError(ctx, "cron deleteOutdatedData failed", err, "deleteTime", deleteTime, "cost", time.Since(now))
		return
	}
	log.ZInfo(ctx, "cron deleteOutdatedData success", "deleteTime", deleteTime, "cost", time.Since(now))
}
if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteFunc); err != nil {
return errs.Wrap(err)
}
log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
crontab.Start()
<-ctx.Done()
return nil
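For context: cronExecuteTime is a standard cron spec string handed to crontab.AddFunc, so the clear job and the delete job fire on the same schedule. A minimal sketch, assuming the robfig/cron/v3 engine that AddFunc's signature suggests (the "0 2 * * *" spec and log output are illustrative, not from this commit):

package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()
	// "0 2 * * *" runs once a day at 02:00.
	if _, err := c.AddFunc("0 2 * * *", func() { log.Println("run cleanup") }); err != nil {
		log.Fatal(err)
	}
	c.Start()
	select {} // block; the real server waits on <-ctx.Done() instead
}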

@ -15,7 +15,7 @@
package apistruct
import (
sdkws "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/protocol/sdkws"
)
// SendMsg defines the structure for sending messages with various metadata.

@ -37,6 +37,7 @@ func NewMsgGatewayCmd() *MsgGatewayCmd {
ret.configMap = map[string]any{
OpenIMMsgGatewayCfgFileName: &msgGatewayConfig.MsgGateway,
ShareFileName: &msgGatewayConfig.Share,
RedisConfigFileName: &msgGatewayConfig.RedisConfig,
WebhooksConfigFileName: &msgGatewayConfig.WebhooksConfig,
DiscoveryConfigFilename: &msgGatewayConfig.Discovery,
}

@ -109,8 +109,9 @@ type API struct {
}
type CronTask struct {
ChatRecordsClearTime string `mapstructure:"chatRecordsClearTime"`
RetainChatRecords int `mapstructure:"retainChatRecords"`
CronExecuteTime string `mapstructure:"cronExecuteTime"`
RetainChatRecords int `mapstructure:"retainChatRecords"`
FileExpireTime int `mapstructure:"fileExpireTime"`
}
type OfflinePushConfig struct {

@ -14,430 +14,431 @@
package ginprometheus
import (
"bytes"
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var defaultMetricPath = "/metrics"
// counter, counter_vec, gauge, gauge_vec,
// histogram, histogram_vec, summary, summary_vec.
var (
reqCounter = &Metric{
ID: "reqCnt",
Name: "requests_total",
Description: "How many HTTP requests processed, partitioned by status code and HTTP method.",
Type: "counter_vec",
Args: []string{"code", "method", "handler", "host", "url"}}
reqDuration = &Metric{
ID: "reqDur",
Name: "request_duration_seconds",
Description: "The HTTP request latencies in seconds.",
Type: "histogram_vec",
Args: []string{"code", "method", "url"},
}
resSize = &Metric{
ID: "resSz",
Name: "response_size_bytes",
Description: "The HTTP response sizes in bytes.",
Type: "summary"}
reqSize = &Metric{
ID: "reqSz",
Name: "request_size_bytes",
Description: "The HTTP request sizes in bytes.",
Type: "summary"}
standardMetrics = []*Metric{
reqCounter,
reqDuration,
resSize,
reqSize,
}
)
/*
RequestCounterURLLabelMappingFn is a function which can be supplied to the middleware to control
the cardinality of the request counter's "url" label, which might be required in some contexts.
For instance, if for a "/customer/:name" route you don't want to generate a time series for every
possible customer name, you could use this function:
func(c *gin.Context) string {
url := c.Request.URL.Path
for _, p := range c.Params {
if p.Key == "name" {
url = strings.Replace(url, p.Value, ":name", 1)
break
}
}
return url
}
which would map "/customer/alice" and "/customer/bob" to their template "/customer/:name".
*/
type RequestCounterURLLabelMappingFn func(c *gin.Context) string
// Metric is a definition for the name, description, type, ID, and
// prometheus.Collector type (i.e. CounterVec, Summary, etc) of each metric.
type Metric struct {
MetricCollector prometheus.Collector
ID string
Name string
Description string
Type string
Args []string
}
// Prometheus contains the metrics gathered by the instance and its path.
type Prometheus struct {
reqCnt *prometheus.CounterVec
reqDur *prometheus.HistogramVec
reqSz, resSz prometheus.Summary
router *gin.Engine
listenAddress string
Ppg PrometheusPushGateway
MetricsList []*Metric
MetricsPath string
ReqCntURLLabelMappingFn RequestCounterURLLabelMappingFn
// gin.Context string to use as a prometheus URL label
URLLabelFromContext string
}
// PrometheusPushGateway contains the configuration for pushing to a Prometheus pushgateway (optional).
type PrometheusPushGateway struct {
// Push interval in seconds
PushIntervalSeconds time.Duration
// Push Gateway URL in format http://domain:port
// where JOBNAME can be any string of your choice
PushGatewayURL string
// Local metrics URL where metrics are fetched from, this could be omitted in the future
// if implemented using prometheus common/expfmt instead
MetricsURL string
// pushgateway job name, defaults to "gin"
Job string
}
// NewPrometheus generates a new set of metrics with a certain subsystem name.
func NewPrometheus(subsystem string, customMetricsList ...[]*Metric) *Prometheus {
if subsystem == "" {
subsystem = "app"
}
var metricsList []*Metric
if len(customMetricsList) > 1 {
panic("Too many args. NewPrometheus( string, <optional []*Metric> ).")
} else if len(customMetricsList) == 1 {
metricsList = customMetricsList[0]
}
metricsList = append(metricsList, standardMetrics...)
p := &Prometheus{
MetricsList: metricsList,
MetricsPath: defaultMetricPath,
ReqCntURLLabelMappingFn: func(c *gin.Context) string {
return c.FullPath() // e.g. /user/:id , /user/:id/info
},
}
p.registerMetrics(subsystem)
return p
}
// SetPushGateway sends metrics to a remote pushgateway exposed on pushGatewayURL
// every pushIntervalSeconds. Metrics are fetched from metricsURL.
func (p *Prometheus) SetPushGateway(pushGatewayURL, metricsURL string, pushIntervalSeconds time.Duration) {
p.Ppg.PushGatewayURL = pushGatewayURL
p.Ppg.MetricsURL = metricsURL
p.Ppg.PushIntervalSeconds = pushIntervalSeconds
p.startPushTicker()
}
// SetPushGatewayJob sets the push gateway job name; defaults to "gin".
func (p *Prometheus) SetPushGatewayJob(j string) {
	p.Ppg.Job = j
}
// SetListenAddress for exposing metrics on address. If not set, it will be exposed at the
// same address of the gin engine that is being used.
func (p *Prometheus) SetListenAddress(address string) {
p.listenAddress = address
if p.listenAddress != "" {
p.router = gin.Default()
}
}
// SetListenAddressWithRouter for using a separate router to expose metrics. (this keeps things like GET /metrics out of
// your content's access log).
func (p *Prometheus) SetListenAddressWithRouter(listenAddress string, r *gin.Engine) {
p.listenAddress = listenAddress
if len(p.listenAddress) > 0 {
p.router = r
}
}
// SetMetricsPath registers the metrics endpoint on the given engine, or on the dedicated router when a listen address is set.
func (p *Prometheus) SetMetricsPath(e *gin.Engine) error {
if p.listenAddress != "" {
p.router.GET(p.MetricsPath, prometheusHandler())
return p.runServer()
} else {
e.GET(p.MetricsPath, prometheusHandler())
return nil
}
}
// SetMetricsPathWithAuth registers the metrics endpoint behind HTTP basic auth.
func (p *Prometheus) SetMetricsPathWithAuth(e *gin.Engine, accounts gin.Accounts) error {
if p.listenAddress != "" {
p.router.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
return p.runServer()
} else {
e.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
return nil
}
}
func (p *Prometheus) runServer() error {
return p.router.Run(p.listenAddress)
}
func (p *Prometheus) getMetrics() []byte {
response, err := http.Get(p.Ppg.MetricsURL)
if err != nil {
return nil
}
defer response.Body.Close()
body, _ := io.ReadAll(response.Body)
return body
}
var hostname, _ = os.Hostname()
func (p *Prometheus) getPushGatewayURL() string {
if p.Ppg.Job == "" {
p.Ppg.Job = "gin"
}
return p.Ppg.PushGatewayURL + "/metrics/job/" + p.Ppg.Job + "/instance/" + hostname
}
func (p *Prometheus) sendMetricsToPushGateway(metrics []byte) {
	req, err := http.NewRequest("POST", p.getPushGatewayURL(), bytes.NewBuffer(metrics))
	if err != nil {
		return
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Error sending to push gateway:", err.Error())
		return // resp is nil on error; closing its body would panic
	}
	resp.Body.Close()
}
func (p *Prometheus) startPushTicker() {
ticker := time.NewTicker(time.Second * p.Ppg.PushIntervalSeconds)
go func() {
for range ticker.C {
p.sendMetricsToPushGateway(p.getMetrics())
}
}()
}
// NewMetric creates a prometheus.Collector matching Metric.Type.
func NewMetric(m *Metric, subsystem string) prometheus.Collector {
var metric prometheus.Collector
switch m.Type {
case "counter_vec":
metric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "counter":
metric = prometheus.NewCounter(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "gauge_vec":
metric = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "gauge":
metric = prometheus.NewGauge(
prometheus.GaugeOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "histogram_vec":
metric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "histogram":
metric = prometheus.NewHistogram(
prometheus.HistogramOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "summary_vec":
metric = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "summary":
metric = prometheus.NewSummary(
prometheus.SummaryOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
}
return metric
}
func (p *Prometheus) registerMetrics(subsystem string) {
for _, metricDef := range p.MetricsList {
metric := NewMetric(metricDef, subsystem)
if err := prometheus.Register(metric); err != nil {
	fmt.Println("metric could not be registered in Prometheus, name:", metricDef.Name, "error:", err.Error())
}
switch metricDef {
case reqCounter:
p.reqCnt = metric.(*prometheus.CounterVec)
case reqDuration:
p.reqDur = metric.(*prometheus.HistogramVec)
case resSize:
p.resSz = metric.(prometheus.Summary)
case reqSize:
p.reqSz = metric.(prometheus.Summary)
}
metricDef.MetricCollector = metric
}
}
// Use adds the middleware to a gin engine.
func (p *Prometheus) Use(e *gin.Engine) error {
e.Use(p.HandlerFunc())
return p.SetMetricsPath(e)
}
// UseWithAuth adds the middleware to a gin engine with BasicAuth.
func (p *Prometheus) UseWithAuth(e *gin.Engine, accounts gin.Accounts) error {
e.Use(p.HandlerFunc())
return p.SetMetricsPathWithAuth(e, accounts)
}
// HandlerFunc defines handler function for middleware.
func (p *Prometheus) HandlerFunc() gin.HandlerFunc {
return func(c *gin.Context) {
if c.Request.URL.Path == p.MetricsPath {
c.Next()
return
}
start := time.Now()
reqSz := computeApproximateRequestSize(c.Request)
c.Next()
status := strconv.Itoa(c.Writer.Status())
elapsed := float64(time.Since(start)) / float64(time.Second)
resSz := float64(c.Writer.Size())
url := p.ReqCntURLLabelMappingFn(c)
if len(p.URLLabelFromContext) > 0 {
u, found := c.Get(p.URLLabelFromContext)
if !found {
u = "unknown"
}
url = u.(string)
}
p.reqDur.WithLabelValues(status, c.Request.Method, url).Observe(elapsed)
p.reqCnt.WithLabelValues(status, c.Request.Method, c.HandlerName(), c.Request.Host, url).Inc()
p.reqSz.Observe(float64(reqSz))
p.resSz.Observe(resSz)
}
}
func prometheusHandler() gin.HandlerFunc {
h := promhttp.Handler()
return func(c *gin.Context) {
h.ServeHTTP(c.Writer, c.Request)
}
}
func computeApproximateRequestSize(r *http.Request) int {
var s int
if r.URL != nil {
s = len(r.URL.Path)
}
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
// r.FormData and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
return s
}
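A minimal wiring sketch for this middleware (hypothetical usage, not part of the commit; the subsystem name and port are placeholders):

package main

import (
	"github.com/gin-gonic/gin"
	"github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
)

func main() {
	r := gin.New()
	p := ginprometheus.NewPrometheus("app") // an empty subsystem also defaults to "app"
	if err := p.Use(r); err != nil {        // installs the handler middleware and GET /metrics
		panic(err)
	}
	_ = r.Run(":8080")
}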

@ -0,0 +1,48 @@
package prommetrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"strconv"
)
var (
apiCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "api_count",
Help: "Total number of API calls",
},
[]string{"path", "method", "code"},
)
httpCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "http_count",
Help: "Total number of HTTP calls",
},
[]string{"path", "method", "status"},
)
)
func ApiInit(prometheusPort int) error {
apiRegistry := prometheus.NewRegistry()
cs := append(
baseCollector,
apiCounter,
httpCounter,
)
return Init(apiRegistry, prometheusPort, commonPath, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}), cs...)
}
func APICall(path string, method string, apiCode int) {
apiCounter.With(prometheus.Labels{"path": path, "method": method, "code": strconv.Itoa(apiCode)}).Inc()
}
func HttpCall(path string, method string, status int) {
httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc()
}
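A hedged sketch of feeding these counters from a gin middleware; only ApiInit, APICall, and HttpCall come from this file, the middleware and ports are illustrative:

package main

import (
	"github.com/gin-gonic/gin"
	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
)

func metricsMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next() // run the handler first, then record the outcome
		prommetrics.HttpCall(c.FullPath(), c.Request.Method, c.Writer.Status())
	}
}

func main() {
	go func() {
		_ = prommetrics.ApiInit(12002) // blocks serving /metrics; port is illustrative
	}()
	r := gin.New()
	r.Use(metricsMiddleware())
	_ = r.Run(":10002")
}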
//func ApiHandler() http.Handler {
// return promhttp.InstrumentMetricHandler(
// apiRegistry, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}),
// )
//}

@ -1,30 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prommetrics
import ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
/*
labels := prometheus.Labels{"label_one": "any", "label_two": "value"}
ApiCustomCnt.MetricCollector.(*prometheus.CounterVec).With(labels).Inc().
*/
var (
ApiCustomCnt = &ginprom.Metric{
Name: "custom_total",
Description: "Custom counter events.",
Type: "counter_vec",
Args: []string{"label_one", "label_two"},
}
)

@ -15,44 +15,24 @@
package prommetrics
import (
gp "github.com/grpc-ecosystem/go-grpc-prometheus"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"net/http"
)
func NewGrpcPromObj(cusMetrics []prometheus.Collector) (*prometheus.Registry, *gp.ServerMetrics, error) {
reg := prometheus.NewRegistry()
grpcMetrics := gp.NewServerMetrics()
grpcMetrics.EnableHandlingTimeHistogram()
cusMetrics = append(cusMetrics, grpcMetrics, collectors.NewGoCollector())
reg.MustRegister(cusMetrics...)
return reg, grpcMetrics, nil
}
const commonPath = "/metrics"
func GetGrpcCusMetrics(registerName string, share *config2.Share) []prometheus.Collector {
	switch registerName {
	case share.RpcRegisterName.MessageGateway:
		return []prometheus.Collector{OnlineUserGauge}
	case share.RpcRegisterName.Msg:
		return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
	case "Transfer":
		return []prometheus.Collector{MsgInsertRedisSuccessCounter, MsgInsertRedisFailedCounter, MsgInsertMongoSuccessCounter, MsgInsertMongoFailedCounter, SeqSetFailedCounter}
	case share.RpcRegisterName.Push:
		return []prometheus.Collector{MsgOfflinePushFailedCounter}
	case share.RpcRegisterName.Auth:
		return []prometheus.Collector{UserLoginCounter}
	default:
		return nil
	}
}
func GetGinCusMetrics(name string) []*ginprometheus.Metric {
	switch name {
	case "Api":
		return []*ginprometheus.Metric{ApiCustomCnt}
	default:
		return []*ginprometheus.Metric{ApiCustomCnt}
	}
}
var (
	baseCollector = []prometheus.Collector{
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewGoCollector(),
	}
)
func Init(registry *prometheus.Registry, prometheusPort int, path string, handler http.Handler, cs ...prometheus.Collector) error {
	registry.MustRegister(cs...)
	srv := http.NewServeMux()
	srv.Handle(path, handler)
	return http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), srv)
}

@ -14,46 +14,39 @@
package prommetrics
import (
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
func TestNewGrpcPromObj(t *testing.T) {
// Create a custom metric to pass into the NewGrpcPromObj function.
customMetric := prometheus.NewCounter(prometheus.CounterOpts{
Name: "test_metric",
Help: "This is a test metric.",
})
cusMetrics := []prometheus.Collector{customMetric}
// Call NewGrpcPromObj with the custom metrics.
reg, grpcMetrics, err := NewGrpcPromObj(cusMetrics)
// Assert no error was returned.
assert.NoError(t, err)
// Assert the registry was correctly initialized.
assert.NotNil(t, reg)
// Assert the grpcMetrics was correctly initialized.
assert.NotNil(t, grpcMetrics)
// Assert that the custom metric is registered.
mfs, err := reg.Gather()
assert.NoError(t, err)
assert.NotEmpty(t, mfs) // Ensure some metrics are present.
found := false
for _, mf := range mfs {
if *mf.Name == "test_metric" {
found = true
break
}
}
assert.True(t, found, "Custom metric not found in registry")
}
//func TestGetGrpcCusMetrics(t *testing.T) {
// conf := config2.NewGlobalConfig()

@ -0,0 +1,58 @@
package prommetrics
import (
gp "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"strconv"
)
const rpcPath = commonPath
var (
grpcMetrics *gp.ServerMetrics
rpcCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "rpc_count",
Help: "Total number of RPC calls",
},
[]string{"name", "path", "code"},
)
)
func RpcInit(cs []prometheus.Collector, prometheusPort int) error {
reg := prometheus.NewRegistry()
cs = append(append(
baseCollector,
rpcCounter,
), cs...)
return Init(reg, prometheusPort, rpcPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
}
func RPCCall(name string, path string, code int) {
rpcCounter.With(prometheus.Labels{"name": name, "path": path, "code": strconv.Itoa(code)}).Inc()
}
func GetGrpcServerMetrics() *gp.ServerMetrics {
if grpcMetrics == nil {
grpcMetrics = gp.NewServerMetrics()
grpcMetrics.EnableHandlingTimeHistogram()
}
return grpcMetrics
}
func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Collector {
switch registerName {
case share.RpcRegisterName.MessageGateway:
return []prometheus.Collector{OnlineUserGauge}
case share.RpcRegisterName.Msg:
return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
case share.RpcRegisterName.Push:
return []prometheus.Collector{MsgOfflinePushFailedCounter}
case share.RpcRegisterName.Auth:
return []prometheus.Collector{UserLoginCounter}
default:
return nil
}
}
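GetGrpcServerMetrics lazily builds a shared go-grpc-prometheus ServerMetrics; a hedged sketch of wiring it into a gRPC server (the constructor below is hypothetical glue, the interceptor methods are the standard go-grpc-prometheus API):

package main

import (
	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
	"google.golang.org/grpc"
)

func newInstrumentedServer() *grpc.Server {
	m := prommetrics.GetGrpcServerMetrics()
	srv := grpc.NewServer(
		grpc.StreamInterceptor(m.StreamServerInterceptor()),
		grpc.UnaryInterceptor(m.UnaryServerInterceptor()),
	)
	m.InitializeMetrics(srv) // pre-populates per-method series with zero values
	return srv
}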

@ -16,6 +16,7 @@ package prommetrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
@ -40,3 +41,16 @@ var (
Help: "The number of failed set seq",
})
)
func TransferInit(prometheusPort int) error {
reg := prometheus.NewRegistry()
cs := append(
baseCollector,
MsgInsertRedisSuccessCounter,
MsgInsertRedisFailedCounter,
MsgInsertMongoSuccessCounter,
MsgInsertMongoFailedCounter,
SeqSetFailedCounter,
)
return Init(reg, prometheusPort, commonPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
}

@ -17,9 +17,9 @@ package startrpc
import (
"context"
"fmt"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/utils/datautil"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/status"
"net"
"net/http"
"os"
@ -29,7 +29,6 @@ import (
"syscall"
"time"
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/tools/discovery"
@ -38,14 +37,13 @@ import (
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"github.com/openimsdk/tools/utils/network"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// Start rpc server.
func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusConfig *config2.Prometheus, listenIP,
registerIP string, rpcPorts []int, index int, rpcRegisterName string, share *config2.Share, config T, rpcFn func(ctx context.Context,
func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusConfig *config.Prometheus, listenIP,
registerIP string, rpcPorts []int, index int, rpcRegisterName string, share *config.Share, config T, rpcFn func(ctx context.Context,
config T, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error, options ...grpc.ServerOption) error {
rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
@ -77,13 +75,18 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
return err
}
var reg *prometheus.Registry
var metric *grpcprometheus.ServerMetrics
//var reg *prometheus.Registry
//var metric *grpcprometheus.ServerMetrics
if prometheusConfig.Enable {
cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics)
options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
//cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
//reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics)
//options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
// grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
options = append(
options, mw.GrpcServer(),
prommetricsUnaryInterceptor(rpcRegisterName),
prommetricsStreamInterceptor(rpcRegisterName),
)
} else {
options = append(options, mw.GrpcServer())
}
@ -122,13 +125,18 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
netDone <- struct{}{}
return
}
metric.InitializeMetrics(srv)
// Create a HTTP server for prometheus.
httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
cs := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
if err := prommetrics.RpcInit(cs, prometheusPort); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, fmt.Sprintf("rpc %s prometheus start err: %d", rpcRegisterName, prometheusPort))
netDone <- struct{}{}
}
//metric.InitializeMetrics(srv)
// Create a HTTP server for prometheus.
//httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
//if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
// netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
// netDone <- struct{}{}
//}
}()
}
@ -175,3 +183,25 @@ func gracefulStopWithCtx(ctx context.Context, f func()) error {
return nil
}
}
func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
	// Map a handler error to a numeric code: 0 on success, the gRPC status
	// code when the error carries one, and -1 for any other error.
	getCode := func(err error) int {
		if err == nil {
			return 0
		}
		rpcErr, ok := err.(interface{ GRPCStatus() *status.Status })
		if !ok {
			return -1
		}
		return int(rpcErr.GRPCStatus().Code())
	}
	return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		resp, err := handler(ctx, req)
		prommetrics.RPCCall(rpcRegisterName, info.FullMethod, getCode(err))
		return resp, err
	})
}
func prommetricsStreamInterceptor(rpcRegisterName string) grpc.ServerOption {
	// No stream metrics are recorded yet; an empty chain is a passthrough.
	return grpc.ChainStreamInterceptor()
}

@ -0,0 +1,13 @@
package cachekey
import "time"
const (
OnlineKey = "ONLINE:"
OnlineChannel = "online_change"
OnlineExpire = time.Hour / 2
)
func GetOnlineKey(userID string) string {
return OnlineKey + userID
}

@ -1,38 +1,30 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cachekey
const (
maxSeq = "MAX_SEQ:"
minSeq = "MIN_SEQ:"
conversationUserMinSeq = "CON_USER_MIN_SEQ:"
hasReadSeq = "HAS_READ_SEQ:"
MallocSeq = "MALLOC_SEQ:"
MallocMinSeqLock = "MALLOC_MIN_SEQ:"
SeqUserMaxSeq = "SEQ_USER_MAX:"
SeqUserMinSeq = "SEQ_USER_MIN:"
SeqUserReadSeq = "SEQ_USER_READ:"
)
func GetMaxSeqKey(conversationID string) string {
	return maxSeq + conversationID
}
func GetMallocSeqKey(conversationID string) string {
	return MallocSeq + conversationID
}
func GetMallocMinSeqKey(conversationID string) string {
	return MallocMinSeqLock + conversationID
}
func GetMinSeqKey(conversationID string) string {
	return minSeq + conversationID
}
func GetSeqUserMaxSeqKey(conversationID string, userID string) string {
	return SeqUserMaxSeq + conversationID + ":" + userID
}
func GetHasReadSeqKey(conversationID string, userID string) string {
	return hasReadSeq + userID + ":" + conversationID
}
func GetSeqUserMinSeqKey(conversationID string, userID string) string {
	return SeqUserMinSeq + conversationID + ":" + userID
}
func GetConversationUserMinSeqKey(conversationID, userID string) string {
	return conversationUserMinSeq + conversationID + "u:" + userID
}
func GetSeqUserReadSeqKey(conversationID string, userID string) string {
	return SeqUserReadSeq + conversationID + ":" + userID
}

@ -17,7 +17,6 @@ package cachekey
const (
UserInfoKey = "USER_INFO:"
UserGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:"
olineStatusKey = "ONLINE_STATUS:"
)
func GetUserInfoKey(userID string) string {
@ -27,7 +26,3 @@ func GetUserInfoKey(userID string) string {
func GetUserGlobalRecvMsgOptKey(userID string) string {
return UserGlobalRecvMsgOptKey + userID
}
func GetOnlineStatusKey(modKey string) string {
return olineStatusKey + modKey
}

@ -36,7 +36,6 @@ type GroupCache interface {
DelGroupMembersHash(groupID string) GroupCache
GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error)
GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (groupMemberIDs map[string][]string, err error)
DelGroupMemberIDs(groupID string) GroupCache

@ -0,0 +1,8 @@
package cache
import "context"
type OnlineCache interface {
GetOnline(ctx context.Context, userID string) ([]int32, error)
SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
}
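For orientation, a minimal consumer sketch of this interface (the helper function is hypothetical):

package example

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)

// IsUserOnline treats a user as online when any platform ID is present.
func IsUserOnline(ctx context.Context, c cache.OnlineCache, userID string) (bool, error) {
	platforms, err := c.GetOnline(ctx, userID)
	if err != nil {
		return false, err
	}
	return len(platforms) > 0, nil
}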

@ -0,0 +1,94 @@
package redis
import (
"context"
"encoding/json"
"github.com/dtm-labs/rockscache"
"github.com/redis/go-redis/v9"
"golang.org/x/sync/singleflight"
"time"
"unsafe"
)
// getRocksCacheRedisClient reaches into rockscache.Client to reuse its redis
// connection. The local mirror struct must keep the same field layout as
// rockscache.Client (rdb first), otherwise the unsafe cast below is invalid.
func getRocksCacheRedisClient(cli *rockscache.Client) redis.UniversalClient {
	type Client struct {
		rdb redis.UniversalClient
		_   rockscache.Options
		_   singleflight.Group
	}
	return (*Client)(unsafe.Pointer(cli)).rdb
}
// batchGetCache2 batch-fetches values through rockscache: IDs are deduplicated,
// mapped to cache keys, grouped by Redis cluster slot, and each slot group is
// fetched with FetchBatch2; only cache misses are loaded from the database via fn.
func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscache.Client, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	// Deduplicate IDs and remember which cache key belongs to which ID.
	findKeys := make([]string, 0, len(ids))
	keyId := make(map[string]K)
	for _, id := range ids {
		key := idKey(id)
		if _, ok := keyId[key]; ok {
			continue
		}
		keyId[key] = id
		findKeys = append(findKeys, key)
	}
	// Keys in one FetchBatch2 call must live on the same cluster slot.
	slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), findKeys)
	if err != nil {
		return nil, err
	}
	result := make([]*V, 0, len(findKeys))
	for _, keys := range slotKeys {
		indexCache, err := rcClient.FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
			// Cache misses: load the corresponding IDs from the database in one query.
			queryIds := make([]K, 0, len(idx))
			idIndex := make(map[K]int)
			for _, index := range idx {
				id := keyId[keys[index]]
				idIndex[id] = index
				queryIds = append(queryIds, id)
			}
			values, err := fn(ctx, queryIds)
			if err != nil {
				return nil, err
			}
			if len(values) == 0 {
				return map[int]string{}, nil
			}
			// Marshal each loaded value back to the batch index it belongs to.
			cacheIndex := make(map[int]string)
			for _, value := range values {
				id := vId(value)
				index, ok := idIndex[id]
				if !ok {
					continue
				}
				bs, err := json.Marshal(value)
				if err != nil {
					return nil, err
				}
				cacheIndex[index] = string(bs)
			}
			return cacheIndex, nil
		})
		if err != nil {
			return nil, err
		}
		for index, data := range indexCache {
			// Empty strings are rockscache placeholders for non-existent records.
			if data == "" {
				continue
			}
			var value V
			if err := json.Unmarshal([]byte(data), &value); err != nil {
				return nil, err
			}
			if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
				cb.BatchCache(keyId[keys[index]])
			}
			result = append(result, &value)
		}
	}
	return result, nil
}
type BatchCacheCallback[K comparable] interface {
BatchCache(id K)
}
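A caller supplies three closures: cache-key derivation per ID, value-to-ID mapping, and a batch DB loader for misses. A sketch in the style of the conversation cache change further down (the method name is illustrative; the fields and helpers match that change):

func (c *ConversationRedisCache) findConversations(ctx context.Context, ownerUserID string, ids []string) ([]*model.Conversation, error) {
	return batchGetCache2(ctx, c.rcClient, c.expireTime, ids,
		func(id string) string { return c.getConversationKey(ownerUserID, id) }, // cache key per ID
		func(v *model.Conversation) string { return v.ConversationID },          // value -> ID
		func(ctx context.Context, ids []string) ([]*model.Conversation, error) { // batch load on miss
			return c.conversationDB.Find(ctx, ownerUserID, ids)
		})
}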

@ -23,7 +23,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw/specialerror"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"
"time"
@ -147,30 +146,30 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin
return t, nil
}
func batchGetCache[T any, K comparable](
ctx context.Context,
rcClient *rockscache.Client,
expire time.Duration,
keys []K,
keyFn func(key K) string,
fns func(ctx context.Context, key K) (T, error),
) ([]T, error) {
if len(keys) == 0 {
return nil, nil
}
res := make([]T, 0, len(keys))
for _, key := range keys {
val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
return fns(ctx, key)
})
if err != nil {
if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
continue
}
return nil, errs.Wrap(err)
}
res = append(res, val)
}
return res, nil
}

@ -0,0 +1,55 @@
package redis
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"testing"
)
func TestName(t *testing.T) {
//var rocks rockscache.Client
//rdb := getRocksCacheRedisClient(&rocks)
//t.Log(rdb == nil)
ctx := context.Background()
rdb, err := redisutil.NewRedisClient(ctx, (&config.Redis{
Address: []string{"172.16.8.48:16379"},
Password: "openIM123",
DB: 3,
}).Build())
if err != nil {
panic(err)
}
mgocli, err := mongoutil.NewMongoDB(ctx, (&config.Mongo{
Address: []string{"172.16.8.48:37017"},
Database: "openim_v3",
Username: "openIM",
Password: "openIM123",
MaxPoolSize: 100,
MaxRetry: 1,
}).Build())
if err != nil {
panic(err)
}
//userMgo, err := mgo.NewUserMongo(mgocli.GetDB())
//if err != nil {
// panic(err)
//}
//rock := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
mgoSeqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
if err != nil {
panic(err)
}
seqUser := NewSeqUserCacheRedis(rdb, mgoSeqUser)
res, err := seqUser.GetReadSeqs(ctx, "2110910952", []string{"sg_2920732023", "sg_345762580"})
if err != nil {
panic(err)
}
t.Log(res)
}

@ -164,10 +164,12 @@ func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversati
}
func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*model.Conversation, error) {
return batchGetCache(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
return batchGetCache2(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
return c.getConversationKey(ownerUserID, conversationID)
}, func(ctx context.Context, conversationID string) (*model.Conversation, error) {
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
}, func(conversation *model.Conversation) string {
return conversation.ConversationID
}, func(ctx context.Context, conversationIDs []string) ([]*model.Conversation, error) {
return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
})
}

@ -70,10 +70,6 @@ func (f *FriendCacheRedis) getFriendIDsKey(ownerUserID string) string {
return cachekey.GetFriendIDsKey(ownerUserID)
}
//func (f *FriendCacheRedis) getFriendSyncSortUserIDsKey(ownerUserID string) string {
// return cachekey.GetFriendSyncSortUserIDsKey(ownerUserID, f.syncCount)
//}
func (f *FriendCacheRedis) getFriendMaxVersionKey(ownerUserID string) string {
return cachekey.GetFriendMaxVersionKey(ownerUserID)
}
@ -107,16 +103,6 @@ func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) cache.FriendCach
return newFriendCache
}
//func (f *FriendCacheRedis) DelSortFriendUserIDs(ownerUserIDs ...string) cache.FriendCache {
// newGroupCache := f.CloneFriendCache()
// keys := make([]string, 0, len(ownerUserIDs))
// for _, userID := range ownerUserIDs {
// keys = append(keys, f.getFriendSyncSortUserIDsKey(userID))
// }
// newGroupCache.AddKeys(keys...)
// return newGroupCache
//}
// GetTwoWayFriendIDs retrieves two-way friend IDs from the cache.
func (f *FriendCacheRedis) GetTwoWayFriendIDs(ctx context.Context, ownerUserID string) (twoWayFriendIDs []string, err error) {
friendIDs, err := f.GetFriendIDs(ctx, ownerUserID)
@ -193,17 +179,6 @@ func (f *FriendCacheRedis) DelMaxFriendVersion(ownerUserIDs ...string) cache.Fri
return newFriendCache
}
//func (f *FriendCacheRedis) FindSortFriendUserIDs(ctx context.Context, ownerUserID string) ([]string, error) {
// userIDs, err := f.GetFriendIDs(ctx, ownerUserID)
// if err != nil {
// return nil, err
// }
// if len(userIDs) > f.syncCount {
// userIDs = userIDs[:f.syncCount]
// }
// return userIDs, nil
//}
func (f *FriendCacheRedis) FindMaxFriendVersion(ctx context.Context, ownerUserID string) (*model.VersionLog, error) {
return getCache(ctx, f.rcClient, f.getFriendMaxVersionKey(ownerUserID), f.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
return f.friendDB.FindIncrVersion(ctx, ownerUserID, 0, 0)

@ -118,34 +118,12 @@ func (g *GroupCacheRedis) getJoinGroupMaxVersionKey(userID string) string {
return cachekey.GetJoinGroupMaxVersionKey(userID)
}
func (g *GroupCacheRedis) GetGroupIndex(group *model.Group, keys []string) (int, error) {
key := g.getGroupInfoKey(group.GroupID)
for i, _key := range keys {
if _key == key {
return i, nil
}
}
return 0, errIndex
}
func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *model.GroupMember, keys []string) (int, error) {
	key := g.getGroupMemberInfoKey(groupMember.GroupID, groupMember.UserID)
	for i, _key := range keys {
		if _key == key {
			return i, nil
		}
	}
	return 0, errIndex
}
func (g *GroupCacheRedis) getGroupID(group *model.Group) string {
	return group.GroupID
}
func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error) {
return batchGetCache(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
return g.getGroupInfoKey(groupID)
}, func(ctx context.Context, groupID string) (*model.Group, error) {
return g.groupDB.Take(ctx, groupID)
})
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, g.getGroupInfoKey, g.getGroupID, g.groupDB.Find)
}
func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error) {
@ -233,19 +211,6 @@ func (g *GroupCacheRedis) GetGroupMemberIDs(ctx context.Context, groupID string)
})
}
func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (map[string][]string, error) {
m := make(map[string][]string)
for _, groupID := range groupIDs {
userIDs, err := g.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return nil, err
}
m[groupID] = userIDs
}
return m, nil
}
func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) cache.GroupCache {
cache := g.CloneGroupCache()
cache.AddKeys(g.getGroupMemberIDsKey(groupID))
@ -285,10 +250,12 @@ func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userI
}
func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
return batchGetCache(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
return batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
return g.getGroupMemberInfoKey(groupID, userID)
}, func(ctx context.Context, userID string) (*model.GroupMember, error) {
return g.groupMemberDB.Take(ctx, groupID, userID)
}, func(member *model.GroupMember) string {
return member.UserID
}, func(ctx context.Context, userIDs []string) ([]*model.GroupMember, error) {
return g.groupMemberDB.Find(ctx, groupID, userIDs)
})
}
@ -301,14 +268,6 @@ func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID st
return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}
func (g *GroupCacheRedis) GetAllGroupMemberInfo(ctx context.Context, groupID string) ([]*model.GroupMember, error) {
groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return nil, err
}
return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}
func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) cache.GroupCache {
keys := make([]string, 0, len(userIDs))
for _, userID := range userIDs {
@ -388,42 +347,23 @@ func (g *GroupCacheRedis) GetGroupRolesLevelMemberInfo(ctx context.Context, grou
return g.GetGroupMembersInfo(ctx, groupID, userIDs)
}
func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) (_ []*model.GroupMember, err error) {
func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error) {
if len(groupIDs) == 0 {
var err error
groupIDs, err = g.GetJoinedGroupIDs(ctx, userID)
if err != nil {
return nil, err
}
}
return batchGetCache(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
return g.getGroupMemberInfoKey(groupID, userID)
}, func(ctx context.Context, groupID string) (*model.GroupMember, error) {
return g.groupMemberDB.Take(ctx, groupID, userID)
}, func(member *model.GroupMember) string {
return member.GroupID
}, func(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
return g.groupMemberDB.FindInGroup(ctx, userID, groupIDs)
})
}
//func (g *GroupCacheRedis) FindSortGroupMemberUserIDs(ctx context.Context, groupID string) ([]string, error) {
// userIDs, err := g.GetGroupMemberIDs(ctx, groupID)
// if err != nil {
// return nil, err
// }
// if len(userIDs) > g.syncCount {
// userIDs = userIDs[:g.syncCount]
// }
// return userIDs, nil
//}
//
//func (g *GroupCacheRedis) FindSortJoinGroupIDs(ctx context.Context, userID string) ([]string, error) {
// groupIDs, err := g.GetJoinedGroupIDs(ctx, userID)
// if err != nil {
// return nil, err
// }
// if len(groupIDs) > g.syncCount {
// groupIDs = groupIDs[:g.syncCount]
// }
// return groupIDs, nil
//}
func (g *GroupCacheRedis) DelMaxGroupMemberVersion(groupIDs ...string) cache.GroupCache {
keys := make([]string, 0, len(groupIDs))
for _, groupID := range groupIDs {

@ -183,5 +183,4 @@ func (c *msgCache) GetMessagesBySeq(ctx context.Context, conversationID string,
return nil, nil, err
}
return seqMsgs, failedSeqs, nil
}

@ -0,0 +1,89 @@
package redis
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/tools/errs"
"github.com/redis/go-redis/v9"
"strconv"
"time"
)
func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
return &userOnline{
rdb: rdb,
expire: cachekey.OnlineExpire,
channelName: cachekey.OnlineChannel,
}
}
type userOnline struct {
rdb redis.UniversalClient
expire time.Duration
channelName string
}
func (s *userOnline) getUserOnlineKey(userID string) string {
return cachekey.GetOnlineKey(userID)
}
// GetOnline returns the platform IDs whose entries are still within their
// expiry window: each member's score is its expiry unix time, so only
// scores >= now count as online.
func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) {
members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{
Min: strconv.FormatInt(time.Now().Unix(), 10),
Max: "+inf",
}).Result()
if err != nil {
return nil, errs.Wrap(err)
}
platformIDs := make([]int32, 0, len(members))
for _, member := range members {
val, err := strconv.Atoi(member)
if err != nil {
return nil, errs.Wrap(err)
}
platformIDs = append(platformIDs, int32(val))
}
return platformIDs, nil
}
func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
script := `
local key = KEYS[1]
local score = ARGV[3]
local num1 = redis.call("ZCARD", key)
redis.call("ZREMRANGEBYSCORE", key, "-inf", ARGV[2])
for i = 5, tonumber(ARGV[4])+4 do
redis.call("ZREM", key, ARGV[i])
end
local num2 = redis.call("ZCARD", key)
for i = 5+tonumber(ARGV[4]), #ARGV do
redis.call("ZADD", key, score, ARGV[i])
end
redis.call("EXPIRE", key, ARGV[1])
local num3 = redis.call("ZCARD", key)
local change = (num1 ~= num2) or (num2 ~= num3)
if change then
local members = redis.call("ZRANGE", key, 0, -1)
table.insert(members, KEYS[2])
redis.call("PUBLISH", KEYS[3], table.concat(members, ":"))
return 1
else
return 0
end
`
now := time.Now()
argv := make([]any, 0, 4+len(online)+len(offline))
argv = append(argv, int32(s.expire/time.Second), now.Unix(), now.Add(s.expire).Unix(), int32(len(offline)))
for _, platformID := range offline {
argv = append(argv, platformID)
}
for _, platformID := range online {
argv = append(argv, platformID)
}
keys := []string{s.getUserOnlineKey(userID), userID, s.channelName}
if err := s.rdb.Eval(ctx, script, keys, argv).Err(); err != nil {
return err
}
return nil
}
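A minimal consumer sketch for the channel above (not part of this change; the function name and logging are illustrative, and it assumes the context, strings, strconv, go-redis, log, and cachekey imports):
func watchOnline(ctx context.Context, rdb redis.UniversalClient) {
sub := rdb.Subscribe(ctx, cachekey.OnlineChannel)
defer sub.Close()
for msg := range sub.Channel() {
// payload is "platformID:...:platformID:userID"; the user ID is always last
parts := strings.Split(msg.Payload, ":")
userID := parts[len(parts)-1]
platformIDs := make([]int32, 0, len(parts)-1)
for _, p := range parts[:len(parts)-1] {
if v, err := strconv.Atoi(p); err == nil {
platformIDs = append(platformIDs, int32(v))
}
}
log.ZDebug(ctx, "online change", "userID", userID, "platformIDs", platformIDs)
}
}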

@ -2,6 +2,7 @@ package redis
import (
"context"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
@ -109,7 +110,7 @@ func (rsm *RedisShardManager) ProcessKeysBySlot(
func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) {
slots := make(map[int64][]string)
clusterClient, isCluster := redisClient.(*redis.ClusterClient)
if isCluster {
if isCluster && len(keys) > 1 {
pipe := clusterClient.Pipeline()
cmds := make([]*redis.IntCmd, len(keys))
for i, key := range keys {
@ -195,3 +196,16 @@ func ProcessKeysBySlot(
}
return nil
}
func DeleteCacheBySlot(ctx context.Context, rcClient *rockscache.Client, keys []string) error {
switch len(keys) {
case 0:
return nil
case 1:
return rcClient.TagAsDeletedBatch2(ctx, keys)
default:
return ProcessKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), keys, func(ctx context.Context, slot int64, keys []string) error {
return rcClient.TagAsDeletedBatch2(ctx, keys)
})
}
}
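Grouping by slot matters because Redis Cluster rejects multi-key commands whose keys hash to different slots (CROSSSLOT), while the single-key case skips the slot lookup entirely. A hypothetical call site (conversation IDs are illustrative):
keys := []string{
cachekey.GetMallocMinSeqKey("conv_a"),
cachekey.GetMallocMinSeqKey("conv_b"),
}
if err := DeleteCacheBySlot(ctx, rcClient, keys); err != nil {
log.ZError(ctx, "invalidate min seq cache failed", err, "keys", keys)
}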

@ -1,200 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis
import (
"context"
"errors"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/stringutil"
"github.com/redis/go-redis/v9"
"sync"
)
func NewSeqCache(rdb redis.UniversalClient) cache.SeqCache {
return &seqCache{rdb: rdb}
}
type seqCache struct {
rdb redis.UniversalClient
}
func (c *seqCache) getMaxSeqKey(conversationID string) string {
return cachekey.GetMaxSeqKey(conversationID)
}
func (c *seqCache) getMinSeqKey(conversationID string) string {
return cachekey.GetMinSeqKey(conversationID)
}
func (c *seqCache) getHasReadSeqKey(conversationID string, userID string) string {
return cachekey.GetHasReadSeqKey(conversationID, userID)
}
func (c *seqCache) getConversationUserMinSeqKey(conversationID, userID string) string {
return cachekey.GetConversationUserMinSeqKey(conversationID, userID)
}
func (c *seqCache) setSeq(ctx context.Context, conversationID string, seq int64, getkey func(conversationID string) string) error {
return errs.Wrap(c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err())
}
func (c *seqCache) getSeq(ctx context.Context, conversationID string, getkey func(conversationID string) string) (int64, error) {
val, err := c.rdb.Get(ctx, getkey(conversationID)).Int64()
if err != nil {
return 0, errs.Wrap(err)
}
return val, nil
}
func (c *seqCache) getSeqs(ctx context.Context, items []string, getkey func(s string) string) (m map[string]int64, err error) {
m = make(map[string]int64, len(items))
var (
reverseMap = make(map[string]string, len(items))
keys = make([]string, len(items))
lock sync.Mutex
)
for i, v := range items {
keys[i] = getkey(v)
reverseMap[getkey(v)] = v
}
manager := NewRedisShardManager(c.rdb)
if err = manager.ProcessKeysBySlot(ctx, keys, func(ctx context.Context, _ int64, keys []string) error {
res, err := c.rdb.MGet(ctx, keys...).Result()
if err != nil && !errors.Is(err, redis.Nil) {
return errs.Wrap(err)
}
// len(res) <= len(items)
for i := range res {
strRes, ok := res[i].(string)
if !ok {
continue
}
val := stringutil.StringToInt64(strRes)
if val != 0 {
lock.Lock()
m[reverseMap[keys[i]]] = val
lock.Unlock()
}
}
return nil
}); err != nil {
return nil, err
}
return m, nil
}
func (c *seqCache) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
return c.setSeq(ctx, conversationID, maxSeq, c.getMaxSeqKey)
}
func (c *seqCache) GetMaxSeqs(ctx context.Context, conversationIDs []string) (m map[string]int64, err error) {
return c.getSeqs(ctx, conversationIDs, c.getMaxSeqKey)
}
func (c *seqCache) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
return c.getSeq(ctx, conversationID, c.getMaxSeqKey)
}
func (c *seqCache) SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error {
return c.setSeq(ctx, conversationID, minSeq, c.getMinSeqKey)
}
func (c *seqCache) setSeqs(ctx context.Context, seqs map[string]int64, getkey func(key string) string) error {
for conversationID, seq := range seqs {
if err := c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err(); err != nil {
return errs.Wrap(err)
}
}
return nil
}
func (c *seqCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
return c.setSeqs(ctx, seqs, c.getMinSeqKey)
}
func (c *seqCache) GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
return c.getSeqs(ctx, conversationIDs, c.getMinSeqKey)
}
func (c *seqCache) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
return c.getSeq(ctx, conversationID, c.getMinSeqKey)
}
func (c *seqCache) GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
val, err := c.rdb.Get(ctx, c.getConversationUserMinSeqKey(conversationID, userID)).Int64()
if err != nil {
return 0, errs.Wrap(err)
}
return val, nil
}
func (c *seqCache) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (m map[string]int64, err error) {
return c.getSeqs(ctx, userIDs, func(userID string) string {
return c.getConversationUserMinSeqKey(conversationID, userID)
})
}
func (c *seqCache) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
return errs.Wrap(c.rdb.Set(ctx, c.getConversationUserMinSeqKey(conversationID, userID), minSeq, 0).Err())
}
func (c *seqCache) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
return c.setSeqs(ctx, seqs, func(userID string) string {
return c.getConversationUserMinSeqKey(conversationID, userID)
})
}
func (c *seqCache) SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error) {
return c.setSeqs(ctx, seqs, func(conversationID string) string {
return c.getConversationUserMinSeqKey(conversationID, userID)
})
}
func (c *seqCache) SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
return errs.Wrap(c.rdb.Set(ctx, c.getHasReadSeqKey(conversationID, userID), hasReadSeq, 0).Err())
}
func (c *seqCache) SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error {
return c.setSeqs(ctx, hasReadSeqs, func(userID string) string {
return c.getHasReadSeqKey(conversationID, userID)
})
}
func (c *seqCache) UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error {
return c.setSeqs(ctx, hasReadSeqs, func(conversationID string) string {
return c.getHasReadSeqKey(conversationID, userID)
})
}
func (c *seqCache) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
return c.getSeqs(ctx, conversationIDs, func(conversationID string) string {
return c.getHasReadSeqKey(conversationID, userID)
})
}
func (c *seqCache) GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error) {
val, err := c.rdb.Get(ctx, c.getHasReadSeqKey(conversationID, userID)).Int64()
if err != nil {
return 0, err
}
return val, nil
}

@ -0,0 +1,333 @@
package redis
import (
"context"
"errors"
"fmt"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
"time"
)
func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache {
return &seqConversationCacheRedis{
rdb: rdb,
mgo: mgo,
lockTime: time.Second * 3,
dataTime: time.Hour * 24 * 365,
minSeqExpireTime: time.Hour,
rocks: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
}
}
type seqConversationCacheRedis struct {
rdb redis.UniversalClient
mgo database.SeqConversation
rocks *rockscache.Client
lockTime time.Duration
dataTime time.Duration
minSeqExpireTime time.Duration
}
func (s *seqConversationCacheRedis) getMinSeqKey(conversationID string) string {
return cachekey.GetMallocMinSeqKey(conversationID)
}
func (s *seqConversationCacheRedis) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
return s.SetMinSeqs(ctx, map[string]int64{conversationID: seq})
}
func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
return getCache(ctx, s.rocks, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
return s.mgo.GetMinSeq(ctx, conversationID)
})
}
func (s *seqConversationCacheRedis) getSingleMaxSeq(ctx context.Context, conversationID string) (map[string]int64, error) {
seq, err := s.GetMaxSeq(ctx, conversationID)
if err != nil {
return nil, err
}
return map[string]int64{conversationID: seq}, nil
}
func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error {
result := make([]*redis.StringCmd, len(keys))
pipe := s.rdb.Pipeline()
for i, key := range keys {
result[i] = pipe.HGet(ctx, key, "CURR")
}
if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
return errs.Wrap(err)
}
var notFoundKey []string
for i, r := range result {
seq, err := r.Int64()
if err == nil {
seqs[keyConversationID[keys[i]]] = seq
} else if errors.Is(err, redis.Nil) {
notFoundKey = append(notFoundKey, keys[i])
} else {
return errs.Wrap(err)
}
}
// read through (and cache) every conversation whose seq was not in redis
for _, key := range notFoundKey {
conversationID := keyConversationID[key]
seq, err := s.GetMaxSeq(ctx, conversationID)
if err != nil {
return err
}
seqs[conversationID] = seq
}
return nil
}
func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
switch len(conversationIDs) {
case 0:
return map[string]int64{}, nil
case 1:
return s.getSingleMaxSeq(ctx, conversationIDs[0])
}
keys := make([]string, 0, len(conversationIDs))
keyConversationID := make(map[string]string, len(conversationIDs))
for _, conversationID := range conversationIDs {
key := s.getSeqMallocKey(conversationID)
if _, ok := keyConversationID[key]; ok {
continue
}
keys = append(keys, key)
keyConversationID[key] = conversationID
}
if len(keys) == 1 {
return s.getSingleMaxSeq(ctx, conversationIDs[0])
}
slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
if err != nil {
return nil, err
}
seqs := make(map[string]int64, len(conversationIDs))
for _, keys := range slotKeys {
if err := s.batchGetMaxSeq(ctx, keys, keyConversationID, seqs); err != nil {
return nil, err
}
}
return seqs, nil
}
func (s *seqConversationCacheRedis) getSeqMallocKey(conversationID string) string {
return cachekey.GetMallocSeqKey(conversationID)
}
func (s *seqConversationCacheRedis) setSeq(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) (int64, error) {
if lastSeq < currSeq {
return 0, errs.New("lastSeq must be greater than or equal to currSeq")
}
// setSeq return states:
// 0: success, the lock we held was released and the seqs were written
// 1: the key had expired (lock gone), so the seqs were written fresh
// 2: the key is locked by someone else, nothing was written
script := `
local key = KEYS[1]
local lockValue = ARGV[1]
local dataSecond = ARGV[2]
local curr_seq = tonumber(ARGV[3])
local last_seq = tonumber(ARGV[4])
if redis.call("EXISTS", key) == 0 then
redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq)
redis.call("EXPIRE", key, dataSecond)
return 1
end
if redis.call("HGET", key, "LOCK") ~= lockValue then
return 2
end
redis.call("HDEL", key, "LOCK")
redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq)
redis.call("EXPIRE", key, dataSecond)
return 0
`
result, err := s.rdb.Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq).Int64()
if err != nil {
return 0, errs.Wrap(err)
}
return result, nil
}
// malloc: size=0 reads the current seq without allocating; size>0 allocates a block of size seqs
func (s *seqConversationCacheRedis) malloc(ctx context.Context, key string, size int64) ([]int64, error) {
// malloc return states (first element of the result):
// 0: success, served from cache
// 1: key missing, the lock was acquired and the caller must allocate from mongo
// 2: locked by another caller, retry later
// 3: the request exceeds the cached range, the lock was acquired and the caller must re-allocate
script := `
local key = KEYS[1]
local size = tonumber(ARGV[1])
local lockSecond = ARGV[2]
local dataSecond = ARGV[3]
local result = {}
if redis.call("EXISTS", key) == 0 then
local lockValue = math.random(0, 999999999)
redis.call("HSET", key, "LOCK", lockValue)
redis.call("EXPIRE", key, lockSecond)
table.insert(result, 1)
table.insert(result, lockValue)
return result
end
if redis.call("HEXISTS", key, "LOCK") == 1 then
table.insert(result, 2)
return result
end
local curr_seq = tonumber(redis.call("HGET", key, "CURR"))
local last_seq = tonumber(redis.call("HGET", key, "LAST"))
if size == 0 then
redis.call("EXPIRE", key, dataSecond)
table.insert(result, 0)
table.insert(result, curr_seq)
table.insert(result, last_seq)
return result
end
local max_seq = curr_seq + size
if max_seq > last_seq then
local lockValue = math.random(0, 999999999)
redis.call("HSET", key, "LOCK", lockValue)
redis.call("HSET", key, "CURR", last_seq)
redis.call("EXPIRE", key, lockSecond)
table.insert(result, 3)
table.insert(result, curr_seq)
table.insert(result, last_seq)
table.insert(result, lockValue)
return result
end
redis.call("HSET", key, "CURR", max_seq)
redis.call("EXPIRE", key, dataSecond)
table.insert(result, 0)
table.insert(result, curr_seq)
table.insert(result, last_seq)
return result
`
result, err := s.rdb.Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second)).Int64Slice()
if err != nil {
return nil, errs.Wrap(err)
}
return result, nil
}
func (s *seqConversationCacheRedis) wait(ctx context.Context) error {
timer := time.NewTimer(time.Second / 4)
defer timer.Stop()
select {
case <-timer.C:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func (s *seqConversationCacheRedis) setSeqRetry(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) {
for i := 0; i < 10; i++ {
state, err := s.setSeq(ctx, key, owner, currSeq, lastSeq)
if err != nil {
log.ZError(ctx, "set seq cache failed", err, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq, "count", i+1)
if err := s.wait(ctx); err != nil {
return
}
continue
}
switch state {
case 0: // ideal state
case 1:
log.ZWarn(ctx, "set seq cache lock not found", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
case 2:
log.ZWarn(ctx, "set seq cache lock to be held by someone else", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
default:
log.ZError(ctx, "set seq cache lock unknown state", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
}
return
}
log.ZError(ctx, "set seq cache retrying still failed", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
}
func (s *seqConversationCacheRedis) getMallocSize(conversationID string, size int64) int64 {
if size == 0 {
return 0
}
var basicSize int64
if msgprocessor.IsGroupConversationID(conversationID) {
basicSize = 100
} else {
basicSize = 50
}
basicSize += size
return basicSize
}
func (s *seqConversationCacheRedis) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
if size < 0 {
return 0, errs.New("size must be non-negative")
}
key := s.getSeqMallocKey(conversationID)
for i := 0; i < 10; i++ {
states, err := s.malloc(ctx, key, size)
if err != nil {
return 0, err
}
switch states[0] {
case 0: // success
return states[1], nil
case 1: // not found
mallocSize := s.getMallocSize(conversationID, size)
seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
if err != nil {
return 0, err
}
s.setSeqRetry(ctx, key, states[1], seq+size, seq+mallocSize)
return seq, nil
case 2: // locked
if err := s.wait(ctx); err != nil {
return 0, err
}
continue
case 3: // exceeded cache max value
currSeq := states[1]
lastSeq := states[2]
mallocSize := s.getMallocSize(conversationID, size)
seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
if err != nil {
return 0, err
}
if lastSeq == seq {
s.setSeqRetry(ctx, key, states[3], currSeq+size, seq+mallocSize)
return currSeq, nil
} else {
log.ZWarn(ctx, "malloc seq not equal cache last seq", nil, "conversationID", conversationID, "currSeq", currSeq, "lastSeq", lastSeq, "mallocSeq", seq)
s.setSeqRetry(ctx, key, states[3], seq+size, seq+mallocSize)
return seq, nil
}
default:
log.ZError(ctx, "malloc seq unknown state", nil, "state", states[0], "conversationID", conversationID, "size", size)
return 0, errs.New(fmt.Sprintf("unknown state: %d", states[0]))
}
}
log.ZError(ctx, "malloc seq retrying still failed", nil, "conversationID", conversationID, "size", size)
return 0, errs.New("malloc seq waiting for lock timeout", "conversationID", conversationID, "size", size)
}
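A worked trace of the state machine above, with illustrative numbers (size=10, a brand-new group conversation, mongo seq starting at 0):
// call 1: key missing -> state 1; mongo reserves 110 (10 + 100 group headroom);
// the cache becomes CURR=10, LAST=110; returns 0, so messages take seqs 1..10
// calls 2..10: state 0; CURR advances 20, 30, ..., 110 with no mongo round trip
// call 11: CURR+10 > LAST -> state 3; the key is re-locked, mongo reserves the
// next 110 seqs, and the cache is rewritten via setSeqRetry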
func (s *seqConversationCacheRedis) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
return s.Malloc(ctx, conversationID, 0)
}
func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
keys := make([]string, 0, len(seqs))
for conversationID, seq := range seqs {
keys = append(keys, s.getMinSeqKey(conversationID))
if err := s.mgo.SetMinSeq(ctx, conversationID, seq); err != nil {
return err
}
}
return DeleteCacheBySlot(ctx, s.rocks, keys)
}

@ -0,0 +1,109 @@
package redis
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/redis/go-redis/v9"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
)
func newTestSeq() *seqConversationCacheRedis {
mgocli, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))
if err != nil {
panic(err)
}
model, err := mgo.NewSeqConversationMongo(mgocli.Database("openim_v3"))
if err != nil {
panic(err)
}
opt := &redis.Options{
Addr: "172.16.8.48:16379",
Password: "openIM123",
DB: 1,
}
rdb := redis.NewClient(opt)
if err := rdb.Ping(context.Background()).Err(); err != nil {
panic(err)
}
return NewSeqConversationCacheRedis(rdb, model).(*seqConversationCacheRedis)
}
func TestSeq(t *testing.T) {
ts := newTestSeq()
var (
wg sync.WaitGroup
speed atomic.Int64
)
const count = 128
wg.Add(count)
for i := 0; i < count; i++ {
index := i + 1
go func() {
defer wg.Done()
var size int64 = 10
cID := strconv.Itoa(index)
for i := 1; ; i++ {
//first, err := ts.mgo.Malloc(context.Background(), cID, size) // mongo
first, err := ts.Malloc(context.Background(), cID, size) // redis
if err != nil {
t.Logf("[%d-%d] %s %s", index, i, cID, err)
return
}
speed.Add(size)
_ = first
//t.Logf("[%d] %d -> %d", i, first+1, first+size)
}
}()
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
ticker := time.NewTicker(time.Second)
for {
select {
case <-done:
ticker.Stop()
return
case <-ticker.C:
value := speed.Swap(0)
t.Logf("speed: %d/s", value)
}
}
}
func TestDel(t *testing.T) {
ts := newTestSeq()
for i := 1; i < 100; i++ {
var size int64 = 100
first, err := ts.Malloc(context.Background(), "100", size)
if err != nil {
t.Logf("[%d] %s", i, err)
return
}
t.Logf("[%d] %d -> %d", i, first+1, first+size)
time.Sleep(time.Second)
}
}
func TestSeqMalloc(t *testing.T) {
ts := newTestSeq()
t.Log(ts.GetMaxSeq(context.Background(), "100"))
}
func TestMinSeq(t *testing.T) {
ts := newTestSeq()
t.Log(ts.GetMinSeq(context.Background(), "10000000"))
}

@ -0,0 +1,185 @@
package redis
import (
"context"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/tools/errs"
"github.com/redis/go-redis/v9"
"strconv"
"time"
)
func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser {
return &seqUserCacheRedis{
rdb: rdb,
mgo: mgo,
readSeqWriteRatio: 100,
expireTime: time.Hour * 24 * 7,
readExpireTime: time.Hour * 24 * 30,
rocks: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
}
}
type seqUserCacheRedis struct {
rdb redis.UniversalClient
mgo database.SeqUser
rocks *rockscache.Client
expireTime time.Duration
readExpireTime time.Duration
readSeqWriteRatio int64
}
func (s *seqUserCacheRedis) getSeqUserMaxSeqKey(conversationID string, userID string) string {
return cachekey.GetSeqUserMaxSeqKey(conversationID, userID)
}
func (s *seqUserCacheRedis) getSeqUserMinSeqKey(conversationID string, userID string) string {
return cachekey.GetSeqUserMinSeqKey(conversationID, userID)
}
func (s *seqUserCacheRedis) getSeqUserReadSeqKey(conversationID string, userID string) string {
return cachekey.GetSeqUserReadSeqKey(conversationID, userID)
}
func (s *seqUserCacheRedis) GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
return getCache(ctx, s.rocks, s.getSeqUserMaxSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
return s.mgo.GetMaxSeq(ctx, conversationID, userID)
})
}
func (s *seqUserCacheRedis) SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
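// Write-through: persist to mongo first, then tag the cache entry as deleted so
// the next read repopulates it.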
if err := s.mgo.SetMaxSeq(ctx, conversationID, userID, seq); err != nil {
return err
}
return s.rocks.TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
}
func (s *seqUserCacheRedis) GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
return getCache(ctx, s.rocks, s.getSeqUserMinSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
return s.mgo.GetMinSeq(ctx, conversationID, userID)
})
}
func (s *seqUserCacheRedis) SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
return s.SetMinSeqs(ctx, userID, map[string]int64{conversationID: seq})
}
func (s *seqUserCacheRedis) GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
return getCache(ctx, s.rocks, s.getSeqUserReadSeqKey(conversationID, userID), s.readExpireTime, func(ctx context.Context) (int64, error) {
return s.mgo.GetReadSeq(ctx, conversationID, userID)
})
}
func (s *seqUserCacheRedis) SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
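// Read seqs advance on every message, so mongo is written only once every
// readSeqWriteRatio seqs; redis always holds the latest value.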
if seq%s.readSeqWriteRatio == 0 {
if err := s.mgo.SetReadSeq(ctx, conversationID, userID, seq); err != nil {
return err
}
}
if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.FormatInt(seq, 10), s.readExpireTime); err != nil {
return errs.Wrap(err)
}
return nil
}
func (s *seqUserCacheRedis) SetMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
keys := make([]string, 0, len(seqs))
for conversationID, seq := range seqs {
if err := s.mgo.SetMinSeq(ctx, conversationID, userID, seq); err != nil {
return err
}
keys = append(keys, s.getSeqUserMinSeqKey(conversationID, userID))
}
return DeleteCacheBySlot(ctx, s.rocks, keys)
}
func (s *seqUserCacheRedis) setRedisReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
keys := make([]string, 0, len(seqs))
keySeq := make(map[string]int64)
for conversationID, seq := range seqs {
key := s.getSeqUserReadSeqKey(conversationID, userID)
keys = append(keys, key)
keySeq[key] = seq
}
slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
if err != nil {
return err
}
for _, keys := range slotKeys {
pipe := s.rdb.Pipeline()
for _, key := range keys {
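// Write directly in rockscache's hash layout (the "value" field) so reads that
// go through rockscache see the fresh seq.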
pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10))
pipe.Expire(ctx, key, s.readExpireTime)
}
if _, err := pipe.Exec(ctx); err != nil {
return err
}
}
return nil
}
func (s *seqUserCacheRedis) SetReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
if len(seqs) == 0 {
return nil
}
if err := s.setRedisReadSeqs(ctx, userID, seqs); err != nil {
return err
}
for conversationID, seq := range seqs {
if seq%s.readSeqWriteRatio == 0 {
if err := s.mgo.SetReadSeq(ctx, conversationID, userID, seq); err != nil {
return err
}
}
}
return nil
}
func (s *seqUserCacheRedis) GetReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
res, err := batchGetCache2(ctx, s.rocks, s.readExpireTime, conversationIDs, func(conversationID string) string {
return s.getSeqUserReadSeqKey(conversationID, userID)
}, func(v *readSeqModel) string {
return v.ConversationID
}, func(ctx context.Context, conversationIDs []string) ([]*readSeqModel, error) {
seqs, err := s.mgo.GetReadSeqs(ctx, userID, conversationIDs)
if err != nil {
return nil, err
}
res := make([]*readSeqModel, 0, len(seqs))
for conversationID, seq := range seqs {
res = append(res, &readSeqModel{ConversationID: conversationID, Seq: seq})
}
return res, nil
})
if err != nil {
return nil, err
}
data := make(map[string]int64)
for _, v := range res {
data[v.ConversationID] = v.Seq
}
return data, nil
}
var _ BatchCacheCallback[string] = (*readSeqModel)(nil)
type readSeqModel struct {
ConversationID string
Seq int64
}
func (r *readSeqModel) BatchCache(conversationID string) {
r.ConversationID = conversationID
}
func (r *readSeqModel) UnmarshalJSON(bytes []byte) (err error) {
r.Seq, err = strconv.ParseInt(string(bytes), 10, 64)
return
}
func (r *readSeqModel) MarshalJSON() ([]byte, error) {
return []byte(strconv.FormatInt(r.Seq, 10)), nil
}
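A round-trip sketch for readSeqModel (values illustrative): the raw cached value is just the seq digits, and batchGetCache2 injects the conversation ID via BatchCache.
var m readSeqModel
_ = m.UnmarshalJSON([]byte("42")) // m.Seq == 42
m.BatchCache("conv_a") // m.ConversationID == "conv_a"
data, _ := m.MarshalJSON() // data == []byte("42")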

@ -0,0 +1,79 @@
package redis
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/redis/go-redis/v9"
"log"
"strconv"
"sync/atomic"
"testing"
"time"
)
func newTestOnline() *userOnline {
opt := &redis.Options{
Addr: "172.16.8.48:16379",
Password: "openIM123",
DB: 0,
}
rdb := redis.NewClient(opt)
if err := rdb.Ping(context.Background()).Err(); err != nil {
panic(err)
}
return &userOnline{rdb: rdb, expire: time.Hour, channelName: "user_online"}
}
func TestOnline(t *testing.T) {
ts := newTestOnline()
var count atomic.Int64
for i := 0; i < 64; i++ {
go func(userID string) {
var err error
for i := 0; ; i++ {
if i%2 == 0 {
err = ts.SetUserOnline(context.Background(), userID, []int32{5, 6}, []int32{7, 8, 9})
} else {
err = ts.SetUserOnline(context.Background(), userID, []int32{1, 2, 3}, []int32{4, 5, 6})
}
if err != nil {
panic(err)
}
count.Add(1)
}
}(strconv.Itoa(10000 + i))
}
ticker := time.NewTicker(time.Second)
for range ticker.C {
t.Log(count.Swap(0))
}
}
func TestGetOnline(t *testing.T) {
ts := newTestOnline()
ctx := context.Background()
pIDs, err := ts.GetOnline(ctx, "10000")
if err != nil {
panic(err)
}
t.Log(pIDs)
}
func TestRecvOnline(t *testing.T) {
ts := newTestOnline()
ctx := context.Background()
pubsub := ts.rdb.Subscribe(ctx, cachekey.OnlineChannel)
_, err := pubsub.Receive(ctx)
if err != nil {
log.Fatalf("Could not subscribe: %v", err)
}
ch := pubsub.Channel()
for msg := range ch {
fmt.Printf("Received message from channel %s: %s\n", msg.Channel, msg.Payload)
}
}

@ -16,21 +16,14 @@ package redis
import (
"context"
"encoding/json"
"errors"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/user"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
"hash/crc32"
"strconv"
"time"
)
@ -61,8 +54,8 @@ func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache,
}
}
func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string {
return cachekey.GetOnlineStatusKey(modKey)
func (u *UserCacheRedis) getUserID(user *model.User) string {
return user.UserID
}
func (u *UserCacheRedis) CloneUserCache() cache.UserCache {
@ -90,11 +83,7 @@ func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userIn
}
func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*model.User, error) {
return batchGetCache(ctx, u.rcClient, u.expireTime, userIDs, func(userID string) string {
return u.getUserInfoKey(userID)
}, func(ctx context.Context, userID string) (*model.User, error) {
return u.userDB.Take(ctx, userID)
})
return batchGetCache2(ctx, u.rcClient, u.expireTime, userIDs, u.getUserInfoKey, u.getUserID, u.userDB.Find)
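// (batchGetCache2 args: key builder, primary-key extractor, batched fetcher for misses.)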
}
func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) cache.UserCache {
@ -130,174 +119,3 @@ func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) cache.UserC
return cache
}
// GetUserStatus get user status.
func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
userStatus := make([]*user.OnlineStatus, 0, len(userIDs))
for _, userID := range userIDs {
UserIDNum := crc32.ChecksumIEEE([]byte(userID))
modKey := strconv.Itoa(int(UserIDNum % statusMod))
var onlineStatus user.OnlineStatus
key := u.getOnlineStatusKey(modKey)
result, err := u.rdb.HGet(ctx, key, userID).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
// key or field does not exist
userStatus = append(userStatus, &user.OnlineStatus{
UserID: userID,
Status: constant.Offline,
PlatformIDs: nil,
})
continue
} else {
return nil, errs.Wrap(err)
}
}
err = json.Unmarshal([]byte(result), &onlineStatus)
if err != nil {
return nil, errs.Wrap(err)
}
onlineStatus.UserID = userID
onlineStatus.Status = constant.Online
userStatus = append(userStatus, &onlineStatus)
}
return userStatus, nil
}
// SetUserStatus Set the user status and save it in redis.
func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
UserIDNum := crc32.ChecksumIEEE([]byte(userID))
modKey := strconv.Itoa(int(UserIDNum % statusMod))
key := u.getOnlineStatusKey(modKey)
log.ZDebug(ctx, "SetUserStatus args", "userID", userID, "status", status, "platformID", platformID, "modKey", modKey, "key", key)
isNewKey, err := u.rdb.Exists(ctx, key).Result()
if err != nil {
return errs.Wrap(err)
}
if isNewKey == 0 {
if status == constant.Online {
onlineStatus := user.OnlineStatus{
UserID: userID,
Status: constant.Online,
PlatformIDs: []int32{platformID},
}
jsonData, err := json.Marshal(&onlineStatus)
if err != nil {
return errs.Wrap(err)
}
_, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
if err != nil {
return errs.Wrap(err)
}
u.rdb.Expire(ctx, key, userOlineStatusExpireTime)
return nil
}
}
isNil := false
result, err := u.rdb.HGet(ctx, key, userID).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
isNil = true
} else {
return errs.Wrap(err)
}
}
if status == constant.Offline {
err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key)
if err != nil {
return err
}
} else {
err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key)
if err != nil {
return errs.Wrap(err)
}
}
return nil
}
func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error {
if isNil {
log.ZWarn(ctx, "this user not online,maybe trigger order not right",
err, "userStatus", status)
return nil
}
var onlineStatus user.OnlineStatus
err = json.Unmarshal([]byte(result), &onlineStatus)
if err != nil {
return errs.Wrap(err)
}
var newPlatformIDs []int32
for _, val := range onlineStatus.PlatformIDs {
if val != platformID {
newPlatformIDs = append(newPlatformIDs, val)
}
}
if newPlatformIDs == nil {
_, err = u.rdb.HDel(ctx, key, userID).Result()
if err != nil {
return errs.Wrap(err)
}
} else {
onlineStatus.PlatformIDs = newPlatformIDs
newjsonData, err := json.Marshal(&onlineStatus)
if err != nil {
return errs.Wrap(err)
}
_, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
if err != nil {
return errs.Wrap(err)
}
}
return nil
}
func (u *UserCacheRedis) refreshStatusOnline(ctx context.Context, userID string, platformID int32, isNil bool, err error, result, key string) error {
var onlineStatus user.OnlineStatus
if !isNil {
err := json.Unmarshal([]byte(result), &onlineStatus)
if err != nil {
return errs.Wrap(err)
}
onlineStatus.PlatformIDs = RemoveRepeatedElementsInList(append(onlineStatus.PlatformIDs, platformID))
} else {
onlineStatus.PlatformIDs = append(onlineStatus.PlatformIDs, platformID)
}
onlineStatus.Status = constant.Online
onlineStatus.UserID = userID
newjsonData, err := json.Marshal(&onlineStatus)
if err != nil {
return errs.WrapMsg(err, "json.Marshal failed")
}
_, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
if err != nil {
return errs.Wrap(err)
}
return nil
}
type Comparable interface {
~int | ~string | ~float64 | ~int32
}
func RemoveRepeatedElementsInList[T Comparable](slc []T) []T {
var result []T
tempMap := map[T]struct{}{}
for _, e := range slc {
if _, found := tempMap[e]; !found {
tempMap[e] = struct{}{}
result = append(result, e)
}
}
return result
}

@ -1,30 +0,0 @@
package cache
import (
"context"
)
type SeqCache interface {
SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
SetMinSeqs(ctx context.Context, seqs map[string]int64) error
GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
GetMinSeq(ctx context.Context, conversationID string) (int64, error)
GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
// seqs map: key userID value minSeq
SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
// seqs map: key conversationID value minSeq
SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
// has read seq
SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
// k: user, v: seq
SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error
// k: conversation, v :seq
UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
}

@ -0,0 +1,12 @@
package cache
import "context"
type SeqConversationCache interface {
Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
SetMinSeq(ctx context.Context, conversationID string, seq int64) error
GetMinSeq(ctx context.Context, conversationID string) (int64, error)
GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
SetMinSeqs(ctx context.Context, seqs map[string]int64) error
}
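A sketch of the intended allocation pattern (variable names illustrative; the real call site is BatchInsertChat2Cache below): reserve the whole batch in one call, then number the messages after the returned watermark.
first, err := seqConversation.Malloc(ctx, conversationID, int64(len(msgs)))
if err != nil {
return err
}
for i, m := range msgs {
m.Seq = first + int64(i) + 1
}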

@ -0,0 +1,15 @@
package cache
import "context"
type SeqUser interface {
GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
SetMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
SetReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error
GetReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
}

@ -17,7 +17,6 @@ package cache
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/user"
)
type UserCache interface {
@ -28,6 +27,6 @@ type UserCache interface {
DelUsersInfo(userIDs ...string) UserCache
GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error)
DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache
GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
//GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
//SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
}

@ -69,26 +69,19 @@ type CommonMsgDatabase interface {
DeleteUserMsgsBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) error
// DeleteMsgsPhysicalBySeqs physically deletes messages by emptying them based on sequence numbers.
DeleteMsgsPhysicalBySeqs(ctx context.Context, conversationID string, seqs []int64) error
SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
//SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
SetMinSeqs(ctx context.Context, seqs map[string]int64) error
GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
GetMinSeq(ctx context.Context, conversationID string) (int64, error)
GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error)
SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
GetMongoMaxAndMinSeq(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo int64, err error)
GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error)
//GetMongoMaxAndMinSeq(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo int64, err error)
//GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error)
SetSendMsgStatus(ctx context.Context, id string, status int32) error
GetSendMsgStatus(ctx context.Context, id string) (int32, error)
SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int32, msgData []*sdkws.MsgData, err error)
@ -108,7 +101,7 @@ type CommonMsgDatabase interface {
DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error)
}
func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seq cache.SeqCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil {
return nil, err
@ -128,29 +121,20 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seq cach
return &commonMsgDatabase{
msgDocDatabase: msgDocModel,
msg: msg,
seq: seq,
seqUser: seqUser,
seqConversation: seqConversation,
producer: producerToRedis,
producerToMongo: producerToMongo,
producerToPush: producerToPush,
}, nil
}
//func InitCommonMsgDatabase(rdb redis.UniversalClient, database *mongo.Database, config *tools.CronTaskConfig) (CommonMsgDatabase, error) {
// msgDocModel, err := database.NewMsgMongo(database)
// if err != nil {
// return nil, err
// }
// //todo MsgCacheTimeout
// msg := cache.NewMsgCache(rdb, 86400, config.RedisConfig.EnablePipeline)
// seq := cache.NewSeqCache(rdb)
// return NewCommonMsgDatabase(msgDocModel, msg, seq, &config.KafkaConfig)
//}
type commonMsgDatabase struct {
msgDocDatabase database.Msg
msgTable model.MsgDocModel
msg cache.MsgCache
seq cache.SeqCache
seqConversation cache.SeqConversationCache
seqUser cache.SeqUser
producer *kafka.Producer
producerToMongo *kafka.Producer
producerToPush *kafka.Producer
@ -348,12 +332,16 @@ func (db *commonMsgDatabase) DeleteMessagesFromCache(ctx context.Context, conver
return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs)
}
func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
currentMaxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
if err != nil && errs.Unwrap(err) != redis.Nil {
log.ZError(ctx, "storage.seq.GetMaxSeq", err)
return 0, false, err
func (db *commonMsgDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error {
for userID, seq := range userSeqMap {
if err := db.seqUser.SetReadSeq(ctx, conversationID, userID, seq); err != nil {
return err
}
}
return nil
}
func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
lenList := len(msgs)
if int64(lenList) > db.msgTable.GetSingleGocMsgNum() {
return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap()
@ -361,9 +349,12 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
if lenList < 1 {
return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap()
}
if errs.Unwrap(err) == redis.Nil {
isNew = true
currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs)))
if err != nil {
log.ZError(ctx, "storage.seq.Malloc", err)
return 0, false, err
}
isNew = currentMaxSeq == 0
lastMaxSeq := currentMaxSeq
userSeqMap := make(map[string]int64)
for _, m := range msgs {
@ -379,14 +370,7 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
} else {
prommetrics.MsgInsertRedisSuccessCounter.Inc()
}
err = db.seq.SetMaxSeq(ctx, conversationID, currentMaxSeq)
if err != nil {
log.ZError(ctx, "storage.seq.SetMaxSeq error", err, "conversationID", conversationID)
prommetrics.SeqSetFailedCounter.Inc()
}
err = db.seq.SetHasReadSeqs(ctx, conversationID, userSeqMap)
err = db.setHasReadSeqs(ctx, conversationID, userSeqMap)
if err != nil {
log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID)
prommetrics.SeqSetFailedCounter.Inc()
@ -514,12 +498,12 @@ func (db *commonMsgDatabase) getMsgBySeqsRange(ctx context.Context, userID strin
// "userMinSeq" can be set as the same value as the conversation's "maxSeq" at the moment they join the group.
// This ensures that their message retrieval starts from the point they joined.
func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (int64, int64, []*sdkws.MsgData, error) {
userMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
userMinSeq, err := db.seqUser.GetMinSeq(ctx, conversationID, userID)
if err != nil && errs.Unwrap(err) != redis.Nil {
return 0, 0, nil, err
}
minSeq, err := db.seq.GetMinSeq(ctx, conversationID)
if err != nil && errs.Unwrap(err) != redis.Nil {
minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
if err != nil {
return 0, 0, nil, err
}
if userMinSeq > minSeq {
@ -530,8 +514,8 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
log.ZWarn(ctx, "minSeq > end", errs.New("minSeq>end"), "minSeq", minSeq, "end", end)
return 0, 0, nil, nil
}
maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
if err != nil && errs.Unwrap(err) != redis.Nil {
maxSeq, err := db.seqConversation.GetMaxSeq(ctx, conversationID)
if err != nil {
return 0, 0, nil, err
}
log.ZDebug(ctx, "GetMsgBySeqsRange", "userMinSeq", userMinSeq, "conMinSeq", minSeq, "conMaxSeq", maxSeq, "userMaxSeq", userMaxSeq)
@ -571,11 +555,8 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
var successMsgs []*sdkws.MsgData
log.ZDebug(ctx, "GetMsgBySeqsRange", "first seqs", seqs, "newBegin", newBegin, "newEnd", newEnd)
cachedMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, seqs)
if err != nil {
if err != redis.Nil {
log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
}
if err != nil && !errors.Is(err, redis.Nil) {
log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
}
successMsgs = append(successMsgs, cachedMsgs...)
log.ZDebug(ctx, "get msgs from cache", "cachedMsgs", cachedMsgs)
@ -595,16 +576,16 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
}
func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) (int64, int64, []*sdkws.MsgData, error) {
userMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
userMinSeq, err := db.seqUser.GetMinSeq(ctx, conversationID, userID)
if err != nil && errs.Unwrap(err) != redis.Nil {
return 0, 0, nil, err
}
minSeq, err := db.seq.GetMinSeq(ctx, conversationID)
if err != nil && errs.Unwrap(err) != redis.Nil {
minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
if err != nil {
return 0, 0, nil, err
}
maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
if err != nil && errs.Unwrap(err) != redis.Nil {
maxSeq, err := db.seqConversation.GetMaxSeq(ctx, conversationID)
if err != nil {
return 0, 0, nil, err
}
if userMinSeq < minSeq {
@ -648,7 +629,7 @@ func (db *commonMsgDatabase) DeleteConversationMsgsAndSetMinSeq(ctx context.Cont
if minSeq == 0 {
return nil
}
return db.seq.SetMinSeq(ctx, conversationID, minSeq)
return db.seqConversation.SetMinSeq(ctx, conversationID, minSeq)
}
func (db *commonMsgDatabase) UserMsgsDestruct(ctx context.Context, userID string, conversationID string, destructTime int64, lastMsgDestructTime time.Time) (seqs []int64, err error) {
@ -693,12 +674,12 @@ func (db *commonMsgDatabase) UserMsgsDestruct(ctx context.Context, userID string
log.ZDebug(ctx, "UserMsgsDestruct", "conversationID", conversationID, "userID", userID, "seqs", seqs)
if len(seqs) > 0 {
userMinSeq := seqs[len(seqs)-1] + 1
currentUserMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
if err != nil && errs.Unwrap(err) != redis.Nil {
currentUserMinSeq, err := db.seqUser.GetMinSeq(ctx, conversationID, userID)
if err != nil {
return nil, err
}
if currentUserMinSeq < userMinSeq {
if err := db.seq.SetConversationUserMinSeq(ctx, conversationID, userID, userMinSeq); err != nil {
if err := db.seqUser.SetMinSeq(ctx, conversationID, userID, userMinSeq); err != nil {
return nil, err
}
}
@ -796,89 +777,40 @@ func (db *commonMsgDatabase) DeleteUserMsgsBySeqs(ctx context.Context, userID st
return nil
}
func (db *commonMsgDatabase) DeleteMsgsBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
return nil
}
func (db *commonMsgDatabase) CleanUpUserConversationsMsgs(ctx context.Context, user string, conversationIDs []string) {
for _, conversationID := range conversationIDs {
maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
if err != nil {
if err == redis.Nil {
log.ZDebug(ctx, "max seq is nil", "conversationID", conversationID)
} else {
log.ZError(ctx, "get max seq failed", err, "conversationID", conversationID)
}
continue
}
if err := db.seq.SetMinSeq(ctx, conversationID, maxSeq+1); err != nil {
log.ZError(ctx, "set min seq failed", err, "conversationID", conversationID, "minSeq", maxSeq+1)
}
}
}
func (db *commonMsgDatabase) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
return db.seq.SetMaxSeq(ctx, conversationID, maxSeq)
}
func (db *commonMsgDatabase) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
return db.seq.GetMaxSeqs(ctx, conversationIDs)
return db.seqConversation.GetMaxSeqs(ctx, conversationIDs)
}
func (db *commonMsgDatabase) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
return db.seq.GetMaxSeq(ctx, conversationID)
return db.seqConversation.GetMaxSeq(ctx, conversationID)
}
func (db *commonMsgDatabase) SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error {
return db.seq.SetMinSeq(ctx, conversationID, minSeq)
return db.seqConversation.SetMinSeq(ctx, conversationID, minSeq)
}
func (db *commonMsgDatabase) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
return db.seq.SetMinSeqs(ctx, seqs)
}
func (db *commonMsgDatabase) GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
return db.seq.GetMinSeqs(ctx, conversationIDs)
}
func (db *commonMsgDatabase) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
return db.seq.GetMinSeq(ctx, conversationID)
}
func (db *commonMsgDatabase) GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
return db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
}
func (db *commonMsgDatabase) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error) {
return db.seq.GetConversationUserMinSeqs(ctx, conversationID, userIDs)
}
func (db *commonMsgDatabase) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
return db.seq.SetConversationUserMinSeq(ctx, conversationID, userID, minSeq)
}
func (db *commonMsgDatabase) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
return db.seq.SetConversationUserMinSeqs(ctx, conversationID, seqs)
return db.seqConversation.SetMinSeqs(ctx, seqs)
}
func (db *commonMsgDatabase) SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
return db.seq.SetUserConversationsMinSeqs(ctx, userID, seqs)
return db.seqUser.SetMinSeqs(ctx, userID, seqs)
}
func (db *commonMsgDatabase) UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error {
return db.seq.UserSetHasReadSeqs(ctx, userID, hasReadSeqs)
return db.seqUser.SetReadSeqs(ctx, userID, hasReadSeqs)
}
func (db *commonMsgDatabase) SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
return db.seq.SetHasReadSeq(ctx, userID, conversationID, hasReadSeq)
return db.seqUser.SetReadSeq(ctx, conversationID, userID, hasReadSeq)
}
func (db *commonMsgDatabase) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
return db.seq.GetHasReadSeqs(ctx, userID, conversationIDs)
return db.seqUser.GetReadSeqs(ctx, userID, conversationIDs)
}
func (db *commonMsgDatabase) GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error) {
return db.seq.GetHasReadSeq(ctx, userID, conversationID)
return db.seqUser.GetReadSeq(ctx, conversationID, userID)
}
func (db *commonMsgDatabase) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
@ -894,11 +826,11 @@ func (db *commonMsgDatabase) GetConversationMinMaxSeqInMongoAndCache(ctx context
if err != nil {
return
}
minSeqCache, err = db.seq.GetMinSeq(ctx, conversationID)
minSeqCache, err = db.seqConversation.GetMinSeq(ctx, conversationID)
if err != nil {
return
}
maxSeqCache, err = db.seq.GetMaxSeq(ctx, conversationID)
maxSeqCache, err = db.seqConversation.GetMaxSeq(ctx, conversationID)
if err != nil {
return
}
@ -1010,33 +942,8 @@ func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, d
}
}
//func (db *commonMsgDatabase) ClearMsg(ctx context.Context, ts int64) (err error) {
// var (
// docNum int
// msgNum int
// start = time.Now()
// )
// for {
// msgs, err := db.msgDocDatabase.GetBeforeMsg(ctx, ts, 100)
// if err != nil {
// return err
// }
// if len(msgs) == 0 {
// return nil
// }
// for _, msg := range msgs {
// num, err := db.deleteOneMsg(ctx, ts, msg)
// if err != nil {
// return err
// }
// docNum++
// msgNum += num
// }
// }
//}
func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID string, seq int64) error {
dbSeq, err := db.seq.GetMinSeq(ctx, conversationID)
dbSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
if err != nil {
if errors.Is(errs.Unwrap(err), redis.Nil) {
return nil
@ -1046,5 +953,5 @@ func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID strin
if dbSeq >= seq {
return nil
}
return db.seq.SetMinSeq(ctx, conversationID, seq)
return db.seqConversation.SetMinSeq(ctx, conversationID, seq)
}

@ -16,13 +16,15 @@ package controller
import (
"context"
redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"path/filepath"
"time"
redisCache "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/db/pagination"
"github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cont"
"github.com/redis/go-redis/v9"
@ -38,20 +40,27 @@ type S3Database interface {
SetObject(ctx context.Context, info *model.Object) error
StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error)
FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error)
FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error)
DeleteObject(ctx context.Context, name string) error
DeleteSpecifiedData(ctx context.Context, engine string, name string) error
FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error)
DelS3Key(ctx context.Context, engine string, keys ...string) error
}
func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj database.ObjectInfo) S3Database {
return &s3Database{
s3: cont.New(redis2.NewS3Cache(rdb, s3), s3),
cache: redis2.NewObjectCacheRedis(rdb, obj),
db: obj,
s3: cont.New(redisCache.NewS3Cache(rdb, s3), s3),
cache: redisCache.NewObjectCacheRedis(rdb, obj),
s3cache: redisCache.NewS3Cache(rdb, s3),
db: obj,
}
}
type s3Database struct {
s3 *cont.Controller
cache cache.ObjectCache
db database.ObjectInfo
s3 *cont.Controller
cache cache.ObjectCache
s3cache cont.S3Cache
db database.ObjectInfo
}
func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
@ -111,3 +120,22 @@ func (s *s3Database) StatObject(ctx context.Context, name string) (*s3.ObjectInf
func (s *s3Database) FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error) {
return s.s3.FormData(ctx, name, size, contentType, duration)
}
func (s *s3Database) FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) {
return s.db.FindByExpires(ctx, duration, pagination)
}
func (s *s3Database) DeleteObject(ctx context.Context, name string) error {
return s.s3.DeleteObject(ctx, name)
}
func (s *s3Database) DeleteSpecifiedData(ctx context.Context, engine string, name string) error {
return s.db.Delete(ctx, engine, name)
}
func (s *s3Database) FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error) {
return s.db.FindNotDelByS3(ctx, key, duration)
}
func (s *s3Database) DelS3Key(ctx context.Context, engine string, keys ...string) error {
return s.s3cache.DelS3Key(ctx, engine, keys...)
}
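
Taken together, the new methods support the scheduled purge this PR introduces: FindByExpires pages objects created before a cutoff, FindNotDelByS3 reports whether the underlying S3 key is still referenced by a newer record, and DelS3Key evicts the cached key. A hedged sketch of the loop (the real cron task lives in the cron-task service; pg is a caller-supplied concrete pagination.Pagination, and model.Object is assumed to expose the Name/Engine/Key fields the queries reference):

func purgeExpired(ctx context.Context, db S3Database, engine string, cutoff time.Time, pg pagination.Pagination) error {
	_, objs, err := db.FindByExpires(ctx, cutoff, pg)
	if err != nil {
		return err
	}
	for _, obj := range objs {
		refs, err := db.FindNotDelByS3(ctx, obj.Key, cutoff)
		if err != nil {
			return err
		}
		if refs == 0 { // no newer record still references this S3 key
			if err := db.DeleteObject(ctx, obj.Key); err != nil {
				return err
			}
			if err := db.DelS3Key(ctx, engine, obj.Key); err != nil {
				return err
			}
		}
		// The metadata row itself has expired either way.
		if err := db.DeleteSpecifiedData(ctx, obj.Engine, obj.Name); err != nil {
			return err
		}
	}
	return nil
}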

@ -16,9 +16,10 @@ package controller
import (
"context"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/db/pagination"

@ -70,10 +70,6 @@ type UserDatabase interface {
GetAllSubscribeList(ctx context.Context, userID string) ([]string, error)
// GetSubscribedList Get all subscribed lists
GetSubscribedList(ctx context.Context, userID string) ([]string, error)
// GetUserStatus Get the online status of the user
GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
// SetUserStatus Set the user status and store the user status in redis
SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
// CRUD user command
AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error
@ -199,7 +195,7 @@ func (u *userDatabase) GetAllUserID(ctx context.Context, pagination pagination.P
}
func (u *userDatabase) GetUserByID(ctx context.Context, userID string) (user *model.User, err error) {
return u.userDB.Take(ctx, userID)
return u.cache.GetUserInfo(ctx, userID)
}
// CountTotal Get the total number of users.
@ -246,17 +242,6 @@ func (u *userDatabase) GetSubscribedList(ctx context.Context, userID string) ([]
return list, nil
}
// GetUserStatus get user status.
func (u *userDatabase) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
onlineStatusList, err := u.cache.GetUserStatus(ctx, userIDs)
return onlineStatusList, err
}
// SetUserStatus Set the user status and save it in redis.
func (u *userDatabase) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
return u.cache.SetUserStatus(ctx, userID, status, platformID)
}
func (u *userDatabase) AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error {
return u.userDB.AddUserCommand(ctx, userID, Type, UUID, value, ex)
}

@ -28,6 +28,8 @@ type GroupMember interface {
UpdateUserRoleLevels(ctx context.Context, groupID string, firstUserID string, firstUserRoleLevel int32, secondUserID string, secondUserRoleLevel int32) error
FindMemberUserID(ctx context.Context, groupID string) (userIDs []string, err error)
Take(ctx context.Context, groupID string, userID string) (groupMember *model.GroupMember, err error)
Find(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error)
FindInGroup(ctx context.Context, userID string, groupIDs []string) ([]*model.GroupMember, error)
TakeOwner(ctx context.Context, groupID string) (groupMember *model.GroupMember, err error)
SearchMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (total int64, groupList []*model.GroupMember, err error)
FindRoleLevelUserIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)

@ -153,6 +153,22 @@ func (g *GroupMemberMgo) FindMemberUserID(ctx context.Context, groupID string) (
return mongoutil.Find[string](ctx, g.coll, bson.M{"group_id": groupID}, options.Find().SetProjection(bson.M{"_id": 0, "user_id": 1}).SetSort(g.memberSort()))
}
func (g *GroupMemberMgo) Find(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
filter := bson.M{"group_id": groupID}
if len(userIDs) > 0 {
filter["user_id"] = bson.M{"$in": userIDs}
}
return mongoutil.Find[*model.GroupMember](ctx, g.coll, filter)
}
func (g *GroupMemberMgo) FindInGroup(ctx context.Context, userID string, groupIDs []string) ([]*model.GroupMember, error) {
filter := bson.M{"user_id": userID}
if len(groupIDs) > 0 {
filter["group_id"] = bson.M{"$in": groupIDs}
}
return mongoutil.Find[*model.GroupMember](ctx, g.coll, filter)
}
func (g *GroupMemberMgo) Take(ctx context.Context, groupID string, userID string) (groupMember *model.GroupMember, err error) {
return mongoutil.FindOne[*model.GroupMember](ctx, g.coll, bson.M{"group_id": groupID, "user_id": userID})
}
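
Both new finders treat an empty ID slice as "no filter on that field", so Find with nil userIDs returns every member of the group. Usage sketch (IDs illustrative):

func exampleGroupMemberQueries(ctx context.Context, g *GroupMemberMgo) error {
	// All members of one group (empty userIDs means no $in filter).
	all, err := g.Find(ctx, "g_12345", nil)
	if err != nil {
		return err
	}
	// Of these groups, which is u_1 a member of?
	memberships, err := g.FindInGroup(ctx, "u_1", []string{"g_12345", "sg_777"})
	if err != nil {
		return err
	}
	_, _ = all, memberships
	return nil
}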

@ -16,10 +16,13 @@ package mgo
import (
"context"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/pagination"
"github.com/openimsdk/tools/errs"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
@ -68,3 +71,14 @@ func (o *S3Mongo) Take(ctx context.Context, engine string, name string) (*model.
func (o *S3Mongo) Delete(ctx context.Context, engine string, name string) error {
return mongoutil.DeleteOne(ctx, o.coll, bson.M{"name": name, "engine": engine})
}
func (o *S3Mongo) FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) {
return mongoutil.FindPage[*model.Object](ctx, o.coll, bson.M{
"create_time": bson.M{"$lt": duration},
}, pagination)
}
func (o *S3Mongo) FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error) {
return mongoutil.Count(ctx, o.coll, bson.M{
"key": key,
"create_time": bson.M{"$gt": duration},
})
}

@ -0,0 +1,103 @@
package mgo
import (
"context"
"errors"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/mongoutil"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
func NewSeqConversationMongo(db *mongo.Database) (database.SeqConversation, error) {
coll := db.Collection(database.SeqConversationName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "conversation_id", Value: 1},
},
})
if err != nil {
return nil, err
}
return &seqConversationMongo{coll: coll}, nil
}
type seqConversationMongo struct {
coll *mongo.Collection
}
func (s *seqConversationMongo) setSeq(ctx context.Context, conversationID string, seq int64, field string) error {
filter := map[string]any{
"conversation_id": conversationID,
}
insert := bson.M{
"conversation_id": conversationID,
"min_seq": 0,
"max_seq": 0,
}
delete(insert, field)
update := map[string]any{
"$set": bson.M{
field: seq,
},
"$setOnInsert": insert,
}
opt := options.Update().SetUpsert(true)
return mongoutil.UpdateOne(ctx, s.coll, filter, update, false, opt)
}
func (s *seqConversationMongo) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
if size < 0 {
return 0, errors.New("size must not be negative")
}
if size == 0 {
return s.GetMaxSeq(ctx, conversationID)
}
filter := map[string]any{"conversation_id": conversationID}
update := map[string]any{
"$inc": map[string]any{"max_seq": size},
"$set": map[string]any{"min_seq": int64(0)},
}
opt := options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After).SetProjection(map[string]any{"_id": 0, "max_seq": 1})
lastSeq, err := mongoutil.FindOneAndUpdate[int64](ctx, s.coll, filter, update, opt)
if err != nil {
return 0, err
}
return lastSeq - size, nil
}
func (s *seqConversationMongo) SetMaxSeq(ctx context.Context, conversationID string, seq int64) error {
return s.setSeq(ctx, conversationID, seq, "max_seq")
}
func (s *seqConversationMongo) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
seq, err := mongoutil.FindOne[int64](ctx, s.coll, bson.M{"conversation_id": conversationID}, options.FindOne().SetProjection(map[string]any{"_id": 0, "max_seq": 1}))
if err == nil {
return seq, nil
} else if IsNotFound(err) {
return 0, nil
} else {
return 0, err
}
}
func (s *seqConversationMongo) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
seq, err := mongoutil.FindOne[int64](ctx, s.coll, bson.M{"conversation_id": conversationID}, options.FindOne().SetProjection(map[string]any{"_id": 0, "min_seq": 1}))
if err == nil {
return seq, nil
} else if IsNotFound(err) {
return 0, nil
} else {
return 0, err
}
}
func (s *seqConversationMongo) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
return s.setSeq(ctx, conversationID, seq, "min_seq")
}
func (s *seqConversationMongo) GetConversation(ctx context.Context, conversationID string) (*model.SeqConversation, error) {
return mongoutil.FindOne[*model.SeqConversation](ctx, s.coll, bson.M{"conversation_id": conversationID})
}
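
Malloc is the heart of the allocator: a single FindOneAndUpdate with upsert $inc-s max_seq by size and returns the pre-increment value, so concurrent callers always receive disjoint blocks. A sketch of the caller's contract (assuming OpenIM's convention that message seqs start at 1):

func assignSeqs(ctx context.Context, sc database.SeqConversation, conversationID string, n int64) ([]int64, error) {
	first, err := sc.Malloc(ctx, conversationID, n) // reserve n seqs atomically
	if err != nil {
		return nil, err
	}
	// The block (first, first+n] now belongs to this caller alone: if
	// max_seq was 7 before the call, first is 7 and the messages get
	// seqs 8, 9, ..., 7+n.
	seqs := make([]int64, 0, n)
	for i := int64(1); i <= n; i++ {
		seqs = append(seqs, first+i)
	}
	return seqs, nil
}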

@ -0,0 +1,37 @@
package mgo
import (
"context"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"testing"
"time"
)
func Result[V any](val V, err error) V {
if err != nil {
panic(err)
}
return val
}
func Mongodb() *mongo.Database {
return Result(
mongo.Connect(context.Background(),
options.Client().
ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").
SetConnectTimeout(5*time.Second)),
).Database("openim_v3")
}
func TestUserSeq(t *testing.T) {
uSeq := Result(NewSeqUserMongo(Mongodb())).(*seqUserMongo)
t.Log(uSeq.SetMinSeq(context.Background(), "1000", "2000", 4))
}
func TestConversationSeq(t *testing.T) {
cSeq := Result(NewSeqConversationMongo(Mongodb())).(*seqConversationMongo)
t.Log(cSeq.SetMaxSeq(context.Background(), "2000", 10))
t.Log(cSeq.Malloc(context.Background(), "2000", 10))
t.Log(cSeq.GetMaxSeq(context.Background(), "2000"))
}

@ -0,0 +1,110 @@
package mgo
import (
"context"
"errors"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/mongoutil"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
func NewSeqUserMongo(db *mongo.Database) (database.SeqUser, error) {
coll := db.Collection(database.SeqUserName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "user_id", Value: 1},
{Key: "conversation_id", Value: 1},
},
})
if err != nil {
return nil, err
}
return &seqUserMongo{coll: coll}, nil
}
type seqUserMongo struct {
coll *mongo.Collection
}
func (s *seqUserMongo) setSeq(ctx context.Context, conversationID string, userID string, seq int64, field string) error {
filter := map[string]any{
"user_id": userID,
"conversation_id": conversationID,
}
insert := bson.M{
"user_id": userID,
"conversation_id": conversationID,
"min_seq": 0,
"max_seq": 0,
"read_seq": 0,
}
delete(insert, field)
update := map[string]any{
"$set": bson.M{
field: seq,
},
"$setOnInsert": insert,
}
opt := options.Update().SetUpsert(true)
return mongoutil.UpdateOne(ctx, s.coll, filter, update, false, opt)
}
func (s *seqUserMongo) getSeq(ctx context.Context, conversationID string, userID string, field string) (int64, error) {
filter := map[string]any{
"user_id": userID,
"conversation_id": conversationID,
}
opt := options.FindOne().SetProjection(bson.M{"_id": 0, field: 1})
seq, err := mongoutil.FindOne[int64](ctx, s.coll, filter, opt)
if err == nil {
return seq, nil
} else if errors.Is(err, mongo.ErrNoDocuments) {
return 0, nil
} else {
return 0, err
}
}
func (s *seqUserMongo) GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
return s.getSeq(ctx, conversationID, userID, "max_seq")
}
func (s *seqUserMongo) SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
return s.setSeq(ctx, conversationID, userID, seq, "max_seq")
}
func (s *seqUserMongo) GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
return s.getSeq(ctx, conversationID, userID, "min_seq")
}
func (s *seqUserMongo) SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
return s.setSeq(ctx, conversationID, userID, seq, "min_seq")
}
func (s *seqUserMongo) GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
return s.getSeq(ctx, conversationID, userID, "read_seq")
}
func (s *seqUserMongo) GetReadSeqs(ctx context.Context, userID string, conversationID []string) (map[string]int64, error) {
if len(conversationID) == 0 {
return map[string]int64{}, nil
}
filter := bson.M{"user_id": userID, "conversation_id": bson.M{"$in": conversationID}}
opt := options.Find().SetProjection(bson.M{"_id": 0, "conversation_id": 1, "read_seq": 1})
seqs, err := mongoutil.Find[*model.SeqUser](ctx, s.coll, filter, opt)
if err != nil {
return nil, err
}
res := make(map[string]int64)
for _, seq := range seqs {
res[seq.ConversationID] = seq.ReadSeq
}
return res, nil
}
func (s *seqUserMongo) SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
return s.setSeq(ctx, conversationID, userID, seq, "read_seq")
}
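
GetReadSeqs returns only the rows that exist, so a conversation the user has never read is simply absent from the map; Go's zero-value lookup makes that read as 0, which is the correct default:

func exampleReadSeq(ctx context.Context, su database.SeqUser) (int64, error) {
	seqs, err := su.GetReadSeqs(ctx, "u_1", []string{"si_a_b", "g_team"})
	if err != nil {
		return 0, err
	}
	return seqs["g_team"], nil // 0 when no read_seq row exists yet
}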

@ -15,4 +15,6 @@ const (
LogName = "log"
ObjectName = "s3"
UserName = "user"
SeqConversationName = "seq"
SeqUserName = "seq_user"
)

@ -16,11 +16,16 @@ package database
import (
"context"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/pagination"
)
type ObjectInfo interface {
SetObject(ctx context.Context, obj *model.Object) error
Take(ctx context.Context, engine string, name string) (*model.Object, error)
Delete(ctx context.Context, engine string, name string) error
FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error)
FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error)
}

@ -0,0 +1,11 @@
package database
import "context"
type SeqConversation interface {
Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
SetMaxSeq(ctx context.Context, conversationID string, seq int64) error
GetMinSeq(ctx context.Context, conversationID string) (int64, error)
SetMinSeq(ctx context.Context, conversationID string, seq int64) error
}

@ -0,0 +1,13 @@
package database
import "context"
type SeqUser interface {
GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
GetReadSeqs(ctx context.Context, userID string, conversationID []string) (map[string]int64, error)
}

@ -0,0 +1,7 @@
package model
type SeqConversation struct {
ConversationID string `bson:"conversation_id"`
MaxSeq int64 `bson:"max_seq"`
MinSeq int64 `bson:"min_seq"`
}

@ -0,0 +1,9 @@
package model
type SeqUser struct {
UserID string `bson:"user_id"`
ConversationID string `bson:"conversation_id"`
MinSeq int64 `bson:"min_seq"`
MaxSeq int64 `bson:"max_seq"`
ReadSeq int64 `bson:"read_seq"`
}

@ -31,6 +31,12 @@ type Cache[V any] interface {
Stop()
}
func LRUStringHash(key string) uint64 {
h := fnv.New64a()
h.Write(*(*[]byte)(unsafe.Pointer(&key)))
return h.Sum64()
}
func New[V any](opts ...Option) Cache[V] {
opt := defaultOption()
for _, o := range opts {
@ -49,11 +55,7 @@ func New[V any](opts ...Option) Cache[V] {
if opt.localSlotNum == 1 {
c.local = createSimpleLRU()
} else {
c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, func(key string) uint64 {
h := fnv.New64a()
h.Write(*(*[]byte)(unsafe.Pointer(&key)))
return h.Sum64()
}, createSimpleLRU)
c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, LRUStringHash, createSimpleLRU)
}
if opt.linkSlotNum > 0 {
c.link = link.New(opt.linkSlotNum)
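
Hoisting the hash into the exported LRUStringHash (together with the EmptyTarget rename a few hunks below) lets other packages build slot LRUs with an identical key distribution; the new OnlineCache in this PR does exactly that. Echoing its construction (TTL values illustrative):

local := lru.NewSlotLRU[string, []int32](1024, localcache.LRUStringHash,
	func() lru.LRU[string, []int32] {
		return lru.NewLayLRU[string, []int32](2048, time.Minute, 3*time.Second,
			localcache.EmptyTarget{}, func(key string, value []int32) {})
	})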

@ -20,6 +20,7 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V]
type LRU[K comparable, V any] interface {
Get(key K, fetch func() (V, error)) (V, error)
SetHas(key K, value V) bool
Del(key K) bool
Stop()
}

@ -89,5 +89,15 @@ func (x *ExpirationLRU[K, V]) Del(key K) bool {
return ok
}
func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool {
x.lock.Lock()
defer x.lock.Unlock()
if x.core.Contains(key) {
x.core.Add(key, &expirationLruItem[V]{value: value})
return true
}
return false
}
func (x *ExpirationLRU[K, V]) Stop() {
}

@ -88,6 +88,28 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return v.value, v.err
}
//func (x *LayLRU[K, V]) Set(key K, value V) {
// x.lock.Lock()
// x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
// x.lock.Unlock()
//}
//
//func (x *LayLRU[K, V]) Has(key K) bool {
// x.lock.Lock()
// defer x.lock.Unlock()
// return x.core.Contains(key)
//}
func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
x.lock.Lock()
defer x.lock.Unlock()
if x.core.Contains(key) {
x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
return true
}
return false
}
func (x *LayLRU[K, V]) Del(key K) bool {
x.lock.Lock()
ok := x.core.Remove(key)

@ -40,6 +40,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return x.slots[x.getIndex(key)].Get(key, fetch)
}
func (x *slotLRU[K, V]) SetHas(key K, value V) bool {
return x.slots[x.getIndex(key)].SetHas(key, value)
}
func (x *slotLRU[K, V]) Del(key K) bool {
return x.slots[x.getIndex(key)].Del(key)
}

@ -30,7 +30,7 @@ func defaultOption() *option {
localSuccessTTL: time.Minute,
localFailedTTL: time.Second * 5,
delFn: make([]func(ctx context.Context, key ...string), 0, 2),
target: emptyTarget{},
target: EmptyTarget{},
}
}
@ -123,14 +123,14 @@ func WithDeleteKeyBefore(fn func(ctx context.Context, key ...string)) Option {
}
}
type emptyTarget struct{}
type EmptyTarget struct{}
func (e emptyTarget) IncrGetHit() {}
func (e EmptyTarget) IncrGetHit() {}
func (e emptyTarget) IncrGetSuccess() {}
func (e EmptyTarget) IncrGetSuccess() {}
func (e emptyTarget) IncrGetFailed() {}
func (e EmptyTarget) IncrGetFailed() {}
func (e emptyTarget) IncrDelHit() {}
func (e EmptyTarget) IncrDelHit() {}
func (e emptyTarget) IncrDelNotFound() {}
func (e EmptyTarget) IncrDelNotFound() {}

@ -24,6 +24,10 @@ import (
"google.golang.org/protobuf/proto"
)
func IsGroupConversationID(conversationID string) bool {
return strings.HasPrefix(conversationID, "g_") || strings.HasPrefix(conversationID, "sg_")
}
func GetNotificationConversationIDByMsg(msg *sdkws.MsgData) string {
switch msg.SessionType {
case constant.SingleChatType:

@ -0,0 +1,100 @@
package rpccache
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/redis/go-redis/v9"
"math/rand"
"strconv"
"time"
)
func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache {
x := &OnlineCache{
user: user,
group: group,
local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
}),
}
go func() {
ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10))
for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() {
userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
if err != nil {
log.ZError(ctx, "OnlineCache redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
continue
}
storageCache := x.setUserOnline(userID, platformIDs)
log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
if fn != nil {
fn(ctx, userID, platformIDs)
}
}
}()
return x
}
type OnlineCache struct {
user rpcclient.UserRpcClient
group *GroupLocalCache
local lru.LRU[string, []int32]
}
func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
return o.local.Get(userID, func() ([]int32, error) {
return o.user.GetUserOnlinePlatform(ctx, userID)
})
}
func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
platformIDs, err := o.GetUserOnlinePlatform(ctx, userID)
if err != nil {
return false, err
}
return len(platformIDs) > 0, nil
}
func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
onlineUserIDs := make([]string, 0, len(userIDs))
for _, userID := range userIDs {
online, err := o.GetUserOnline(ctx, userID)
if err != nil {
return nil, err
}
if online {
onlineUserIDs = append(onlineUserIDs, userID)
}
}
log.ZDebug(ctx, "OnlineCache GetUsersOnline", "userIDs", userIDs, "onlineUserIDs", onlineUserIDs)
return onlineUserIDs, nil
}
func (o *OnlineCache) GetGroupOnline(ctx context.Context, groupID string) ([]string, error) {
userIDs, err := o.group.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return nil, err
}
var onlineUserIDs []string
for _, userID := range userIDs {
online, err := o.GetUserOnline(ctx, userID)
if err != nil {
return nil, err
}
if online {
onlineUserIDs = append(onlineUserIDs, userID)
}
}
log.ZDebug(ctx, "OnlineCache GetGroupOnline", "groupID", groupID, "onlineUserIDs", onlineUserIDs, "allUserID", userIDs)
return onlineUserIDs, nil
}
func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool {
return o.local.SetHas(userID, platformIDs)
}
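
NewOnlineCache subscribes to the redis online channel in a background goroutine, mirrors every status change into the local slot LRU, and forwards it to fn when one is supplied. A wiring sketch (client, group cache, and rdb construction omitted; fn may be nil):

onlineCache := rpccache.NewOnlineCache(userClient, groupLocalCache, rdb,
	func(ctx context.Context, userID string, platformIDs []int32) {
		// Invoked for every status change published on the channel.
		log.ZDebug(ctx, "user online change", "userID", userID, "platformIDs", platformIDs)
	})
online, err := onlineCache.GetUserOnline(ctx, "u_1") // local LRU first, RPC on miss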

@ -110,3 +110,18 @@ func (u *UserLocalCache) GetUsersInfoMap(ctx context.Context, userIDs []string)
}
return users, nil
}
//func (u *UserLocalCache) GetUserOnlinePlatform(ctx context.Context, userID string) (val []int32, err error) {
// log.ZDebug(ctx, "UserLocalCache GetUserOnlinePlatform req", "userID", userID)
// defer func() {
// if err == nil {
// log.ZDebug(ctx, "UserLocalCache GetUserOnlinePlatform return", "value", val)
// } else {
// log.ZError(ctx, "UserLocalCache GetUserOnlinePlatform return", err)
// }
// }()
// return localcache.AnyValue[[]int32](u.local.Get(ctx, cachekey.GetOnlineKey(userID), func(ctx context.Context) (any, error) {
// log.ZDebug(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt rpc", "userID", userID)
// return u.client.GetUserGlobalMsgRecvOpt(ctx, userID)
// }))
//}

@ -41,3 +41,7 @@ func NewThird(discov discovery.SvcDiscoveryRegistry, rpcRegisterName, grafanaUrl
}
return &Third{discov: discov, Client: client, conn: conn, GrafanaUrl: grafanaUrl}
}
func (t *Third) DeleteOutdatedData(ctx context.Context, expires int64) error {
_, err := t.Client.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: expires})
return err
}
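
On the cron-task side this wrapper reduces the retention policy to a single RPC; the unit of ExpireTime follows the third service's definition, assumed here to be a millisecond timestamp:

// Hedged sketch: purge objects older than the configured retention window.
expire := time.Now().Add(-retention).UnixMilli() // unit assumed: ms since epoch
if err := thirdClient.DeleteOutdatedData(ctx, expire); err != nil {
	log.ZError(ctx, "DeleteOutdatedData failed", err)
}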

@ -193,3 +193,25 @@ func (u *UserRpcClient) GetNotificationByID(ctx context.Context, userID string)
})
return err
}
func (u *UserRpcClient) GetUsersOnlinePlatform(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
if len(userIDs) == 0 {
return nil, nil
}
resp, err := u.Client.GetUserStatus(ctx, &user.GetUserStatusReq{UserIDs: userIDs, UserID: u.imAdminUserID[0]})
if err != nil {
return nil, err
}
return resp.StatusList, nil
}
func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
resp, err := u.GetUsersOnlinePlatform(ctx, []string{userID})
if err != nil {
return nil, err
}
if len(resp) == 0 {
return nil, nil
}
return resp[0].PlatformIDs, nil
}

@ -0,0 +1,27 @@
package useronline
import (
"errors"
"strconv"
"strings"
)
func ParseUserOnlineStatus(payload string) (string, []int32, error) {
arr := strings.Split(payload, ":")
if len(arr) == 0 {
return "", nil, errors.New("invalid data")
}
userID := arr[len(arr)-1]
if userID == "" {
return "", nil, errors.New("userID is empty")
}
platformIDs := make([]int32, len(arr)-1)
for i := range platformIDs {
platformID, err := strconv.Atoi(arr[i])
if err != nil {
return "", nil, err
}
platformIDs[i] = int32(platformID)
}
return userID, platformIDs, nil
}
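
The payload is colon-separated with the userID always in the last segment, so a fully offline user publishes just their ID:

// "9:2:u_1" -> userID "u_1", platformIDs [9 2] (online on platforms 9 and 2)
// "u_1"     -> userID "u_1", platformIDs []    (no platform online)
userID, platformIDs, err := useronline.ParseUserOnlineStatus("9:2:u_1")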

@ -14,4 +14,5 @@ serviceBinaries:
toolBinaries:
- check-free-memory
- check-component
- seq
maxFileDescriptors: 10000

@ -0,0 +1,331 @@
package internal
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/redis/go-redis/v9"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"gopkg.in/yaml.v3"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
const (
MaxSeq = "MAX_SEQ:"
MinSeq = "MIN_SEQ:"
ConversationUserMinSeq = "CON_USER_MIN_SEQ:"
HasReadSeq = "HAS_READ_SEQ:"
)
const (
batchSize = 100
dataVersionCollection = "data_version"
seqKey = "seq"
seqVersion = 38
)
func readConfig[T any](dir string, name string) (*T, error) {
data, err := os.ReadFile(filepath.Join(dir, name))
if err != nil {
return nil, err
}
var conf T
if err := yaml.Unmarshal(data, &conf); err != nil {
return nil, err
}
return &conf, nil
}
func Main(conf string, del time.Duration) error {
redisConfig, err := readConfig[config.Redis](conf, cmd.RedisConfigFileName)
if err != nil {
return err
}
mongodbConfig, err := readConfig[config.Mongo](conf, cmd.MongodbConfigFileName)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
rdb, err := redisutil.NewRedisClient(ctx, redisConfig.Build())
if err != nil {
return err
}
mgocli, err := mongoutil.NewMongoDB(ctx, mongodbConfig.Build())
if err != nil {
return err
}
versionColl := mgocli.GetDB().Collection(dataVersionCollection)
converted, err := CheckVersion(versionColl, seqKey, seqVersion)
if err != nil {
return err
}
if converted {
fmt.Println("[seq] seq data has been converted")
return nil
}
cSeq, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil {
return err
}
uSeq, err := mgo.NewSeqUserMongo(mgocli.GetDB())
if err != nil {
return err
}
uSplitHasReadSeq := func(id string) (conversationID string, userID string, err error) {
// HasReadSeq + userID + ":" + conversationID
arr := strings.Split(id, ":")
if len(arr) != 2 || arr[0] == "" || arr[1] == "" {
return "", "", fmt.Errorf("invalid has read seq id %s", id)
}
userID = arr[0]
conversationID = arr[1]
return
}
uSplitConversationUserMinSeq := func(id string) (conversationID string, userID string, err error) {
// ConversationUserMinSeq + conversationID + "u:" + userID
arr := strings.Split(id, "u:")
if len(arr) != 2 || arr[0] == "" || arr[1] == "" {
return "", "", fmt.Errorf("invalid has read seq id %s", id)
}
conversationID = arr[0]
userID = arr[1]
return
}
ts := []*taskSeq{
{
Prefix: MaxSeq,
GetSeq: cSeq.GetMaxSeq,
SetSeq: cSeq.SetMaxSeq,
},
{
Prefix: MinSeq,
GetSeq: cSeq.GetMinSeq,
SetSeq: cSeq.SetMinSeq,
},
{
Prefix: HasReadSeq,
GetSeq: func(ctx context.Context, id string) (int64, error) {
conversationID, userID, err := uSplitHasReadSeq(id)
if err != nil {
return 0, err
}
return uSeq.GetReadSeq(ctx, conversationID, userID)
},
SetSeq: func(ctx context.Context, id string, seq int64) error {
conversationID, userID, err := uSplitHasReadSeq(id)
if err != nil {
return err
}
return uSeq.SetReadSeq(ctx, conversationID, userID, seq)
},
},
{
Prefix: ConversationUserMinSeq,
GetSeq: func(ctx context.Context, id string) (int64, error) {
conversationID, userID, err := uSplitConversationUserMinSeq(id)
if err != nil {
return 0, err
}
return uSeq.GetMinSeq(ctx, conversationID, userID)
},
SetSeq: func(ctx context.Context, id string, seq int64) error {
conversationID, userID, err := uSplitConversationUserMinSeq(id)
if err != nil {
return err
}
return uSeq.SetMinSeq(ctx, conversationID, userID, seq)
},
},
}
cancel()
ctx = context.Background()
var wg sync.WaitGroup
wg.Add(len(ts))
for i := range ts {
go func(task *taskSeq) {
defer wg.Done()
err := seqRedisToMongo(ctx, rdb, task.GetSeq, task.SetSeq, task.Prefix, del, &task.Count)
task.End = time.Now()
task.Error = err
}(ts[i])
}
start := time.Now()
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGTERM)
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
var buf bytes.Buffer
printTaskInfo := func(now time.Time) {
buf.Reset()
buf.WriteString(now.Format(time.DateTime))
buf.WriteString(" \n")
for i := range ts {
task := ts[i]
if task.Error == nil {
if task.End.IsZero() {
buf.WriteString(fmt.Sprintf("[%s] converting %s* count %d", now.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count)))
} else {
buf.WriteString(fmt.Sprintf("[%s] success %s* count %d", task.End.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count)))
}
} else {
buf.WriteString(fmt.Sprintf("[%s] failed %s* count %d error %s", task.End.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count), task.Error))
}
buf.WriteString("\n")
}
fmt.Println(buf.String())
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case s := <-sigs:
return fmt.Errorf("exit by signal %s", s)
case <-done:
errs := make([]error, 0, len(ts))
for i := range ts {
task := ts[i]
if task.Error != nil {
errs = append(errs, fmt.Errorf("seq %s failed %w", task.Prefix, task.Error))
}
}
if len(errs) > 0 {
return errors.Join(errs...)
}
printTaskInfo(time.Now())
if err := SetVersion(versionColl, seqKey, seqVersion); err != nil {
return fmt.Errorf("set mongodb seq version %w", err)
}
return nil
case now := <-ticker.C:
printTaskInfo(now)
}
}
}
type taskSeq struct {
Prefix string
Count int64
Error error
End time.Time
GetSeq func(ctx context.Context, id string) (int64, error)
SetSeq func(ctx context.Context, id string, seq int64) error
}
func seqRedisToMongo(ctx context.Context, rdb redis.UniversalClient, getSeq func(ctx context.Context, id string) (int64, error), setSeq func(ctx context.Context, id string, seq int64) error, prefix string, delAfter time.Duration, count *int64) error {
var (
cursor uint64
keys []string
err error
)
for {
keys, cursor, err = rdb.Scan(ctx, cursor, prefix+"*", batchSize).Result()
if err != nil {
return err
}
if len(keys) > 0 {
for _, key := range keys {
seqStr, err := rdb.Get(ctx, key).Result()
if err != nil {
return fmt.Errorf("redis get %s failed %w", key, err)
}
seq, err := strconv.Atoi(seqStr)
if err != nil {
return fmt.Errorf("invalid %s seq %s", key, seqStr)
}
if seq < 0 {
return fmt.Errorf("invalid %s seq %s", key, seqStr)
}
id := strings.TrimPrefix(key, prefix)
redisSeq := int64(seq)
mongoSeq, err := getSeq(ctx, id)
if err != nil {
return fmt.Errorf("get mongo seq %s failed %w", key, err)
}
if mongoSeq < redisSeq {
if err := setSeq(ctx, id, redisSeq); err != nil {
return fmt.Errorf("set mongo seq %s failed %w", key, err)
}
}
if delAfter > 0 {
if err := rdb.Expire(ctx, key, delAfter).Err(); err != nil {
return fmt.Errorf("redis expire key %s failed %w", key, err)
}
} else {
if err := rdb.Del(ctx, key).Err(); err != nil {
return fmt.Errorf("redis del key %s failed %w", key, err)
}
}
atomic.AddInt64(count, 1)
}
}
if cursor == 0 {
return nil
}
}
}
func CheckVersion(coll *mongo.Collection, key string, currentVersion int) (converted bool, err error) {
type VersionTable struct {
Key string `bson:"key"`
Value string `bson:"value"`
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
res, err := mongoutil.FindOne[VersionTable](ctx, coll, bson.M{"key": key})
if err == nil {
ver, err := strconv.Atoi(res.Value)
if err != nil {
return false, fmt.Errorf("version %s parse error %w", res.Value, err)
}
if ver >= currentVersion {
return true, nil
}
return false, nil
} else if errors.Is(err, mongo.ErrNoDocuments) {
return false, nil
} else {
return false, err
}
}
func SetVersion(coll *mongo.Collection, key string, version int) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
option := options.Update().SetUpsert(true)
filter := bson.M{"key": key, "value": strconv.Itoa(version)}
update := bson.M{"$set": bson.M{"key": key, "value": strconv.Itoa(version)}}
return mongoutil.UpdateOne(ctx, coll, filter, update, false, option)
}
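
For reference, the four legacy redis key shapes the tool scans and their mongo destinations (IDs illustrative; the split helpers above define the exact formats):

// MAX_SEQ:si_a_b               -> collection "seq",      field max_seq
// MIN_SEQ:si_a_b               -> collection "seq",      field min_seq
// HAS_READ_SEQ:u_1:si_a_b      -> collection "seq_user", field read_seq
// CON_USER_MIN_SEQ:si_a_bu:u_1 -> collection "seq_user", field min_seq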

@ -0,0 +1,22 @@
package main
import (
"flag"
"fmt"
"github.com/openimsdk/open-im-server/v3/tools/seq/internal"
"time"
)
func main() {
var (
config string
second int
)
flag.StringVar(&config, "c", "config", "config directory")
flag.IntVar(&second, "sec", 3600*24, "delayed deletion of the original seq key after conversion")
flag.Parse()
if err := internal.Main(config, time.Duration(second)*time.Second); err != nil {
fmt.Println("seq task", err)
return
}
fmt.Println("seq task success!")
}