Merge branch 'main' of https://github.com/openimsdk/open-im-server
commit 69296539e2
@ -0,0 +1,34 @@
global:
  resolve_timeout: 5m
  smtp_from: alert@openim.io
  smtp_smarthost: smtp.163.com:465
  smtp_auth_username: alert@openim.io
  smtp_auth_password: YOURAUTHPASSWORD
  smtp_require_tls: false
  smtp_hello: xxx

templates:
  - /etc/alertmanager/email.tmpl

route:
  group_by: [ 'alertname' ]
  group_wait: 5s
  group_interval: 5s
  repeat_interval: 5m
  receiver: email
  routes:
    - matchers:
        - alertname = "XXX"
      group_by: [ 'instance' ]
      group_wait: 5s
      group_interval: 5s
      repeat_interval: 5m
      receiver: email

receivers:
  - name: email
    email_configs:
      - to: 'alert@example.com'
        html: '{{ template "email.to.html" . }}'
        headers: { Subject: "[OPENIM-SERVER]Alarm" }
        send_resolved: true
@ -0,0 +1,36 @@
{{ define "email.to.html" }}
{{ if eq .Status "firing" }}
{{ range .Alerts }}
<!-- Begin of OpenIM Alert -->
<div style="border:1px solid #ccc; padding:10px; margin-bottom:10px;">
  <h3>OpenIM Alert</h3>
  <p><strong>Alert Status:</strong> firing</p>
  <p><strong>Alert Program:</strong> Prometheus Alert</p>
  <p><strong>Severity Level:</strong> {{ .Labels.severity }}</p>
  <p><strong>Alert Type:</strong> {{ .Labels.alertname }}</p>
  <p><strong>Affected Host:</strong> {{ .Labels.instance }}</p>
  <p><strong>Affected Service:</strong> {{ .Labels.job }}</p>
  <p><strong>Alert Subject:</strong> {{ .Annotations.summary }}</p>
  <p><strong>Trigger Time:</strong> {{ .StartsAt.Format "2006-01-02 15:04:05" }}</p>
</div>
{{ end }}

{{ else if eq .Status "resolved" }}
{{ range .Alerts }}
<!-- Begin of OpenIM Alert -->
<div style="border:1px solid #ccc; padding:10px; margin-bottom:10px;">
  <h3>OpenIM Alert</h3>
  <p><strong>Alert Status:</strong> resolved</p>
  <p><strong>Alert Program:</strong> Prometheus Alert</p>
  <p><strong>Severity Level:</strong> {{ .Labels.severity }}</p>
  <p><strong>Alert Type:</strong> {{ .Labels.alertname }}</p>
  <p><strong>Affected Host:</strong> {{ .Labels.instance }}</p>
  <p><strong>Affected Service:</strong> {{ .Labels.job }}</p>
  <p><strong>Alert Subject:</strong> {{ .Annotations.summary }}</p>
  <p><strong>Trigger Time:</strong> {{ .StartsAt.Format "2006-01-02 15:04:05" }}</p>
</div>
{{ end }}
<!-- End of OpenIM Alert -->
{{ end }}
{{ end }}
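The template above only reads .Status, .Alerts, and each alert's Labels, Annotations.summary and StartsAt, so it can be previewed outside Alertmanager. The following is a minimal, hypothetical Go sketch (not part of this change) that renders email.tmpl from the current directory with fake data; the alert and data structs are assumptions that mirror only the fields the template touches.

package main

import (
	"html/template"
	"os"
	"time"
)

// alert mirrors just the fields email.tmpl accesses; the real Alertmanager payload has more.
type alert struct {
	Labels      map[string]string
	Annotations map[string]string
	StartsAt    time.Time
}

type data struct {
	Status string
	Alerts []alert
}

func main() {
	t, err := template.ParseFiles("email.tmpl")
	if err != nil {
		panic(err)
	}
	d := data{
		Status: "firing",
		Alerts: []alert{{
			Labels:      map[string]string{"severity": "critical", "alertname": "InstanceDown", "instance": "internal_ip:20113", "job": "openimserver-openim-api"},
			Annotations: map[string]string{"summary": "Instance internal_ip:20113 down"},
			StartsAt:    time.Now(),
		}},
	}
	// Render the named template defined in the file and print the HTML to stdout.
	if err := t.ExecuteTemplate(os.Stdout, "email.to.html", d); err != nil {
		panic(err)
	}
}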
File diff suppressed because it is too large
@ -0,0 +1,44 @@
groups:
  - name: instance_down
    rules:
      - alert: InstanceDown
        expr: up == 0
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Instance {{ $labels.instance }} down"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minute."

  - name: database_insert_failure_alerts
    rules:
      - alert: DatabaseInsertFailed
        expr: (increase(msg_insert_redis_failed_total[5m]) > 0) or (increase(msg_insert_mongo_failed_total[5m]) > 0)
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Increase in MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter detected"
          description: "Either MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter has increased in the last 5 minutes, indicating failed message inserts into Redis or MongoDB; Redis or MongoDB may be down."

  - name: registrations_few
    rules:
      - alert: RegistrationsFew
        expr: increase(user_login_total[1h]) == 0
        for: 1m
        labels:
          severity: info
        annotations:
          summary: "Too few registrations within the time frame"
          description: "The number of registrations in the last hour is 0. There might be some issues."

  - name: messages_few
    rules:
      - alert: MessagesFew
        expr: (increase(single_chat_msg_process_success_total[1h])+increase(group_chat_msg_process_success_total[1h])) == 0
        for: 1m
        labels:
          severity: info
        annotations:
          summary: "Too few messages within the time frame"
          description: "The number of messages sent in the last hour is 0. There might be some issues."
@ -1,2 +1,3 @@
-chatRecordsClearTime: "0 2 * * *"
+cronExecuteTime: "0 2 * * *"
 retainChatRecords: 365
+fileExpireTime: 90
@ -0,0 +1,83 @@
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['internal_ip:19093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "instance-down-rules.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label "job='job_name'" to any timeseries scraped from this config.
  # Monitored information captured by prometheus

  # prometheus fetches application services
  - job_name: 'node_exporter'
    static_configs:
      - targets: [ 'internal_ip:20114' ]
  - job_name: 'openimserver-openim-api'
    static_configs:
      - targets: [ 'internal_ip:20113' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-msggateway'
    static_configs:
      - targets: [ 'internal_ip:20112' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-msgtransfer'
    static_configs:
      - targets: [ 'internal_ip:20111', 'internal_ip:20110', 'internal_ip:20109', 'internal_ip:20108' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-push'
    static_configs:
      - targets: [ 'internal_ip:20107' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-auth'
    static_configs:
      - targets: [ 'internal_ip:20106' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-conversation'
    static_configs:
      - targets: [ 'internal_ip:20105' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-friend'
    static_configs:
      - targets: [ 'internal_ip:20104' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-group'
    static_configs:
      - targets: [ 'internal_ip:20103' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-msg'
    static_configs:
      - targets: [ 'internal_ip:20102' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-third'
    static_configs:
      - targets: [ 'internal_ip:20101' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-user'
    static_configs:
      - targets: [ 'internal_ip:20100' ]
        labels:
          namespace: 'default'
@ -0,0 +1,112 @@
package msggateway

import (
	"context"
	"crypto/md5"
	"encoding/binary"
	"math/rand"
	"strconv"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	pbuser "github.com/openimsdk/protocol/user"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mcontext"
	"github.com/openimsdk/tools/utils/datautil"
)

func (ws *WsServer) ChangeOnlineStatus(concurrent int) {
	if concurrent < 1 {
		concurrent = 1
	}
	const renewalTime = cachekey.OnlineExpire / 3
	//const renewalTime = time.Second * 10
	renewalTicker := time.NewTicker(renewalTime)

	requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent)
	changeStatus := make([][]UserState, concurrent)

	for i := 0; i < concurrent; i++ {
		requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64)
		changeStatus[i] = make([]UserState, 0, 100)
	}

	mergeTicker := time.NewTicker(time.Second)

	local2pb := func(u UserState) *pbuser.UserOnlineStatus {
		return &pbuser.UserOnlineStatus{
			UserID:  u.UserID,
			Online:  u.Online,
			Offline: u.Offline,
		}
	}

	rNum := rand.Uint64()
	pushUserState := func(us ...UserState) {
		for _, u := range us {
			sum := md5.Sum([]byte(u.UserID))
			i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent)
			changeStatus[i] = append(changeStatus[i], u)
			status := changeStatus[i]
			if len(status) == cap(status) {
				req := &pbuser.SetUserOnlineStatusReq{
					Status: datautil.Slice(status, local2pb),
				}
				changeStatus[i] = status[:0]
				select {
				case requestChs[i] <- req:
				default:
					log.ZError(context.Background(), "user online processing is too slow", nil)
				}
			}
		}
	}

	pushAllUserState := func() {
		for i, status := range changeStatus {
			if len(status) == 0 {
				continue
			}
			req := &pbuser.SetUserOnlineStatusReq{
				Status: datautil.Slice(status, local2pb),
			}
			changeStatus[i] = status[:0]
			select {
			case requestChs[i] <- req:
			default:
				log.ZError(context.Background(), "user online processing is too slow", nil)
			}
		}
	}

	opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10))
	doRequest := func(req *pbuser.SetUserOnlineStatusReq) {
		ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5)
		defer cancel()
		if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil {
			log.ZError(ctx, "update user online status", err)
		}
	}

	for i := 0; i < concurrent; i++ {
		go func(ch <-chan *pbuser.SetUserOnlineStatusReq) {
			for req := range ch {
				doRequest(req)
			}
		}(requestChs[i])
	}

	for {
		select {
		case <-mergeTicker.C:
			pushAllUserState()
		case now := <-renewalTicker.C:
			deadline := now.Add(-cachekey.OnlineExpire / 3)
			users := ws.clients.GetAllUserStatus(deadline, now)
			log.ZDebug(context.Background(), "renewal ticker", "deadline", deadline, "nowtime", now, "num", len(users))
			pushUserState(users...)
		case state := <-ws.clients.UserState():
			log.ZDebug(context.Background(), "OnlineCache user online change", "userID", state.UserID, "online", state.Online, "offline", state.Offline)
			pushUserState(state)
		}
	}
}
@ -0,0 +1,166 @@
package msggateway

import (
	"context"
	"sync"

	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/utils/datautil"
	"google.golang.org/protobuf/proto"
)

func (ws *WsServer) subscriberUserOnlineStatusChanges(ctx context.Context, userID string, platformIDs []int32) {
	if ws.clients.RecvSubChange(userID, platformIDs) {
		log.ZDebug(ctx, "gateway receive subscription message and go back online", "userID", userID, "platformIDs", platformIDs)
	} else {
		log.ZDebug(ctx, "gateway ignore user online status changes", "userID", userID, "platformIDs", platformIDs)
	}
	ws.pushUserIDOnlineStatus(ctx, userID, platformIDs)
}

func (ws *WsServer) SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) {
	var sub sdkws.SubUserOnlineStatus
	if err := proto.Unmarshal(data.Data, &sub); err != nil {
		return nil, err
	}
	ws.subscription.Sub(client, sub.SubscribeUserID, sub.UnsubscribeUserID)
	var resp sdkws.SubUserOnlineStatusTips
	if len(sub.SubscribeUserID) > 0 {
		resp.Subscribers = make([]*sdkws.SubUserOnlineStatusElem, 0, len(sub.SubscribeUserID))
		for _, userID := range sub.SubscribeUserID {
			platformIDs, err := ws.online.GetUserOnlinePlatform(ctx, userID)
			if err != nil {
				return nil, err
			}
			resp.Subscribers = append(resp.Subscribers, &sdkws.SubUserOnlineStatusElem{
				UserID:            userID,
				OnlinePlatformIDs: platformIDs,
			})
		}
	}
	return proto.Marshal(&resp)
}

func newSubscription() *Subscription {
	return &Subscription{
		userIDs: make(map[string]*subClient),
	}
}

type subClient struct {
	clients map[string]*Client
}

type Subscription struct {
	lock    sync.RWMutex
	userIDs map[string]*subClient // subscribe to the user's client connection
}

func (s *Subscription) DelClient(client *Client) {
	client.subLock.Lock()
	userIDs := datautil.Keys(client.subUserIDs)
	for _, userID := range userIDs {
		delete(client.subUserIDs, userID)
	}
	client.subLock.Unlock()
	if len(userIDs) == 0 {
		return
	}
	addr := client.ctx.GetRemoteAddr()
	s.lock.Lock()
	defer s.lock.Unlock()
	for _, userID := range userIDs {
		sub, ok := s.userIDs[userID]
		if !ok {
			continue
		}
		delete(sub.clients, addr)
		if len(sub.clients) == 0 {
			delete(s.userIDs, userID)
		}
	}
}

func (s *Subscription) GetClient(userID string) []*Client {
	s.lock.RLock()
	defer s.lock.RUnlock()
	cs, ok := s.userIDs[userID]
	if !ok {
		return nil
	}
	clients := make([]*Client, 0, len(cs.clients))
	for _, client := range cs.clients {
		clients = append(clients, client)
	}
	return clients
}

func (s *Subscription) Sub(client *Client, addUserIDs, delUserIDs []string) {
	if len(addUserIDs)+len(delUserIDs) == 0 {
		return
	}
	var (
		del = make(map[string]struct{})
		add = make(map[string]struct{})
	)
	client.subLock.Lock()
	for _, userID := range delUserIDs {
		if _, ok := client.subUserIDs[userID]; !ok {
			continue
		}
		del[userID] = struct{}{}
		delete(client.subUserIDs, userID)
	}
	for _, userID := range addUserIDs {
		delete(del, userID)
		if _, ok := client.subUserIDs[userID]; ok {
			continue
		}
		client.subUserIDs[userID] = struct{}{}
		add[userID] = struct{}{}
	}
	client.subLock.Unlock()
	if len(del)+len(add) == 0 {
		return
	}
	addr := client.ctx.GetRemoteAddr()
	s.lock.Lock()
	defer s.lock.Unlock()
	for userID := range del {
		sub, ok := s.userIDs[userID]
		if !ok {
			continue
		}
		delete(sub.clients, addr)
		if len(sub.clients) == 0 {
			delete(s.userIDs, userID)
		}
	}
	for userID := range add {
		sub, ok := s.userIDs[userID]
		if !ok {
			sub = &subClient{clients: make(map[string]*Client)}
			s.userIDs[userID] = sub
		}
		sub.clients[addr] = client
	}
}

func (ws *WsServer) pushUserIDOnlineStatus(ctx context.Context, userID string, platformIDs []int32) {
	clients := ws.subscription.GetClient(userID)
	if len(clients) == 0 {
		return
	}
	onlineStatus, err := proto.Marshal(&sdkws.SubUserOnlineStatusTips{
		Subscribers: []*sdkws.SubUserOnlineStatusElem{{UserID: userID, OnlinePlatformIDs: platformIDs}},
	})
	if err != nil {
		log.ZError(ctx, "pushUserIDOnlineStatus json.Marshal", err)
		return
	}
	for _, client := range clients {
		if err := client.PushUserOnlineStatus(onlineStatus); err != nil {
			log.ZError(ctx, "UserSubscribeOnlineStatusNotification push failed", err, "userID", client.UserID, "platformID", client.PlatformID, "changeUserID", userID, "changePlatformID", platformIDs)
		}
	}
}
@ -1,135 +1,185 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package msggateway

import (
	"sync"
	"time"

	"github.com/openimsdk/tools/utils/datautil"
)

type UserMap interface {
	GetAll(userID string) ([]*Client, bool)
	Get(userID string, platformID int) ([]*Client, bool, bool)
	Set(userID string, v *Client)
	DeleteClients(userID string, clients []*Client) (isDeleteUser bool)
	UserState() <-chan UserState
	GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState
	RecvSubChange(userID string, platformIDs []int32) bool
}

type UserState struct {
	UserID  string
	Online  []int32
	Offline []int32
}

type UserPlatform struct {
	Time    time.Time
	Clients []*Client
}

func (u *UserPlatform) PlatformIDs() []int32 {
	if len(u.Clients) == 0 {
		return nil
	}
	platformIDs := make([]int32, 0, len(u.Clients))
	for _, client := range u.Clients {
		platformIDs = append(platformIDs, int32(client.PlatformID))
	}
	return platformIDs
}

func (u *UserPlatform) PlatformIDSet() map[int32]struct{} {
	if len(u.Clients) == 0 {
		return nil
	}
	platformIDs := make(map[int32]struct{})
	for _, client := range u.Clients {
		platformIDs[int32(client.PlatformID)] = struct{}{}
	}
	return platformIDs
}

func newUserMap() UserMap {
	return &userMap{
		data: make(map[string]*UserPlatform),
		ch:   make(chan UserState, 10000),
	}
}

type userMap struct {
	lock sync.RWMutex
	data map[string]*UserPlatform
	ch   chan UserState
}

func (u *userMap) RecvSubChange(userID string, platformIDs []int32) bool {
	u.lock.RLock()
	defer u.lock.RUnlock()
	result, ok := u.data[userID]
	if !ok {
		return false
	}
	localPlatformIDs := result.PlatformIDSet()
	for _, platformID := range platformIDs {
		delete(localPlatformIDs, platformID)
	}
	if len(localPlatformIDs) == 0 {
		return false
	}
	u.push(userID, result, nil)
	return true
}

func (u *userMap) push(userID string, userPlatform *UserPlatform, offline []int32) bool {
	select {
	case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}:
		userPlatform.Time = time.Now()
		return true
	default:
		return false
	}
}

func (u *userMap) GetAll(userID string) ([]*Client, bool) {
	u.lock.RLock()
	defer u.lock.RUnlock()
	result, ok := u.data[userID]
	if !ok {
		return nil, false
	}
	return result.Clients, true
}

func (u *userMap) Get(userID string, platformID int) ([]*Client, bool, bool) {
	u.lock.RLock()
	defer u.lock.RUnlock()
	result, ok := u.data[userID]
	if !ok {
		return nil, false, false
	}
	var clients []*Client
	for _, client := range result.Clients {
		if client.PlatformID == platformID {
			clients = append(clients, client)
		}
	}
	return clients, true, len(clients) > 0
}

func (u *userMap) Set(userID string, client *Client) {
	u.lock.Lock()
	defer u.lock.Unlock()
	result, ok := u.data[userID]
	if ok {
		result.Clients = append(result.Clients, client)
	} else {
		result = &UserPlatform{
			Clients: []*Client{client},
		}
		u.data[userID] = result
	}
	u.push(client.UserID, result, nil)
}

func (u *userMap) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) {
	if len(clients) == 0 {
		return false
	}
	u.lock.Lock()
	defer u.lock.Unlock()
	result, ok := u.data[userID]
	if !ok {
		return false
	}
	offline := make([]int32, 0, len(clients))
	deleteAddr := datautil.SliceSetAny(clients, func(client *Client) string {
		return client.ctx.GetRemoteAddr()
	})
	tmp := result.Clients
	result.Clients = result.Clients[:0]
	for _, client := range tmp {
		if _, delCli := deleteAddr[client.ctx.GetRemoteAddr()]; delCli {
			offline = append(offline, int32(client.PlatformID))
		} else {
			result.Clients = append(result.Clients, client)
		}
	}
	defer u.push(userID, result, offline)
	if len(result.Clients) > 0 {
		return false
	}
	delete(u.data, userID)
	return true
}

func (u *userMap) GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState {
	u.lock.RLock()
	defer u.lock.RUnlock()
	result := make([]UserState, 0, len(u.data))
	for userID, userPlatform := range u.data {
		if userPlatform.Time.Before(deadline) {
			continue
		}
		userPlatform.Time = nowtime
		online := make([]int32, 0, len(userPlatform.Clients))
		for _, client := range userPlatform.Clients {
			online = append(online, int32(client.PlatformID))
		}
		result = append(result, UserState{UserID: userID, Online: online})
	}
	return result
}

func (u *userMap) UserState() <-chan UserState {
	return u.ch
}
@ -0,0 +1,56 @@
package conversation

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/internal/rpc/incrversion"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/open-im-server/v3/pkg/util/hashutil"
	"github.com/openimsdk/protocol/conversation"
)

func (c *conversationServer) GetFullOwnerConversationIDs(ctx context.Context, req *conversation.GetFullOwnerConversationIDsReq) (*conversation.GetFullOwnerConversationIDsResp, error) {
	vl, err := c.conversationDatabase.FindMaxConversationUserVersionCache(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	conversationIDs, err := c.conversationDatabase.GetConversationIDs(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	idHash := hashutil.IdHash(conversationIDs)
	if req.IdHash == idHash {
		conversationIDs = nil
	}
	return &conversation.GetFullOwnerConversationIDsResp{
		Version:         idHash,
		VersionID:       vl.ID.Hex(),
		Equal:           req.IdHash == idHash,
		ConversationIDs: conversationIDs,
	}, nil
}

func (c *conversationServer) GetIncrementalConversation(ctx context.Context, req *conversation.GetIncrementalConversationReq) (*conversation.GetIncrementalConversationResp, error) {
	opt := incrversion.Option[*conversation.Conversation, conversation.GetIncrementalConversationResp]{
		Ctx:             ctx,
		VersionKey:      req.UserID,
		VersionID:       req.VersionID,
		VersionNumber:   req.Version,
		Version:         c.conversationDatabase.FindConversationUserVersion,
		CacheMaxVersion: c.conversationDatabase.FindMaxConversationUserVersionCache,
		Find: func(ctx context.Context, conversationIDs []string) ([]*conversation.Conversation, error) {
			return c.getConversations(ctx, req.UserID, conversationIDs)
		},
		Resp: func(version *model.VersionLog, delIDs []string, insertList, updateList []*conversation.Conversation, full bool) *conversation.GetIncrementalConversationResp {
			return &conversation.GetIncrementalConversationResp{
				VersionID: version.ID.Hex(),
				Version:   uint64(version.Version),
				Full:      full,
				Delete:    delIDs,
				Insert:    insertList,
				Update:    updateList,
			}
		},
	}
	return opt.Build()
}
@ -0,0 +1,104 @@
package friend

import (
	"context"
	"slices"

	"github.com/openimsdk/open-im-server/v3/internal/rpc/incrversion"
	"github.com/openimsdk/open-im-server/v3/pkg/authverify"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/open-im-server/v3/pkg/util/hashutil"
	"github.com/openimsdk/protocol/relation"
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/log"
)

func (s *friendServer) NotificationUserInfoUpdate(ctx context.Context, req *relation.NotificationUserInfoUpdateReq) (*relation.NotificationUserInfoUpdateResp, error) {
	userIDs, err := s.db.FindFriendUserIDs(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	if len(userIDs) > 0 {
		friendUserIDs := []string{req.UserID}
		noCancelCtx := context.WithoutCancel(ctx)
		err := s.queue.PushCtx(ctx, func() {
			for _, userID := range userIDs {
				if err := s.db.OwnerIncrVersion(noCancelCtx, userID, friendUserIDs, model.VersionStateUpdate); err != nil {
					log.ZError(ctx, "OwnerIncrVersion", err, "userID", userID, "friendUserIDs", friendUserIDs)
				}
			}
			for _, userID := range userIDs {
				s.notificationSender.FriendInfoUpdatedNotification(noCancelCtx, req.UserID, userID)
			}
		})
		if err != nil {
			log.ZError(ctx, "NotificationUserInfoUpdate timeout", err, "userID", req.UserID)
		}
	}
	return &relation.NotificationUserInfoUpdateResp{}, nil
}

func (s *friendServer) GetFullFriendUserIDs(ctx context.Context, req *relation.GetFullFriendUserIDsReq) (*relation.GetFullFriendUserIDsResp, error) {
	vl, err := s.db.FindMaxFriendVersionCache(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	userIDs, err := s.db.FindFriendUserIDs(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	idHash := hashutil.IdHash(userIDs)
	if req.IdHash == idHash {
		userIDs = nil
	}
	return &relation.GetFullFriendUserIDsResp{
		Version:   idHash,
		VersionID: vl.ID.Hex(),
		Equal:     req.IdHash == idHash,
		UserIDs:   userIDs,
	}, nil
}

func (s *friendServer) GetIncrementalFriends(ctx context.Context, req *relation.GetIncrementalFriendsReq) (*relation.GetIncrementalFriendsResp, error) {
	if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil {
		return nil, err
	}
	var sortVersion uint64
	opt := incrversion.Option[*sdkws.FriendInfo, relation.GetIncrementalFriendsResp]{
		Ctx:           ctx,
		VersionKey:    req.UserID,
		VersionID:     req.VersionID,
		VersionNumber: req.Version,
		Version: func(ctx context.Context, ownerUserID string, version uint, limit int) (*model.VersionLog, error) {
			vl, err := s.db.FindFriendIncrVersion(ctx, ownerUserID, version, limit)
			if err != nil {
				return nil, err
			}
			vl.Logs = slices.DeleteFunc(vl.Logs, func(elem model.VersionLogElem) bool {
				if elem.EID == model.VersionSortChangeID {
					vl.LogLen--
					sortVersion = uint64(elem.Version)
					return true
				}
				return false
			})
			return vl, nil
		},
		CacheMaxVersion: s.db.FindMaxFriendVersionCache,
		Find: func(ctx context.Context, ids []string) ([]*sdkws.FriendInfo, error) {
			return s.getFriend(ctx, req.UserID, ids)
		},
		Resp: func(version *model.VersionLog, deleteIds []string, insertList, updateList []*sdkws.FriendInfo, full bool) *relation.GetIncrementalFriendsResp {
			return &relation.GetIncrementalFriendsResp{
				VersionID:   version.ID.Hex(),
				Version:     uint64(version.Version),
				Full:        full,
				Delete:      deleteIds,
				Insert:      insertList,
				Update:      updateList,
				SortVersion: sortVersion,
			}
		},
	}
	return opt.Build()
}
@ -0,0 +1,303 @@
package group

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/internal/rpc/incrversion"
	"github.com/openimsdk/open-im-server/v3/pkg/authverify"
	"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/open-im-server/v3/pkg/util/hashutil"
	"github.com/openimsdk/protocol/constant"
	pbgroup "github.com/openimsdk/protocol/group"
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
)

func (s *groupServer) GetFullGroupMemberUserIDs(ctx context.Context, req *pbgroup.GetFullGroupMemberUserIDsReq) (*pbgroup.GetFullGroupMemberUserIDsResp, error) {
	vl, err := s.db.FindMaxGroupMemberVersionCache(ctx, req.GroupID)
	if err != nil {
		return nil, err
	}
	userIDs, err := s.db.FindGroupMemberUserID(ctx, req.GroupID)
	if err != nil {
		return nil, err
	}
	idHash := hashutil.IdHash(userIDs)
	if req.IdHash == idHash {
		userIDs = nil
	}
	return &pbgroup.GetFullGroupMemberUserIDsResp{
		Version:   idHash,
		VersionID: vl.ID.Hex(),
		Equal:     req.IdHash == idHash,
		UserIDs:   userIDs,
	}, nil
}

func (s *groupServer) GetFullJoinGroupIDs(ctx context.Context, req *pbgroup.GetFullJoinGroupIDsReq) (*pbgroup.GetFullJoinGroupIDsResp, error) {
	vl, err := s.db.FindMaxJoinGroupVersionCache(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	groupIDs, err := s.db.FindJoinGroupID(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	idHash := hashutil.IdHash(groupIDs)
	if req.IdHash == idHash {
		groupIDs = nil
	}
	return &pbgroup.GetFullJoinGroupIDsResp{
		Version:   idHash,
		VersionID: vl.ID.Hex(),
		Equal:     req.IdHash == idHash,
		GroupIDs:  groupIDs,
	}, nil
}

func (s *groupServer) GetIncrementalGroupMember(ctx context.Context, req *pbgroup.GetIncrementalGroupMemberReq) (*pbgroup.GetIncrementalGroupMemberResp, error) {
	group, err := s.db.TakeGroup(ctx, req.GroupID)
	if err != nil {
		return nil, err
	}
	if group.Status == constant.GroupStatusDismissed {
		return nil, servererrs.ErrDismissedAlready.Wrap()
	}
	var (
		hasGroupUpdate bool
		sortVersion    uint64
	)
	opt := incrversion.Option[*sdkws.GroupMemberFullInfo, pbgroup.GetIncrementalGroupMemberResp]{
		Ctx:           ctx,
		VersionKey:    req.GroupID,
		VersionID:     req.VersionID,
		VersionNumber: req.Version,
		Version: func(ctx context.Context, groupID string, version uint, limit int) (*model.VersionLog, error) {
			vl, err := s.db.FindMemberIncrVersion(ctx, groupID, version, limit)
			if err != nil {
				return nil, err
			}
			logs := make([]model.VersionLogElem, 0, len(vl.Logs))
			for i, log := range vl.Logs {
				switch log.EID {
				case model.VersionGroupChangeID:
					vl.LogLen--
					hasGroupUpdate = true
				case model.VersionSortChangeID:
					vl.LogLen--
					sortVersion = uint64(log.Version)
				default:
					logs = append(logs, vl.Logs[i])
				}
			}
			vl.Logs = logs
			if vl.LogLen > 0 {
				hasGroupUpdate = true
			}
			return vl, nil
		},
		CacheMaxVersion: s.db.FindMaxGroupMemberVersionCache,
		Find: func(ctx context.Context, ids []string) ([]*sdkws.GroupMemberFullInfo, error) {
			return s.getGroupMembersInfo(ctx, req.GroupID, ids)
		},
		Resp: func(version *model.VersionLog, delIDs []string, insertList, updateList []*sdkws.GroupMemberFullInfo, full bool) *pbgroup.GetIncrementalGroupMemberResp {
			return &pbgroup.GetIncrementalGroupMemberResp{
				VersionID:   version.ID.Hex(),
				Version:     uint64(version.Version),
				Full:        full,
				Delete:      delIDs,
				Insert:      insertList,
				Update:      updateList,
				SortVersion: sortVersion,
			}
		},
	}
	resp, err := opt.Build()
	if err != nil {
		return nil, err
	}
	if resp.Full || hasGroupUpdate {
		count, err := s.db.FindGroupMemberNum(ctx, group.GroupID)
		if err != nil {
			return nil, err
		}
		owner, err := s.db.TakeGroupOwner(ctx, group.GroupID)
		if err != nil {
			return nil, err
		}
		resp.Group = s.groupDB2PB(group, owner.UserID, count)
	}
	return resp, nil
}

func (s *groupServer) BatchGetIncrementalGroupMember(ctx context.Context, req *pbgroup.BatchGetIncrementalGroupMemberReq) (resp *pbgroup.BatchGetIncrementalGroupMemberResp, err error) {
	type VersionInfo struct {
		GroupID       string
		VersionID     string
		VersionNumber uint64
	}

	var groupIDs []string

	groupsVersionMap := make(map[string]*VersionInfo)
	groupsMap := make(map[string]*model.Group)
	hasGroupUpdateMap := make(map[string]bool)
	sortVersionMap := make(map[string]uint64)

	var targetKeys, versionIDs []string
	var versionNumbers []uint64

	var requestBodyLen int

	for _, group := range req.ReqList {
		groupsVersionMap[group.GroupID] = &VersionInfo{
			GroupID:       group.GroupID,
			VersionID:     group.VersionID,
			VersionNumber: group.Version,
		}

		groupIDs = append(groupIDs, group.GroupID)
	}

	groups, err := s.db.FindGroup(ctx, groupIDs)
	if err != nil {
		return nil, errs.Wrap(err)
	}

	for _, group := range groups {
		if group.Status == constant.GroupStatusDismissed {
			err = servererrs.ErrDismissedAlready.Wrap()
			log.ZError(ctx, "This group is Dismissed Already", err, "group is", group.GroupID)

			delete(groupsVersionMap, group.GroupID)
		} else {
			groupsMap[group.GroupID] = group
		}
	}

	for groupID, vInfo := range groupsVersionMap {
		targetKeys = append(targetKeys, groupID)
		versionIDs = append(versionIDs, vInfo.VersionID)
		versionNumbers = append(versionNumbers, vInfo.VersionNumber)
	}

	opt := incrversion.BatchOption[[]*sdkws.GroupMemberFullInfo, pbgroup.BatchGetIncrementalGroupMemberResp]{
		Ctx:            ctx,
		TargetKeys:     targetKeys,
		VersionIDs:     versionIDs,
		VersionNumbers: versionNumbers,
		Versions: func(ctx context.Context, groupIDs []string, versions []uint64, limits []int) (map[string]*model.VersionLog, error) {
			vLogs, err := s.db.BatchFindMemberIncrVersion(ctx, groupIDs, versions, limits)
			if err != nil {
				return nil, errs.Wrap(err)
			}

			for groupID, vlog := range vLogs {
				vlogElems := make([]model.VersionLogElem, 0, len(vlog.Logs))
				for i, log := range vlog.Logs {
					switch log.EID {
					case model.VersionGroupChangeID:
						vlog.LogLen--
						hasGroupUpdateMap[groupID] = true
					case model.VersionSortChangeID:
						vlog.LogLen--
						sortVersionMap[groupID] = uint64(log.Version)
					default:
						vlogElems = append(vlogElems, vlog.Logs[i])
					}
				}
				vlog.Logs = vlogElems
				if vlog.LogLen > 0 {
					hasGroupUpdateMap[groupID] = true
				}
			}

			return vLogs, nil
		},
		CacheMaxVersions: s.db.BatchFindMaxGroupMemberVersionCache,
		Find: func(ctx context.Context, groupID string, ids []string) ([]*sdkws.GroupMemberFullInfo, error) {
			memberInfo, err := s.getGroupMembersInfo(ctx, groupID, ids)
			if err != nil {
				return nil, err
			}

			return memberInfo, err
		},
		Resp: func(versions map[string]*model.VersionLog, deleteIdsMap map[string][]string, insertListMap, updateListMap map[string][]*sdkws.GroupMemberFullInfo, fullMap map[string]bool) *pbgroup.BatchGetIncrementalGroupMemberResp {
			resList := make(map[string]*pbgroup.GetIncrementalGroupMemberResp)

			for groupID, versionLog := range versions {
				resList[groupID] = &pbgroup.GetIncrementalGroupMemberResp{
					VersionID:   versionLog.ID.Hex(),
					Version:     uint64(versionLog.Version),
					Full:        fullMap[groupID],
					Delete:      deleteIdsMap[groupID],
					Insert:      insertListMap[groupID],
					Update:      updateListMap[groupID],
					SortVersion: sortVersionMap[groupID],
				}

				requestBodyLen += len(insertListMap[groupID]) + len(updateListMap[groupID]) + len(deleteIdsMap[groupID])
				if requestBodyLen > 200 {
					break
				}
			}

			return &pbgroup.BatchGetIncrementalGroupMemberResp{
				RespList: resList,
			}
		},
	}

	resp, err = opt.Build()
	if err != nil {
		return nil, errs.Wrap(err)
	}

	for groupID, val := range resp.RespList {
		if val.Full || hasGroupUpdateMap[groupID] {
			count, err := s.db.FindGroupMemberNum(ctx, groupID)
			if err != nil {
				return nil, err
			}

			owner, err := s.db.TakeGroupOwner(ctx, groupID)
			if err != nil {
				return nil, err
			}

			resp.RespList[groupID].Group = s.groupDB2PB(groupsMap[groupID], owner.UserID, count)
		}
	}

	return resp, nil
}

func (s *groupServer) GetIncrementalJoinGroup(ctx context.Context, req *pbgroup.GetIncrementalJoinGroupReq) (*pbgroup.GetIncrementalJoinGroupResp, error) {
	if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil {
		return nil, err
	}
	opt := incrversion.Option[*sdkws.GroupInfo, pbgroup.GetIncrementalJoinGroupResp]{
		Ctx:             ctx,
		VersionKey:      req.UserID,
		VersionID:       req.VersionID,
		VersionNumber:   req.Version,
		Version:         s.db.FindJoinIncrVersion,
		CacheMaxVersion: s.db.FindMaxJoinGroupVersionCache,
		Find:            s.getGroupsInfo,
		Resp: func(version *model.VersionLog, delIDs []string, insertList, updateList []*sdkws.GroupInfo, full bool) *pbgroup.GetIncrementalJoinGroupResp {
			return &pbgroup.GetIncrementalJoinGroupResp{
				VersionID: version.ID.Hex(),
				Version:   uint64(version.Version),
				Full:      full,
				Delete:    delIDs,
				Insert:    insertList,
				Update:    updateList,
			}
		},
	}
	return opt.Build()
}
@ -0,0 +1,207 @@
package incrversion

import (
	"context"
	"fmt"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/errs"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

type BatchOption[A, B any] struct {
	Ctx            context.Context
	TargetKeys     []string
	VersionIDs     []string
	VersionNumbers []uint64
	//SyncLimit int
	Versions         func(ctx context.Context, dIds []string, versions []uint64, limits []int) (map[string]*model.VersionLog, error)
	CacheMaxVersions func(ctx context.Context, dIds []string) (map[string]*model.VersionLog, error)
	Find             func(ctx context.Context, dId string, ids []string) (A, error)
	Resp             func(versionsMap map[string]*model.VersionLog, deleteIdsMap map[string][]string, insertListMap, updateListMap map[string]A, fullMap map[string]bool) *B
}

func (o *BatchOption[A, B]) newError(msg string) error {
	return errs.ErrInternalServer.WrapMsg(msg)
}

func (o *BatchOption[A, B]) check() error {
	if o.Ctx == nil {
		return o.newError("opt ctx is nil")
	}
	if len(o.TargetKeys) == 0 {
		return o.newError("targetKeys is empty")
	}
	if o.Versions == nil {
		return o.newError("func versions is nil")
	}
	if o.Find == nil {
		return o.newError("func find is nil")
	}
	if o.Resp == nil {
		return o.newError("func resp is nil")
	}
	return nil
}

func (o *BatchOption[A, B]) validVersions() []bool {
	valids := make([]bool, len(o.VersionIDs))
	for i, versionID := range o.VersionIDs {
		objID, err := primitive.ObjectIDFromHex(versionID)
		valids[i] = (err == nil && (!objID.IsZero()) && o.VersionNumbers[i] > 0)
	}
	return valids
}

func (o *BatchOption[A, B]) equalIDs(objIDs []primitive.ObjectID) []bool {
	equals := make([]bool, len(o.VersionIDs))
	for i, versionID := range o.VersionIDs {
		equals[i] = versionID == objIDs[i].Hex()
	}
	return equals
}

func (o *BatchOption[A, B]) getVersions(tags *[]int) (versions map[string]*model.VersionLog, err error) {
	var dIDs []string
	var versionNums []uint64
	var limits []int

	valids := o.validVersions()

	if o.CacheMaxVersions == nil {
		for i, valid := range valids {
			if valid {
				(*tags)[i] = tagQuery
				dIDs = append(dIDs, o.TargetKeys[i])
				versionNums = append(versionNums, o.VersionNumbers[i])
				limits = append(limits, syncLimit)
			} else {
				(*tags)[i] = tagFull
				dIDs = append(dIDs, o.TargetKeys[i])
				versionNums = append(versionNums, 0)
				limits = append(limits, 0)
			}
		}

		versions, err = o.Versions(o.Ctx, dIDs, versionNums, limits)
		if err != nil {
			return nil, errs.Wrap(err)
		}
		return versions, nil

	} else {
		caches, err := o.CacheMaxVersions(o.Ctx, o.TargetKeys)
		if err != nil {
			return nil, errs.Wrap(err)
		}

		objIDs := make([]primitive.ObjectID, len(o.VersionIDs))

		for i, versionID := range o.VersionIDs {
			objID, _ := primitive.ObjectIDFromHex(versionID)
			objIDs[i] = objID
		}

		equals := o.equalIDs(objIDs)
		for i, valid := range valids {
			if !valid {
				(*tags)[i] = tagFull
			} else if !equals[i] {
				(*tags)[i] = tagFull
			} else if o.VersionNumbers[i] == uint64(caches[o.TargetKeys[i]].Version) {
				(*tags)[i] = tagEqual
			} else {
				(*tags)[i] = tagQuery
				dIDs = append(dIDs, o.TargetKeys[i])
				versionNums = append(versionNums, o.VersionNumbers[i])
				limits = append(limits, syncLimit)

				delete(caches, o.TargetKeys[i])
			}
		}

		if dIDs != nil {
			versionMap, err := o.Versions(o.Ctx, dIDs, versionNums, limits)
			if err != nil {
				return nil, errs.Wrap(err)
			}

			for k, v := range versionMap {
				caches[k] = v
			}
		}

		versions = caches
	}
	return versions, nil
}

func (o *BatchOption[A, B]) Build() (*B, error) {
	if err := o.check(); err != nil {
		return nil, errs.Wrap(err)
	}

	tags := make([]int, len(o.TargetKeys))
	versions, err := o.getVersions(&tags)
	if err != nil {
		return nil, errs.Wrap(err)
	}

	fullMap := make(map[string]bool)
	for i, tag := range tags {
		switch tag {
		case tagQuery:
			vLog := versions[o.TargetKeys[i]]
			fullMap[o.TargetKeys[i]] = vLog.ID.Hex() != o.VersionIDs[i] || uint64(vLog.Version) < o.VersionNumbers[i] || len(vLog.Logs) != vLog.LogLen
		case tagFull:
			fullMap[o.TargetKeys[i]] = true
		case tagEqual:
			fullMap[o.TargetKeys[i]] = false
		default:
			panic(fmt.Errorf("undefined tag %d", tag))
		}
	}

	var (
		insertIdsMap = make(map[string][]string)
		deleteIdsMap = make(map[string][]string)
		updateIdsMap = make(map[string][]string)
	)

	for _, targetKey := range o.TargetKeys {
		if !fullMap[targetKey] {
			version := versions[targetKey]
			insertIds, deleteIds, updateIds := version.DeleteAndChangeIDs()
			insertIdsMap[targetKey] = insertIds
			deleteIdsMap[targetKey] = deleteIds
			updateIdsMap[targetKey] = updateIds
		}
	}

	var (
		insertListMap = make(map[string]A)
		updateListMap = make(map[string]A)
	)

	for targetKey, insertIds := range insertIdsMap {
		if len(insertIds) > 0 {
			insertList, err := o.Find(o.Ctx, targetKey, insertIds)
			if err != nil {
				return nil, errs.Wrap(err)
			}
			insertListMap[targetKey] = insertList
		}
	}

	for targetKey, updateIds := range updateIdsMap {
		if len(updateIds) > 0 {
			updateList, err := o.Find(o.Ctx, targetKey, updateIds)
			if err != nil {
				return nil, errs.Wrap(err)
			}
			updateListMap[targetKey] = updateList
		}
	}

	return o.Resp(versions, deleteIdsMap, insertListMap, updateListMap, fullMap), nil
}
@ -0,0 +1,153 @@
package incrversion

import (
	"context"
	"fmt"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/errs"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

//func Limit(maxSync int, version uint64) int {
//	if version == 0 {
//		return 0
//	}
//	return maxSync
//}

const syncLimit = 200

const (
	tagQuery = iota + 1
	tagFull
	tagEqual
)

type Option[A, B any] struct {
	Ctx           context.Context
	VersionKey    string
	VersionID     string
	VersionNumber uint64
	//SyncLimit int
	CacheMaxVersion func(ctx context.Context, dId string) (*model.VersionLog, error)
	Version         func(ctx context.Context, dId string, version uint, limit int) (*model.VersionLog, error)
	//SortID func(ctx context.Context, dId string) ([]string, error)
	Find func(ctx context.Context, ids []string) ([]A, error)
	Resp func(version *model.VersionLog, deleteIds []string, insertList, updateList []A, full bool) *B
}

func (o *Option[A, B]) newError(msg string) error {
	return errs.ErrInternalServer.WrapMsg(msg)
}

func (o *Option[A, B]) check() error {
	if o.Ctx == nil {
		return o.newError("opt ctx is nil")
	}
	if o.VersionKey == "" {
		return o.newError("versionKey is empty")
	}
	//if o.SyncLimit <= 0 {
	//	return o.newError("invalid synchronization quantity")
	//}
	if o.Version == nil {
		return o.newError("func version is nil")
	}
	//if o.SortID == nil {
	//	return o.newError("func allID is nil")
	//}
	if o.Find == nil {
		return o.newError("func find is nil")
	}
	if o.Resp == nil {
		return o.newError("func resp is nil")
	}
	return nil
}

func (o *Option[A, B]) validVersion() bool {
	objID, err := primitive.ObjectIDFromHex(o.VersionID)
	return err == nil && (!objID.IsZero()) && o.VersionNumber > 0
}

func (o *Option[A, B]) equalID(objID primitive.ObjectID) bool {
	return o.VersionID == objID.Hex()
}

func (o *Option[A, B]) getVersion(tag *int) (*model.VersionLog, error) {
	if o.CacheMaxVersion == nil {
		if o.validVersion() {
			*tag = tagQuery
			return o.Version(o.Ctx, o.VersionKey, uint(o.VersionNumber), syncLimit)
		}
		*tag = tagFull
		return o.Version(o.Ctx, o.VersionKey, 0, 0)
	} else {
		cache, err := o.CacheMaxVersion(o.Ctx, o.VersionKey)
		if err != nil {
			return nil, err
		}
		if !o.validVersion() {
			*tag = tagFull
			return cache, nil
		}
		if !o.equalID(cache.ID) {
			*tag = tagFull
			return cache, nil
		}
		if o.VersionNumber == uint64(cache.Version) {
			*tag = tagEqual
			return cache, nil
		}
		*tag = tagQuery
		return o.Version(o.Ctx, o.VersionKey, uint(o.VersionNumber), syncLimit)
	}
}

func (o *Option[A, B]) Build() (*B, error) {
	if err := o.check(); err != nil {
		return nil, err
	}
	var tag int
	version, err := o.getVersion(&tag)
	if err != nil {
		return nil, err
	}
	var full bool
	switch tag {
	case tagQuery:
		full = version.ID.Hex() != o.VersionID || uint64(version.Version) < o.VersionNumber || len(version.Logs) != version.LogLen
	case tagFull:
		full = true
	case tagEqual:
		full = false
	default:
		panic(fmt.Errorf("undefined tag %d", tag))
	}
	var (
		insertIds []string
		deleteIds []string
		updateIds []string
	)
	if !full {
		insertIds, deleteIds, updateIds = version.DeleteAndChangeIDs()
	}
	var (
		insertList []A
		updateList []A
	)
	if len(insertIds) > 0 {
		insertList, err = o.Find(o.Ctx, insertIds)
		if err != nil {
			return nil, err
		}
	}
	if len(updateIds) > 0 {
		updateList, err = o.Find(o.Ctx, updateIds)
		if err != nil {
			return nil, err
		}
	}
	return o.Resp(version, deleteIds, insertList, updateList, full), nil
}
@ -0,0 +1,82 @@
package user

import (
	"context"

	"github.com/openimsdk/protocol/constant"
	pbuser "github.com/openimsdk/protocol/user"
)

func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) {
	platformIDs, err := s.online.GetOnline(ctx, userID)
	if err != nil {
		return nil, err
	}
	status := pbuser.OnlineStatus{
		UserID:      userID,
		PlatformIDs: platformIDs,
	}
	if len(platformIDs) > 0 {
		status.Status = constant.Online
	} else {
		status.Status = constant.Offline
	}
	return &status, nil
}

func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) {
	res := make([]*pbuser.OnlineStatus, 0, len(userIDs))
	for _, userID := range userIDs {
		status, err := s.getUserOnlineStatus(ctx, userID)
		if err != nil {
			return nil, err
		}
		res = append(res, status)
	}
	return res, nil
}

// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) {
	return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}

// GetUserStatus Get the online status of the user.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) {
	res, err := s.getUsersOnlineStatus(ctx, req.UserIDs)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetUserStatusResp{StatusList: res}, nil
}

// SetUserStatus Synchronize user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) {
	var (
		online  []int32
		offline []int32
	)
	switch req.Status {
	case constant.Online:
		online = []int32{req.PlatformID}
	case constant.Offline:
		offline = []int32{req.PlatformID}
	}
	if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil {
		return nil, err
	}
	return &pbuser.SetUserStatusResp{}, nil
}

// GetSubscribeUsersStatus Get the online status of subscribers.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
	return &pbuser.GetSubscribeUsersStatusResp{}, nil
}

func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
	for _, status := range req.Status {
		if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil {
			return nil, err
		}
	}
	return &pbuser.SetUserOnlineStatusResp{}, nil
}
@ -0,0 +1,48 @@
package prommetrics

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	apiCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "api_count",
			Help: "Total number of API calls",
		},
		[]string{"path", "method", "code"},
	)
	httpCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_count",
			Help: "Total number of HTTP calls",
		},
		[]string{"path", "method", "status"},
	)
)

func ApiInit(prometheusPort int) error {
	apiRegistry := prometheus.NewRegistry()
	cs := append(
		baseCollector,
		apiCounter,
		httpCounter,
	)
	return Init(apiRegistry, prometheusPort, commonPath, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}), cs...)
}

func APICall(path string, method string, apiCode int) {
	apiCounter.With(prometheus.Labels{"path": path, "method": method, "code": strconv.Itoa(apiCode)}).Inc()
}

func HttpCall(path string, method string, status int) {
	httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc()
}

//func ApiHandler() http.Handler {
//	return promhttp.InstrumentMetricHandler(
//		apiRegistry, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}),
//	)
//}
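HttpCall only needs the matched route, the HTTP method, and the response status, so it can be reported from a single Gin middleware. The following is a hypothetical sketch, not this repository's actual wiring: the package name and middleware name are made up for illustration, and APICall (which expects the business error code rather than the HTTP status) would be invoked from wherever the API response is assembled.

package middleware // hypothetical placement

import (
	"github.com/gin-gonic/gin"

	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
)

// PrometheusHTTPCount reports one http_count sample per request after the handler runs.
func PrometheusHTTPCount() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next()
		path := c.FullPath() // route template, e.g. /user/get_users_info
		if path == "" {
			path = c.Request.URL.Path // unmatched routes fall back to the raw path
		}
		prommetrics.HttpCall(path, c.Request.Method, c.Writer.Status())
	}
}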
@ -1,30 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prommetrics

import ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"

/*
labels := prometheus.Labels{"label_one": "any", "label_two": "value"}
ApiCustomCnt.MetricCollector.(*prometheus.CounterVec).With(labels).Inc().
*/
var (
	ApiCustomCnt = &ginprom.Metric{
		Name:        "custom_total",
		Description: "Custom counter events.",
		Type:        "counter_vec",
		Args:        []string{"label_one", "label_two"},
	}
)
@ -0,0 +1,10 @@
package prommetrics

import "github.com/prometheus/client_golang/prometheus"

var (
	UserRegisterCounter = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "user_register_total",
		Help: "The number of user register",
	})
)
@ -0,0 +1,60 @@
package prommetrics

import (
	"strconv"

	gp "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

const rpcPath = commonPath

var (
	grpcMetrics *gp.ServerMetrics
	rpcCounter  = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "rpc_count",
			Help: "Total number of RPC calls",
		},
		[]string{"name", "path", "code"},
	)
)

func RpcInit(cs []prometheus.Collector, prometheusPort int) error {
	reg := prometheus.NewRegistry()
	cs = append(append(
		baseCollector,
		rpcCounter,
	), cs...)
	return Init(reg, prometheusPort, rpcPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
}

func RPCCall(name string, path string, code int) {
	rpcCounter.With(prometheus.Labels{"name": name, "path": path, "code": strconv.Itoa(code)}).Inc()
}

func GetGrpcServerMetrics() *gp.ServerMetrics {
	if grpcMetrics == nil {
		grpcMetrics = gp.NewServerMetrics()
		grpcMetrics.EnableHandlingTimeHistogram()
	}
	return grpcMetrics
}

func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Collector {
	switch registerName {
	case share.RpcRegisterName.MessageGateway:
		return []prometheus.Collector{OnlineUserGauge}
	case share.RpcRegisterName.Msg:
		return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
	case share.RpcRegisterName.Push:
		return []prometheus.Collector{MsgOfflinePushFailedCounter}
	case share.RpcRegisterName.Auth:
		return []prometheus.Collector{UserLoginCounter}
	case share.RpcRegisterName.User:
		return []prometheus.Collector{UserRegisterCounter}
	default:
		return nil
	}
}
@ -0,0 +1,13 @@
package cachekey

import "time"

const (
	OnlineKey     = "ONLINE:"
	OnlineChannel = "online_change"
	OnlineExpire  = time.Hour / 2
)

func GetOnlineKey(userID string) string {
	return OnlineKey + userID
}
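The cache implementation that consumes these constants is not part of the files shown here. The following is a hypothetical go-redis v9 sketch that only illustrates how GetOnlineKey and OnlineExpire fit together: a per-user key holding the online platform IDs with a TTL that a renewal loop (such as ChangeOnlineStatus above) would keep re-arming. The plain-set layout and the localhost address are assumptions, not the project's actual storage scheme.

package main

import (
	"context"
	"fmt"
	"strconv"

	"github.com/redis/go-redis/v9"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	userID, platformID := "u100", int32(1)
	key := cachekey.GetOnlineKey(userID) // "ONLINE:u100"

	// Record the platform as online and (re)arm the expiry; without renewal the
	// entry disappears after OnlineExpire (30 minutes).
	if err := rdb.SAdd(ctx, key, strconv.Itoa(int(platformID))).Err(); err != nil {
		panic(err)
	}
	if err := rdb.Expire(ctx, key, cachekey.OnlineExpire).Err(); err != nil {
		panic(err)
	}

	platforms, err := rdb.SMembers(ctx, key).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("online platforms for", userID, ":", platforms)
}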
Some files were not shown because too many files have changed in this diff