commit 6e3c99ba69
@@ -1,7 +1,14 @@
-address: [ localhost:16379 ]
+address: [localhost:16379]
 username:
 password: openIM123
 clusterMode: false
+# redis Mode, including "standalone","cluster","sentinel"
+redisMode: "standalone"
 db: 0
 maxRetry: 10
 poolSize: 100
+# Sentinel configuration (only used when redisMode is "sentinel")
+sentinelMode:
+  masterName: "redis-master"
+  sentinelsAddrs: ["127.0.0.1:26379", "127.0.0.1:26380", "127.0.0.1:26381"]
+  routeByLatency: true
+  routeRandomly: true
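As a cross-check on what the new keys mean, the sketch below shows one way the sentinelMode block could be fed to go-redis v9. The FailoverOptions field names come from that library, and NewFailoverClusterClient (rather than NewFailoverClient) is used because go-redis only honors routeByLatency/routeRandomly through the cluster-style failover client. This illustrates the config's intent; it is not a claim about how open-im-server constructs its client.

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	// Values mirror the yaml keys added above; swap in your own deployment's settings.
	rdb := redis.NewFailoverClusterClient(&redis.FailoverOptions{
		MasterName:     "redis-master",
		SentinelAddrs:  []string{"127.0.0.1:26379", "127.0.0.1:26380", "127.0.0.1:26381"},
		Password:       "openIM123",
		DB:             0,
		PoolSize:       100,
		MaxRetries:     10,
		RouteByLatency: true, // read from the lowest-latency node
		RouteRandomly:  true, // otherwise pick replicas at random
	})
	defer rdb.Close()

	fmt.Println(rdb.Ping(context.Background()).Err())
}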
@@ -0,0 +1,132 @@
package msgtransfer

import (
	"context"
	"encoding/base64"

	"github.com/openimsdk/open-im-server/v3/pkg/apistruct"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/mcontext"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/openimsdk/tools/utils/stringutil"
	"google.golang.org/protobuf/proto"

	cbapi "github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
)

func toCommonCallback(ctx context.Context, msg *sdkws.MsgData, command string) cbapi.CommonCallbackReq {
	return cbapi.CommonCallbackReq{
		SendID:           msg.SendID,
		ServerMsgID:      msg.ServerMsgID,
		CallbackCommand:  command,
		ClientMsgID:      msg.ClientMsgID,
		OperationID:      mcontext.GetOperationID(ctx),
		SenderPlatformID: msg.SenderPlatformID,
		SenderNickname:   msg.SenderNickname,
		SessionType:      msg.SessionType,
		MsgFrom:          msg.MsgFrom,
		ContentType:      msg.ContentType,
		Status:           msg.Status,
		SendTime:         msg.SendTime,
		CreateTime:       msg.CreateTime,
		AtUserIDList:     msg.AtUserIDList,
		SenderFaceURL:    msg.SenderFaceURL,
		Content:          GetContent(msg),
		Seq:              uint32(msg.Seq),
		Ex:               msg.Ex,
	}
}

func GetContent(msg *sdkws.MsgData) string {
	if msg.ContentType >= constant.NotificationBegin && msg.ContentType <= constant.NotificationEnd {
		var tips sdkws.TipsComm
		_ = proto.Unmarshal(msg.Content, &tips)
		return tips.JsonDetail
	}
	return string(msg.Content)
}

func (mc *OnlineHistoryMongoConsumerHandler) webhookAfterSendSingleMsg(ctx context.Context, after *config.AfterConfig, msg *sdkws.MsgData) {
	if msg.ContentType == constant.Typing {
		return
	}

	if !filterAfterMsg(msg, after) {
		return
	}

	cbReq := &cbapi.CallbackAfterSendSingleMsgReq{
		CommonCallbackReq: toCommonCallback(ctx, msg, cbapi.CallbackAfterSendSingleMsgCommand),
		RecvID:            msg.RecvID,
	}
	mc.webhookClient.AsyncPostWithQuery(ctx, cbReq.GetCallbackCommand(), cbReq, &cbapi.CallbackAfterSendSingleMsgResp{}, after, buildKeyMsgDataQuery(msg))
}

func (mc *OnlineHistoryMongoConsumerHandler) webhookAfterSendGroupMsg(ctx context.Context, after *config.AfterConfig, msg *sdkws.MsgData) {
	if msg.ContentType == constant.Typing {
		return
	}

	if !filterAfterMsg(msg, after) {
		return
	}

	cbReq := &cbapi.CallbackAfterSendGroupMsgReq{
		CommonCallbackReq: toCommonCallback(ctx, msg, cbapi.CallbackAfterSendGroupMsgCommand),
		GroupID:           msg.GroupID,
	}

	mc.webhookClient.AsyncPostWithQuery(ctx, cbReq.GetCallbackCommand(), cbReq, &cbapi.CallbackAfterSendGroupMsgResp{}, after, buildKeyMsgDataQuery(msg))
}

func buildKeyMsgDataQuery(msg *sdkws.MsgData) map[string]string {
	keyMsgData := apistruct.KeyMsgData{
		SendID:  msg.SendID,
		RecvID:  msg.RecvID,
		GroupID: msg.GroupID,
	}

	return map[string]string{
		webhook.Key: base64.StdEncoding.EncodeToString(stringutil.StructToJsonBytes(keyMsgData)),
	}
}

func filterAfterMsg(msg *sdkws.MsgData, after *config.AfterConfig) bool {
	return filterMsg(msg, after.AttentionIds, after.DeniedTypes)
}

func filterMsg(msg *sdkws.MsgData, attentionIds []string, deniedTypes []int32) bool {
	// When attentionIds is configured, only deliver callbacks for the listed users/groups.
	if len(attentionIds) != 0 && msg.SessionType == constant.SingleChatType && !datautil.Contain(msg.RecvID, attentionIds...) {
		return false
	}

	if len(attentionIds) != 0 && msg.SessionType == constant.ReadGroupChatType && !datautil.Contain(msg.GroupID, attentionIds...) {
		return false
	}

	if defaultDeniedTypes(msg.ContentType) {
		return false
	}

	if len(deniedTypes) != 0 && datautil.Contain(msg.ContentType, deniedTypes...) {
		return false
	}

	return true
}

func defaultDeniedTypes(contentType int32) bool {
	if contentType >= constant.NotificationBegin && contentType <= constant.NotificationEnd {
		return true
	}
	if contentType == constant.Typing {
		return true
	}
	return false
}
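The buildKeyMsgDataQuery helper above base64-encodes a JSON snapshot of the sender, receiver, and group IDs and attaches it as a query parameter on the webhook URL. A receiving service could recover it roughly as in the sketch below; the keyMsgData mirror struct and its JSON tags, the "key" parameter name, and the /callback route are assumptions made for illustration, not values taken from this commit.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// keyMsgData mirrors apistruct.KeyMsgData on the receiving side; the JSON tags
// are an assumption and should be checked against the server's apistruct package.
type keyMsgData struct {
	SendID  string `json:"sendID"`
	RecvID  string `json:"recvID"`
	GroupID string `json:"groupID"`
}

func main() {
	http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) {
		// "key" is assumed to be the value of webhook.Key used by buildKeyMsgDataQuery.
		raw, err := base64.StdEncoding.DecodeString(r.URL.Query().Get("key"))
		if err != nil {
			http.Error(w, "bad key parameter", http.StatusBadRequest)
			return
		}
		var k keyMsgData
		if err := json.Unmarshal(raw, &k); err != nil {
			http.Error(w, "bad key payload", http.StatusBadRequest)
			return
		}
		fmt.Printf("callback for sendID=%s recvID=%s groupID=%s\n", k.SendID, k.RecvID, k.GroupID)
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}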
@@ -0,0 +1,89 @@
package cron

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/openimsdk/tools/log"
	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

const (
	lockLeaseTTL = 300
)

type EtcdLocker struct {
	client     *clientv3.Client
	instanceID string
}

// NewEtcdLocker creates a new etcd distributed lock
func NewEtcdLocker(client *clientv3.Client) (*EtcdLocker, error) {
	hostname, _ := os.Hostname()
	pid := os.Getpid()
	instanceID := fmt.Sprintf("%s-pid-%d-%d", hostname, pid, time.Now().UnixNano())

	locker := &EtcdLocker{
		client:     client,
		instanceID: instanceID,
	}

	return locker, nil
}

func (e *EtcdLocker) ExecuteWithLock(ctx context.Context, taskName string, task func()) {
	session, err := concurrency.NewSession(e.client, concurrency.WithTTL(lockLeaseTTL))
	if err != nil {
		log.ZWarn(ctx, "Failed to create etcd session", err,
			"taskName", taskName,
			"instanceID", e.instanceID)
		return
	}
	defer session.Close()

	lockKey := fmt.Sprintf("openim/crontask/%s", taskName)
	mutex := concurrency.NewMutex(session, lockKey)

	ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
	defer cancel()

	err = mutex.TryLock(ctxWithTimeout)
	if err != nil {
		if err == context.DeadlineExceeded {
			log.ZDebug(ctx, "Task is being executed by another instance, skipping",
				"taskName", taskName,
				"instanceID", e.instanceID)
		} else {
			log.ZWarn(ctx, "Failed to acquire task lock", err,
				"taskName", taskName,
				"instanceID", e.instanceID)
		}
		return
	}

	defer func() {
		if err := mutex.Unlock(ctx); err != nil {
			log.ZWarn(ctx, "Failed to release task lock", err,
				"taskName", taskName,
				"instanceID", e.instanceID)
		} else {
			log.ZInfo(ctx, "Successfully released task lock",
				"taskName", taskName,
				"instanceID", e.instanceID)
		}
	}()

	log.ZInfo(ctx, "Successfully acquired task lock, starting execution",
		"taskName", taskName,
		"instanceID", e.instanceID,
		"sessionID", session.Lease())

	task()

	log.ZInfo(ctx, "Task execution completed",
		"taskName", taskName,
		"instanceID", e.instanceID)
}
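For context, here is a hedged sketch of how a caller might wrap a periodic job in ExecuteWithLock so that only one replica runs it per tick. The etcd endpoint, the ticker interval, and the "clearMsg" task name are placeholders rather than values from this commit.

package cron

import (
	"context"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// runClearMsgExample is a hypothetical caller, not part of this commit. It shows
// how ExecuteWithLock could keep a periodic job single-instance across replicas.
func runClearMsgExample(ctx context.Context) error {
	client, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return err
	}
	defer client.Close()

	locker, err := NewEtcdLocker(client)
	if err != nil {
		return err
	}

	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			// Only the replica that wins the etcd mutex runs the task; the others
			// time out on TryLock after 100ms and skip this round.
			locker.ExecuteWithLock(ctx, "clearMsg", func() {
				// ... clear expired messages ...
			})
		}
	}
}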