feat: implement offline push using kafka (#2600)

* refactor: refactor workflow contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflow files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg retrieval.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* feat: implement offline push.

* feat: implement batch push split.

* update go mod

* feat: implement kafka producer and consumer.

* update format.

* add PushMQ log.

* feat: update Handler logic.

* update MQ logic.

* update

* update

* fix: update OfflinePushConsumerHandler.

@@ -14,12 +14,16 @@ toRedisTopic: toRedis
toMongoTopic: toMongo
# Kafka topic for push notifications
toPushTopic: toPush
# Kafka topic for offline push notifications
toOfflinePushTopic: toOfflinePush
# Consumer group ID for Redis topic
toRedisGroupID: redis
# Consumer group ID for MongoDB topic
toMongoGroupID: mongo
# Consumer group ID for push notifications topic
toPushGroupID: push
# Consumer group ID for offline push notifications topic
toOfflinePushGroupID: offlinePush
# TLS (Transport Layer Security) configuration
tls:
# Enable or disable TLS
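The hunk above adds a dedicated topic and consumer group for offline push alongside the existing redis/mongo/push pairs. A minimal sketch (not part of the patch) that lists broker topics with sarama so the new toOfflinePush topic can be verified by hand; the broker address is illustrative:

```go
package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// Connect to a broker and list its topics; after the script change below,
	// toRedis, toMongo, toPush and toOfflinePush should all appear.
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		panic(err)
	}
	defer client.Close()

	topics, err := client.Topics()
	if err != nil {
		panic(err)
	}
	fmt.Println(topics)
}
```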

@@ -12,7 +12,7 @@ require (
github.com/gorilla/websocket v1.5.1
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/mitchellh/mapstructure v1.5.0
github.com/openimsdk/protocol v0.0.72-alpha.13
github.com/openimsdk/protocol v0.0.72-alpha.17
github.com/openimsdk/tools v0.0.50-alpha.11
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.18.0

@@ -319,8 +319,8 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
github.com/openimsdk/protocol v0.0.72-alpha.13 h1:ILpvuxWGrVJMVCPRodOQcrSMFKUBzLahBPb8GkITWSc=
github.com/openimsdk/protocol v0.0.72-alpha.13/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
github.com/openimsdk/protocol v0.0.72-alpha.17 h1:kB7eyjJHdkc8lpSlLIHskHzbodxkIG4eaK908iQLVdI=
github.com/openimsdk/protocol v0.0.72-alpha.17/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
github.com/openimsdk/tools v0.0.50-alpha.11 h1:ClhkRjUVJWbmOiQ14G6do/ES1a6ZueDITv40Apwq/Tc=
github.com/openimsdk/tools v0.0.50-alpha.11/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=

@@ -111,6 +111,7 @@ func Start(ctx context.Context, index int, config *Config) error {
if err != nil {
return err
}
msgTransfer := &MsgTransfer{
historyCH: historyCH,
historyMongoCH: historyMongoCH,

@@ -18,11 +18,12 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"strconv"
"sync"
"time"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/errs"
@@ -91,6 +92,16 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri
for i, v := range s.GetSplitResult() {
go func(index int, userIDs []string) {
defer wg.Done()
for i := 0; i < len(userIDs); i += maxNum {
end := i + maxNum
if end > len(userIDs) {
end = len(userIDs)
}
if err = g.batchPush(ctx, token, userIDs[i:end], pushReq); err != nil {
log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
}
}
if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil {
log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
}
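The new inner loop caps each batchPush request at maxNum user IDs instead of sending a whole split slice at once (the removed lines below it show the old single-call version). A standalone sketch of the same chunking pattern; chunk is an illustrative helper, not part of the patch:

```go
package main

import "fmt"

// chunk splits ids into slices of at most size elements, mirroring the
// "for i := 0; i < len(userIDs); i += maxNum" loop in Push above.
func chunk(ids []string, size int) [][]string {
	var out [][]string
	for i := 0; i < len(ids); i += size {
		end := i + size
		if end > len(ids) {
			end = len(ids)
		}
		out = append(out, ids[i:end])
	}
	return out
}

func main() {
	fmt.Println(chunk([]string{"a", "b", "c", "d", "e"}, 2)) // [[a b] [c d] [e]]
}
```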

@@ -0,0 +1,122 @@
package push
import (
"context"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/protocol/constant"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/jsonutil"
"google.golang.org/protobuf/proto"
)
type OfflinePushConsumerHandler struct {
OfflinePushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher
}
func NewOfflinePushConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher) (*OfflinePushConsumerHandler, error) {
var offlinePushConsumerHandler OfflinePushConsumerHandler
var err error
offlinePushConsumerHandler.offlinePusher = offlinePusher
offlinePushConsumerHandler.OfflinePushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToOfflineGroupID,
[]string{config.KafkaConfig.ToOfflinePushTopic}, true)
if err != nil {
return nil, err
}
return &offlinePushConsumerHandler, nil
}
func (*OfflinePushConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
func (*OfflinePushConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (o *OfflinePushConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
for msg := range claim.Messages() {
ctx := o.OfflinePushConsumerGroup.GetContextFromMsg(msg)
o.handleMsg2OfflinePush(ctx, msg.Value)
sess.MarkMessage(msg, "")
}
return nil
}
func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) {
offlinePushMsg := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil {
log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg))
return
}
if offlinePushMsg.MsgData == nil || offlinePushMsg.UserIDs == nil {
log.ZError(ctx, "offline push msg is empty", errs.New("offlinePushMsg is empty"), "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData)
return
}
log.ZInfo(ctx, "receive to OfflinePush MQ", "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData)
err := o.offlinePushMsg(ctx, offlinePushMsg.MsgData, offlinePushMsg.UserIDs)
if err != nil {
log.ZWarn(ctx, "offline push failed", err, "msg", offlinePushMsg.String())
}
}
func (c *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) {
type AtTextElem struct {
Text string `json:"text,omitempty"`
AtUserList []string `json:"atUserList,omitempty"`
IsAtSelf bool `json:"isAtSelf"`
}
opts = &options.Opts{Signal: &options.Signal{}}
if msg.OfflinePushInfo != nil {
opts.IOSBadgeCount = msg.OfflinePushInfo.IOSBadgeCount
opts.IOSPushSound = msg.OfflinePushInfo.IOSPushSound
opts.Ex = msg.OfflinePushInfo.Ex
}
if msg.OfflinePushInfo != nil {
title = msg.OfflinePushInfo.Title
content = msg.OfflinePushInfo.Desc
}
if title == "" {
switch msg.ContentType {
case constant.Text:
fallthrough
case constant.Picture:
fallthrough
case constant.Voice:
fallthrough
case constant.Video:
fallthrough
case constant.File:
title = constant.ContentType2PushContent[int64(msg.ContentType)]
case constant.AtText:
ac := AtTextElem{}
_ = jsonutil.JsonStringToStruct(string(msg.Content), &ac)
case constant.SignalingNotification:
title = constant.ContentType2PushContent[constant.SignalMsg]
default:
title = constant.ContentType2PushContent[constant.Common]
}
}
if content == "" {
content = title
}
return
}
func (c *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error {
title, content, opts, err := c.getOfflinePushInfos(msg)
if err != nil {
return err
}
err = c.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
if err != nil {
prommetrics.MsgOfflinePushFailedCounter.Inc()
return err
}
return nil
}
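This new handler consumes protobuf-encoded PushMsgReq messages from the toOfflinePush topic; the producer side (MsgToOfflinePushMQ, further down) marshals the same type. A minimal round-trip sketch of that wire format, assuming the openimsdk protocol module is on the module path:

```go
package main

import (
	"fmt"

	pbpush "github.com/openimsdk/protocol/push"
	"github.com/openimsdk/protocol/sdkws"
	"google.golang.org/protobuf/proto"
)

func main() {
	// What the producer puts on the topic...
	req := &pbpush.PushMsgReq{
		MsgData: &sdkws.MsgData{SendID: "u1", RecvID: "u2"},
		UserIDs: []string{"u2"},
	}
	raw, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}

	// ...and what handleMsg2OfflinePush recovers from msg.Value.
	var got pbpush.PushMsgReq
	if err := proto.Unmarshal(raw, &got); err != nil {
		panic(err)
	}
	fmt.Println(got.UserIDs) // [u2]
}
```

Note that the guard in handleMsg2OfflinePush rejects a nil UserIDs slice but would accept an empty non-nil one, which then pushes to nobody.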

@@ -2,6 +2,7 @@ package push
import (
"context"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
@@ -17,12 +18,12 @@ type pushServer struct {
disCov discovery.SvcDiscoveryRegistry
offlinePusher offlinepush.OfflinePusher
pushCh *ConsumerHandler
offlinePushCh *OfflinePushConsumerHandler
}
type Config struct {
RpcConfig config.Push
RedisConfig config.Redis
MongodbConfig config.Mongo
KafkaConfig config.Kafka
NotificationConfig config.Notification
Share config.Share
@@ -55,18 +56,30 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}
database := controller.NewPushDatabase(cacheModel)
consumer, err := NewConsumerHandler(config, offlinePusher, rdb, client)
database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig)
consumer, err := NewConsumerHandler(config, database, offlinePusher, rdb, client)
if err != nil {
return err
}
offlinePushConsumer, err := NewOfflinePushConsumerHandler(config, offlinePusher)
if err != nil {
return err
}
pbpush.RegisterPushMsgServiceServer(server, &pushServer{
database: database,
disCov: client,
offlinePusher: offlinePusher,
pushCh: consumer,
offlinePushCh: offlinePushConsumer,
})
go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer)
go offlinePushConsumer.OfflinePushConsumerGroup.RegisterHandleAndConsumer(ctx, offlinePushConsumer)
return nil
}
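Both consumer groups are started as fire-and-forget goroutines, so an error inside a consume loop cannot take the service down or be observed by Start. A hedged alternative sketch using errgroup to supervise two blocking loops; consume is a stand-in for RegisterHandleAndConsumer, not the real API:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// consume stands in for a blocking consumer loop; it polls until the
// context is cancelled, then reports why it stopped.
func consume(ctx context.Context, name string) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond):
			fmt.Println(name, "poll")
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	g, gctx := errgroup.WithContext(ctx)
	g.Go(func() error { return consume(gctx, "push") })
	g.Go(func() error { return consume(gctx, "offlinePush") })

	// With supervision, a consumer failure propagates to the caller
	// instead of being dropped by a bare goroutine.
	fmt.Println("exit:", g.Wait())
}
```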

@@ -1,33 +1,20 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push
import (
"context"
"encoding/json"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant"
pbchat "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/msggateway"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
@@ -46,6 +33,7 @@ type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher
pushDatabase controller.PushDatabase
onlineCache *rpccache.OnlineCache
groupLocalCache *rpccache.GroupLocalCache
conversationLocalCache *rpccache.ConversationLocalCache
@@ -56,7 +44,7 @@ type ConsumerHandler struct {
config *Config
}
func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient,
func NewConsumerHandler(config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient,
client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) {
var consumerHandler ConsumerHandler
var err error
@@ -65,6 +53,7 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
if err != nil {
return nil, err
}
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config)
@@ -75,43 +64,42 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
consumerHandler.config = config
consumerHandler.pushDatabase = database
consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
return &consumerHandler, nil
}
func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
msgFromMQ := pbchat.PushMsgDataToMQ{}
msgFromMQ := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &msgFromMQ); err != nil {
log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg))
return
}
pbData := &pbpush.PushMsgReq{
MsgData: msgFromMQ.MsgData,
ConversationID: msgFromMQ.ConversationID,
}
sec := msgFromMQ.MsgData.SendTime / 1000
nowSec := timeutil.GetCurrentTimestampBySecond()
if nowSec-sec > 10 {
prommetrics.MsgLoneTimePushCounter.Inc()
log.ZWarn(ctx, "its been a while since the message was sent", nil, "msg", pbData.String(), "sec", sec, "nowSec", nowSec, "nowSec-sec", nowSec-sec)
log.ZWarn(ctx, "its been a while since the message was sent", nil, "msg", msgFromMQ.String(), "sec", sec, "nowSec", nowSec, "nowSec-sec", nowSec-sec)
}
var err error
switch msgFromMQ.MsgData.SessionType {
case constant.ReadGroupChatType:
err = c.Push2Group(ctx, pbData.MsgData.GroupID, pbData.MsgData)
err = c.Push2Group(ctx, msgFromMQ.MsgData.GroupID, msgFromMQ.MsgData)
default:
var pushUserIDList []string
isSenderSync := datautil.GetSwitchFromOptions(pbData.MsgData.Options, constant.IsSenderSync)
if !isSenderSync || pbData.MsgData.SendID == pbData.MsgData.RecvID {
pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID)
isSenderSync := datautil.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync)
if !isSenderSync || msgFromMQ.MsgData.SendID == msgFromMQ.MsgData.RecvID {
pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID)
} else {
pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID, pbData.MsgData.SendID)
pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID, msgFromMQ.MsgData.SendID)
}
err = c.Push2User(ctx, pushUserIDList, pbData.MsgData)
err = c.Push2User(ctx, pushUserIDList, msgFromMQ.MsgData)
}
if err != nil {
log.ZWarn(ctx, "push failed", err, "msg", pbData.String())
log.ZWarn(ctx, "push failed", err, "msg", msgFromMQ.String())
}
}
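The default (single-chat) branch always pushes to the receiver and adds the sender only when sender sync is enabled and the message is not a self-chat. The recipient rule in isolation, as a runnable sketch:

```go
package main

import "fmt"

// pushTargets mirrors the default branch of handleMs2PsChat above.
func pushTargets(sendID, recvID string, senderSync bool) []string {
	if !senderSync || sendID == recvID {
		return []string{recvID}
	}
	return []string{recvID, sendID}
}

func main() {
	fmt.Println(pushTargets("u1", "u2", true))  // [u2 u1]
	fmt.Println(pushTargets("u1", "u2", false)) // [u2]
}
```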
@@ -246,28 +234,34 @@ func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *s
if err != nil {
return err
}
// Use offline push messaging
if len(needOfflinePushUserIDs) > 0 {
var offlinePushUserIDs []string
err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs)
if err != nil {
return err
}
if len(offlinePushUserIDs) > 0 {
needOfflinePushUserIDs = offlinePushUserIDs
}
c.asyncOfflinePush(ctx, needOfflinePushUserIDs, msg)
}
err = c.offlinePushMsg(ctx, msg, needOfflinePushUserIDs)
if err != nil {
log.ZWarn(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg)
return nil
}
return nil
}
func (c *ConsumerHandler) asyncOfflinePush(ctx context.Context, needOfflinePushUserIDs []string, msg *sdkws.MsgData) {
var offlinePushUserIDs []string
err := c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs)
if err != nil {
log.ZWarn(ctx, "webhookBeforeOfflinePush failed", err, "msg", msg)
return
}
return nil
if len(offlinePushUserIDs) > 0 {
needOfflinePushUserIDs = offlinePushUserIDs
}
if err := c.pushDatabase.MsgToOfflinePushMQ(ctx, conversationutil.GenConversationUniqueKeyForSingle(msg.SendID, msg.RecvID), needOfflinePushUserIDs, msg); err != nil {
log.ZError(ctx, "Msg To OfflinePush MQ error", err, "needOfflinePushUserIDs",
needOfflinePushUserIDs, "msg", msg)
prommetrics.SingleChatMsgProcessFailedCounter.Inc()
return
}
}
func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) {
if len(*pushToUserIDs) == 0 {
*pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID)
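asyncOfflinePush publishes to the MQ with a conversation-unique key, so every message of the same chat hashes to the same partition and stays ordered; note that the diff keys even the group path with the single-chat key of sender and receiver. A sketch that mimics (but is not) conversationutil.GenConversationUniqueKeyForSingle:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// genSingleKey builds an order-independent key for a pair of users:
// (a, b) and (b, a) map to the same partition key.
func genSingleKey(sendID, recvID string) string {
	ids := []string{sendID, recvID}
	sort.Strings(ids)
	return strings.Join(ids, "_")
}

func main() {
	fmt.Println(genSingleKey("u2", "u1")) // u1_u2
	fmt.Println(genSingleKey("u1", "u2")) // u1_u2
}
```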

@@ -37,7 +37,6 @@ func NewPushRpcCmd() *PushRpcCmd {
ret.configMap = map[string]any{
OpenIMPushCfgFileName: &pushConfig.RpcConfig,
RedisConfigFileName: &pushConfig.RedisConfig,
MongodbConfigFileName: &pushConfig.MongodbConfig,
KafkaConfigFileName: &pushConfig.KafkaConfig,
ShareFileName: &pushConfig.Share,
NotificationFileName: &pushConfig.NotificationConfig,

@@ -73,18 +73,21 @@ type Mongo struct {
MaxRetry int `mapstructure:"maxRetry"`
}
type Kafka struct {
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
ProducerAck string `mapstructure:"producerAck"`
CompressType string `mapstructure:"compressType"`
Address []string `mapstructure:"address"`
ToRedisTopic string `mapstructure:"toRedisTopic"`
ToMongoTopic string `mapstructure:"toMongoTopic"`
ToPushTopic string `mapstructure:"toPushTopic"`
ToRedisGroupID string `mapstructure:"toRedisGroupID"`
ToMongoGroupID string `mapstructure:"toMongoGroupID"`
ToPushGroupID string `mapstructure:"toPushGroupID"`
Tls TLSConfig `mapstructure:"tls"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
ProducerAck string `mapstructure:"producerAck"`
CompressType string `mapstructure:"compressType"`
Address []string `mapstructure:"address"`
ToRedisTopic string `mapstructure:"toRedisTopic"`
ToMongoTopic string `mapstructure:"toMongoTopic"`
ToPushTopic string `mapstructure:"toPushTopic"`
ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"`
ToRedisGroupID string `mapstructure:"toRedisGroupID"`
ToMongoGroupID string `mapstructure:"toMongoGroupID"`
ToPushGroupID string `mapstructure:"toPushGroupID"`
ToOfflineGroupID string `mapstructure:"toOfflinePushGroupID"`
Tls TLSConfig `mapstructure:"tls"`
}
type TLSConfig struct {
EnableTLS bool `mapstructure:"enableTLS"`
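The two new struct fields bind to the kafka.yml keys shown earlier via mapstructure (already a direct dependency, per go.mod above); note the asymmetry that ToOfflineGroupID maps to the toOfflinePushGroupID key. A minimal decode sketch covering only the new fields:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Local copy of just the two added fields and their tags.
type Kafka struct {
	ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"`
	ToOfflineGroupID   string `mapstructure:"toOfflinePushGroupID"`
}

func main() {
	raw := map[string]any{
		"toOfflinePushTopic":   "toOfflinePush",
		"toOfflinePushGroupID": "offlinePush",
	}
	var k Kafka
	if err := mapstructure.Decode(raw, &k); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", k) // {ToOfflinePushTopic:toOfflinePush ToOfflineGroupID:offlinePush}
}
```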

@@ -17,21 +17,45 @@ package controller
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
)
type PushDatabase interface {
DelFcmToken(ctx context.Context, userID string, platformID int) error
MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error
}
type pushDataBase struct {
cache cache.ThirdCache
cache cache.ThirdCache
producerToOfflinePush *kafka.Producer
}
func NewPushDatabase(cache cache.ThirdCache) PushDatabase {
return &pushDataBase{cache: cache}
func NewPushDatabase(cache cache.ThirdCache, kafkaConf *config.Kafka) PushDatabase {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil {
return nil
}
producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic)
if err != nil {
return nil
}
return &pushDataBase{
cache: cache,
producerToOfflinePush: producerToOfflinePush,
}
}
func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformID int) error {
return p.cache.DelFcmToken(ctx, userID, platformID)
}
func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error {
_, _, err := p.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs})
log.ZInfo(ctx, "message is push to offlinePush topic", "key", key, "userIDs", userIDs, "msg", msg2mq.String())
return err
}
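NewPushDatabase returns nil on any producer-construction error, so a misconfigured broker surfaces later as a nil-pointer panic on the first MsgToOfflinePushMQ call rather than as a startup failure. A hedged sketch of an error-propagating variant that would sit in the same file, reusing its imports and types; NewPushDatabaseWithErr is a hypothetical name:

```go
// Sketch only: same wiring as NewPushDatabase above, but the caller sees
// the construction error instead of a nil PushDatabase.
func NewPushDatabaseWithErr(cache cache.ThirdCache, kafkaConf *config.Kafka) (PushDatabase, error) {
	conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
	if err != nil {
		return nil, err
	}
	producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic)
	if err != nil {
		return nil, err
	}
	return &pushDataBase{cache: cache, producerToOfflinePush: producerToOfflinePush}, nil
}
```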

@@ -35,7 +35,7 @@ done
echo "Kafka is ready. Creating topics..."
topics=("toRedis" "toMongo" "toPush")
topics=("toRedis" "toMongo" "toPush" "toOfflinePush")
partitions=8
replicationFactor=1

@@ -18,6 +18,12 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/db/mongoutil"
@@ -27,11 +33,6 @@ import (
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/s3/minio"
"github.com/openimsdk/tools/system/program"
"io/ioutil"
"log"
"os"
"path/filepath"
"time"
)
const maxRetry = 180
@@ -65,7 +66,7 @@ func CheckMinIO(ctx context.Context, config *config.Minio) error {
}
func CheckKafka(ctx context.Context, conf *config.Kafka) error {
return kafka.Check(ctx, conf.Build(), []string{conf.ToMongoTopic, conf.ToRedisTopic, conf.ToPushTopic})
return kafka.Check(ctx, conf.Build(), []string{conf.ToMongoTopic, conf.ToRedisTopic, conf.ToPushTopic, conf.ToOfflinePushTopic})
}
func initConfig(configDir string) (*config.Mongo, *config.Redis, *config.Kafka, *config.Minio, *config.Discovery, error) {
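CheckKafka now probes the new topic as well. The caller is not shown in this diff, but a check like this is typically retried while brokers come up, bounded by the maxRetry constant (180) declared above; a runnable sketch of that pattern with an illustrative one-second backoff:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const maxRetry = 180

// checkWithRetry re-runs check until it succeeds, the context ends,
// or maxRetry attempts are exhausted.
func checkWithRetry(ctx context.Context, check func(context.Context) error) error {
	var err error
	for i := 0; i < maxRetry; i++ {
		if err = check(ctx); err == nil {
			return nil
		}
		fmt.Printf("check failed (attempt %d/%d): %v\n", i+1, maxRetry, err)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
	return err
}

func main() {
	// Toy check that succeeds immediately.
	err := checkWithRetry(context.Background(), func(context.Context) error { return nil })
	fmt.Println("result:", err)
}
```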
