concurrent consumption of messages

pull/232/head
Gordon 3 years ago
parent 4f165f3b27
commit ee0a71de1b

@@ -222,7 +222,9 @@ secret: tuoyun
multiloginpolicy: 1
#chat log insert to db
-chatPersistenceMysql: true
+chatpersistencemysql: true
+#reliable storage
+reliablestorage: true
#token config
tokenpolicy:

@@ -2,22 +2,20 @@ package logic
import (
"Open_IM/pkg/common/db"
-"Open_IM/pkg/common/log"
pbMsg "Open_IM/pkg/proto/chat"
-"Open_IM/pkg/utils"
)
func saveUserChat(uid string, msg *pbMsg.MsgDataToMQ) error {
-time := utils.GetCurrentTimestampByMill()
-seq, err := db.DB.IncrUserSeq(uid)
-if err != nil {
-log.NewError(msg.OperationID, "data insert to redis err", err.Error(), msg.String())
-return err
-}
-msg.MsgData.Seq = uint32(seq)
+//time := utils.GetCurrentTimestampByMill()
+//seq, err := db.DB.IncrUserSeq(uid)
+//if err != nil {
+//	log.NewError(msg.OperationID, "data insert to redis err", err.Error(), msg.String())
+//	return err
+//}
+//msg.MsgData.Seq = uint32(seq)
pbSaveData := pbMsg.MsgDataToDB{}
pbSaveData.MsgData = msg.MsgData
-log.NewInfo(msg.OperationID, "IncrUserSeq cost time", utils.GetCurrentTimestampByMill()-time)
+//log.NewInfo(msg.OperationID, "IncrUserSeq cost time", utils.GetCurrentTimestampByMill()-time)
return db.DB.SaveUserChatMongo2(uid, pbSaveData.MsgData.SendTime, &pbSaveData)
// return db.DB.SaveUserChatMongo2(uid, pbSaveData.MsgData.SendTime, &pbSaveData)
}

@@ -12,6 +12,7 @@ import (
const OnlineTopicBusy = 1
const OnlineTopicVacancy = 0
+const Msg = 2
var (
persistentCH PersistentConsumerHandler

@@ -3,6 +3,7 @@ package logic
import (
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
+"Open_IM/pkg/common/db"
kfk "Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
@@ -17,7 +18,11 @@ import (
"time"
)
-type fcb func(msg []byte, msgKey string)
+type MsgChannelValue struct {
+userID string
+msg *pbMsg.MsgDataToMQ
+}
+type fcb func(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession)
type Cmd2Value struct {
Cmd int
Value interface{}
@@ -26,12 +31,22 @@ type OnlineHistoryConsumerHandler struct {
msgHandle map[string]fcb
historyConsumerGroup *kfk.MConsumerGroup
cmdCh chan Cmd2Value
+msgCh chan Cmd2Value
}
func (och *OnlineHistoryConsumerHandler) Init(cmdCh chan Cmd2Value) {
och.msgHandle = make(map[string]fcb)
och.cmdCh = cmdCh
-och.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = och.handleChatWs2Mongo
+och.msgCh = make(chan Cmd2Value, 1000)
+if config.Config.ReliableStorage {
+och.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = och.handleChatWs2Mongo
+} else {
+och.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = och.handleChatWs2MongoLowReliability
+for i := 0; i < 10; i++ {
+go och.Run()
+}
+}
och.historyConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V0_10_2_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic},
config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMongo)
@@ -61,7 +76,28 @@ func sendCmd(ch chan Cmd2Value, value Cmd2Value, timeout int64) error {
return errors.New("send cmd timeout")
}
}
-func (och *OnlineHistoryConsumerHandler) handleChatWs2Mongo(msg []byte, msgKey string) {
+func (och *OnlineHistoryConsumerHandler) Run() {
+for {
+select {
+case cmd := <-och.msgCh:
+switch cmd.Cmd {
+case Msg:
+msgChannelValue := cmd.Value.(MsgChannelValue)
+err := saveUserChat(msgChannelValue.userID, msgChannelValue.msg)
+if err != nil {
+singleMsgFailedCount++
+log.NewError(msgChannelValue.msg.OperationID, "single data insert to mongo err", err.Error(), msgChannelValue.msg.String())
+} else {
+singleMsgSuccessCountMutex.Lock()
+singleMsgSuccessCount++
+singleMsgSuccessCountMutex.Unlock()
+}
+}
+}
+}
+}
+func (och *OnlineHistoryConsumerHandler) handleChatWs2Mongo(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession) {
+msg := cMsg.Value
now := time.Now()
msgFromMQ := pbMsg.MsgDataToMQ{}
err := proto.Unmarshal(msg, &msgFromMQ)
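
The Run() loop above, together with the buffered msgCh created in Init, is the concurrency the commit title refers to: in the low-reliability branch the Kafka consumer goroutine only enqueues work, and ten identical Run() workers drain the queue and perform the Mongo writes. The standalone sketch below shows the same fan-out shape with invented names (job, store, workerCount); it illustrates the pattern and is not Open_IM code.

package main

import (
	"fmt"
	"sync"
	"time"
)

// job mirrors MsgChannelValue: the key plus the payload a worker must persist.
type job struct {
	userID string
	msg    string
}

// store stands in for saveUserChat / the Mongo write.
func store(j job) error {
	time.Sleep(10 * time.Millisecond) // simulate I/O latency
	fmt.Println("stored", j.userID, j.msg)
	return nil
}

func main() {
	const workerCount = 10       // the commit starts 10 Run() goroutines
	jobs := make(chan job, 1000) // och.msgCh is buffered the same way

	var wg sync.WaitGroup
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go func() { // equivalent of och.Run()
			defer wg.Done()
			for j := range jobs {
				if err := store(j); err != nil {
					fmt.Println("store failed:", err) // Open_IM bumps singleMsgFailedCount here
				}
			}
		}()
	}

	// The Kafka handler only does the cheap, ordered work (seq allocation,
	// MarkMessage) and then hands the message to the pool.
	for i := 0; i < 5; i++ {
		jobs <- job{userID: "user1", msg: fmt.Sprintf("msg-%d", i)}
	}
	close(jobs)
	wg.Wait()
}

Because every worker reads from the same channel, two messages for one user may reach Mongo out of order; the seq assigned synchronously in the Kafka handler (see handleChatWs2MongoLowReliability below) is what preserves their logical ordering.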
@@ -126,6 +162,102 @@ func (och *OnlineHistoryConsumerHandler) handleChatWs2Mongo(msg []byte, msgKey s
log.NewError(msgFromMQ.OperationID, "SessionType error", msgFromMQ.String())
return
}
+sess.MarkMessage(cMsg, "")
+log.NewDebug(msgFromMQ.OperationID, "msg_transfer handle topic data to database success...", msgFromMQ.String())
+}
+func (och *OnlineHistoryConsumerHandler) handleChatWs2MongoLowReliability(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession) {
+msg := cMsg.Value
+now := time.Now()
+msgFromMQ := pbMsg.MsgDataToMQ{}
+err := proto.Unmarshal(msg, &msgFromMQ)
+if err != nil {
+log.Error("msg_transfer Unmarshal msg err", "", "msg", string(msg), "err", err.Error())
+return
+}
+operationID := msgFromMQ.OperationID
+log.NewInfo(operationID, "msg come mongo!!!", "", "msg", string(msg))
+//Control whether to store offline messages (mongo)
+isHistory := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsHistory)
+//Control whether to store history messages (mysql)
+isPersist := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsPersistent)
+isSenderSync := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync)
+switch msgFromMQ.MsgData.SessionType {
+case constant.SingleChatType:
+log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = SingleChatType", isHistory, isPersist)
+if isHistory {
+seq, err := db.DB.IncrUserSeq(msgKey)
+if err != nil {
+log.NewError(operationID, "data insert to redis err", err.Error(), string(msg))
+return
+}
+sess.MarkMessage(cMsg, "")
+msgFromMQ.MsgData.Seq = uint32(seq)
+log.Debug(operationID, "send ch msg is ", msgFromMQ.String())
+och.msgCh <- Cmd2Value{Cmd: Msg, Value: MsgChannelValue{msgKey, &msgFromMQ}}
+//err := saveUserChat(msgKey, &msgFromMQ)
+//if err != nil {
+// singleMsgFailedCount++
+// log.NewError(operationID, "single data insert to mongo err", err.Error(), msgFromMQ.String())
+// return
+//}
+//singleMsgSuccessCountMutex.Lock()
+//singleMsgSuccessCount++
+//singleMsgSuccessCountMutex.Unlock()
+//log.NewDebug(msgFromMQ.OperationID, "sendMessageToPush cost time ", time.Since(now))
+}
+if !isSenderSync && msgKey == msgFromMQ.MsgData.SendID {
+} else {
+go sendMessageToPush(&msgFromMQ, msgKey)
+}
+log.NewDebug(operationID, "saveUserChat cost time ", time.Since(now))
+case constant.GroupChatType:
+log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = GroupChatType", isHistory, isPersist)
+if isHistory {
+seq, err := db.DB.IncrUserSeq(msgKey)
+if err != nil {
+log.NewError(operationID, "data insert to redis err", err.Error(), string(msg))
+return
+}
+sess.MarkMessage(cMsg, "")
+msgFromMQ.MsgData.Seq = uint32(seq)
+log.Debug(operationID, "send ch msg is ", msgFromMQ.String())
+och.msgCh <- Cmd2Value{Cmd: Msg, Value: MsgChannelValue{msgKey, &msgFromMQ}}
+//err := saveUserChat(msgFromMQ.MsgData.RecvID, &msgFromMQ)
+//if err != nil {
+// log.NewError(operationID, "group data insert to mongo err", msgFromMQ.String(), msgFromMQ.MsgData.RecvID, err.Error())
+// return
+//}
+//groupMsgCount++
+}
+go sendMessageToPush(&msgFromMQ, msgFromMQ.MsgData.RecvID)
+case constant.NotificationChatType:
+log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = NotificationChatType", isHistory, isPersist)
+if isHistory {
+seq, err := db.DB.IncrUserSeq(msgKey)
+if err != nil {
+log.NewError(operationID, "data insert to redis err", err.Error(), string(msg))
+return
+}
+sess.MarkMessage(cMsg, "")
+msgFromMQ.MsgData.Seq = uint32(seq)
+log.Debug(operationID, "send ch msg is ", msgFromMQ.String())
+och.msgCh <- Cmd2Value{Cmd: Msg, Value: MsgChannelValue{msgKey, &msgFromMQ}}
+//err := saveUserChat(msgKey, &msgFromMQ)
+//if err != nil {
+// log.NewError(operationID, "single data insert to mongo err", err.Error(), msgFromMQ.String())
+// return
+//}
+//log.NewDebug(msgFromMQ.OperationID, "sendMessageToPush cost time ", time.Since(now))
+}
+if !isSenderSync && msgKey == msgFromMQ.MsgData.SendID {
+} else {
+go sendMessageToPush(&msgFromMQ, msgKey)
+}
+log.NewDebug(operationID, "saveUserChat cost time ", time.Since(now))
+default:
+log.NewError(msgFromMQ.OperationID, "SessionType error", msgFromMQ.String())
+return
+}
log.NewDebug(msgFromMQ.OperationID, "msg_transfer handle topic data to database success...", msgFromMQ.String())
}
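
Each branch above calls db.DB.IncrUserSeq before handing the message off, so the per-user sequence number is still allocated synchronously, in consumption order, even though the storage write itself is now asynchronous. A plausible sketch of such a helper follows, assuming an atomic Redis INCR under a per-user key; the go-redis/v8 client and the key prefix are assumptions, not necessarily what Open_IM's db package uses.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8" // assumed client; any Redis library with INCR behaves the same way
)

// incrUserSeq sketches what a helper like db.DB.IncrUserSeq presumably does:
// one atomic INCR per message, keyed by user, so seq values are unique and
// monotonically increasing regardless of which worker later writes to Mongo.
func incrUserSeq(ctx context.Context, rdb *redis.Client, uid string) (uint64, error) {
	seq, err := rdb.Incr(ctx, "user_incr_seq:"+uid).Result() // hypothetical key prefix
	if err != nil {
		return 0, err
	}
	return uint64(seq), nil
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	seq, err := incrUserSeq(context.Background(), rdb, "user1")
	fmt.Println(seq, err)
}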
@@ -138,8 +270,7 @@ func (och *OnlineHistoryConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupS
SetOnlineTopicStatus(OnlineTopicBusy)
//och.TriggerCmd(OnlineTopicBusy)
log.NewDebug("", "online kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "online", msg.Offset, claim.HighWaterMarkOffset())
-och.msgHandle[msg.Topic](msg.Value, string(msg.Key))
-sess.MarkMessage(msg, "")
+och.msgHandle[msg.Topic](msg, string(msg.Key), sess)
if claim.HighWaterMarkOffset()-msg.Offset <= 1 {
log.Debug("", "online msg consume end", claim.HighWaterMarkOffset(), msg.Offset)
SetOnlineTopicStatus(OnlineTopicVacancy)
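
With this change, ConsumeClaim no longer marks the offset itself: the fcb handler receives the raw *sarama.ConsumerMessage plus the group session and calls sess.MarkMessage once its synchronous work (Unmarshal and, in the low-reliability path, seq allocation) is done. A minimal sketch of that delegation using the Shopify/sarama consumer-group API; the handler and topic names here are invented.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// handlerFunc mirrors the new fcb signature: raw message, key, and the session,
// so each handler decides when the offset gets committed.
type handlerFunc func(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession)

type exampleConsumer struct {
	route map[string]handlerFunc // topic -> handler
}

// Compile-time check that exampleConsumer satisfies sarama.ConsumerGroupHandler.
var _ sarama.ConsumerGroupHandler = (*exampleConsumer)(nil)

func (c *exampleConsumer) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (c *exampleConsumer) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (c *exampleConsumer) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		// No MarkMessage here any more: the routed handler commits the offset
		// itself, after it has recorded whatever it needs to record.
		c.route[msg.Topic](msg, string(msg.Key), sess)
	}
	return nil
}

func main() {
	c := &exampleConsumer{route: map[string]handlerFunc{
		"example-topic": func(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession) {
			fmt.Println("handled key", msgKey)
			sess.MarkMessage(cMsg, "")
		},
	}}
	_ = c // wiring into sarama.NewConsumerGroup(addrs, groupID, cfg).Consume(ctx, topics, c) omitted
}

Note that in the low-reliability path the message is marked before the Mongo write has actually happened, which is exactly the trade-off the reliablestorage switch exposes.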

@@ -209,7 +209,8 @@ type config struct {
}
Secret string `yaml:"secret"`
MultiLoginPolicy int `yaml:"multiloginpolicy"`
-ChatPersistenceMysql bool `yaml:"chatPersistenceMysql"`
+ChatPersistenceMysql bool `yaml:"chatpersistencemysql"`
+ReliableStorage bool `yaml:"reliablestorage"`
TokenPolicy struct {
AccessSecret string `yaml:"accessSecret"`
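
The struct tag for ChatPersistenceMysql changes from chatPersistenceMysql to chatpersistencemysql, matching the key rewritten in config.yaml above, and the new ReliableStorage field is what Init checks when choosing a handler. A small sketch of how such tags resolve, assuming gopkg.in/yaml.v2, where keys are matched against the tag text literally, so the old camel-case key would no longer populate the field; this is an illustration, not Open_IM's actual config loader.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2" // assumed loader; Open_IM's own config package may differ
)

// A minimal slice of the config struct with the renamed and the new tag.
type storageConf struct {
	ChatPersistenceMysql bool `yaml:"chatpersistencemysql"`
	ReliableStorage      bool `yaml:"reliablestorage"`
}

func main() {
	raw := []byte("chatpersistencemysql: true\nreliablestorage: true\n")
	var c storageConf
	if err := yaml.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	// {ChatPersistenceMysql:true ReliableStorage:true}
	fmt.Printf("%+v\n", c)
}

Existing deployments therefore need the new lowercase keys in config.yaml; with reliablestorage set to false, the message-transfer service switches to the concurrent, channel-based path shown earlier.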
