diff --git a/config/config.yaml b/config/config.yaml
index 552290e6b..2c1b469ee 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -228,7 +228,7 @@
 chatpersistencemysql: true
 #reliable storage
 reliablestorage: false
 #message cache time
-msgCacheTimeout: 60 * 60
+msgCacheTimeout: 1800
 #token config
 tokenpolicy:
diff --git a/internal/rpc/msg/pull_message.go b/internal/rpc/msg/pull_message.go
index 819a9592e..1811d3847 100644
--- a/internal/rpc/msg/pull_message.go
+++ b/internal/rpc/msg/pull_message.go
@@ -40,18 +40,28 @@ func (rpc *rpcChat) PullMessageBySeqList(_ context.Context, in *open_im_sdk.Pull
 	log.NewInfo(in.OperationID, "rpc PullMessageBySeqList is arriving", in.String())
 	resp := new(open_im_sdk.PullMessageBySeqListResp)
 	//msgList, err := commonDB.DB.GetMsgBySeqList(in.UserID, in.SeqList, in.OperationID)
-	msgList, err := commonDB.DB.GetMsgBySeqListMongo2(in.UserID, in.SeqList, in.OperationID)
+	redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(in.UserID, in.SeqList, in.OperationID)
 	if err != nil {
-		log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err.Error())
-		resp.ErrCode = 201
-		resp.ErrMsg = err.Error()
-		return resp, nil
+		if err != redis.ErrNil {
+			log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList)
+		} else {
+			log.Debug(in.OperationID, "get message from redis is nil", failedSeqList)
+		}
+		msgList, err1 := commonDB.DB.GetMsgBySeqListMongo2(in.UserID, failedSeqList, in.OperationID)
+		if err1 != nil {
+			log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err1.Error())
+			resp.ErrCode = 201
+			resp.ErrMsg = err1.Error()
+			return resp, nil
+		} else {
+			redisMsgList = append(redisMsgList, msgList...)
+			resp.List = redisMsgList
+		}
+	} else {
+		resp.List = redisMsgList
+	}
 	//respSingleMsgFormat = singleMsgHandleByUser(SingleMsgFormat, in.UserID)
 	//respGroupMsgFormat = groupMsgHandleByUser(GroupMsgFormat)
-	resp.ErrCode = 0
-	resp.ErrMsg = ""
-	resp.List = msgList
 	return resp, nil
 }
diff --git a/pkg/common/db/batch_insert_chat.go b/pkg/common/db/batch_insert_chat.go
index b5fca3502..6a872529b 100644
--- a/pkg/common/db/batch_insert_chat.go
+++ b/pkg/common/db/batch_insert_chat.go
@@ -10,8 +10,134 @@ import (
 	"github.com/garyburd/redigo/redis"
 	"github.com/golang/protobuf/proto"
 	"go.mongodb.org/mongo-driver/bson"
+	"runtime"
+	"time"
 )
 
+// BatchInsertChat2DB appends msgList to the user's current mongo msg document and rolls the
+// overflow into a new document once the current block of GetSingleGocMsgNum() seqs is full.
+func (d *DataBases) BatchInsertChat2DB(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string, currentMaxSeq uint64) error {
+	newTime := getCurrentTimestampByMill()
+	if len(msgList) > GetSingleGocMsgNum() {
+		return errors.New("too large")
+	}
+	isInit := false
+	var remain uint64
+	blk0 := uint64(GetSingleGocMsgNum() - 1)
+	if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
+		remain = blk0 - currentMaxSeq
+	} else {
+		excludeBlk0 := currentMaxSeq - blk0
+		remain = (uint64(GetSingleGocMsgNum()) - (excludeBlk0 % uint64(GetSingleGocMsgNum()))) % uint64(GetSingleGocMsgNum())
+	}
+	insertCounter := uint64(0)
+	msgListToMongo := make([]MsgInfo, 0)
+	msgListToMongoNext := make([]MsgInfo, 0)
+	seqUid := ""
+	seqUidNext := ""
+	log.Debug(operationID, "remain ", remain, "insertCounter ", insertCounter, "currentMaxSeq ", currentMaxSeq, userID, len(msgList))
+	var err error
+	for _, m := range msgList {
+		log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID)
+		currentMaxSeq++
+		sMsg := MsgInfo{}
+		sMsg.SendTime = m.MsgData.SendTime
+		m.MsgData.Seq = uint32(currentMaxSeq)
+		if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
+			return utils.Wrap(err, "")
+		}
+		if isInit {
+			msgListToMongoNext = append(msgListToMongoNext, sMsg)
+			seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
+			log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
+			continue
+		}
+		if insertCounter < remain {
+			msgListToMongo = append(msgListToMongo, sMsg)
+			insertCounter++
+			seqUid = getSeqUid(userID, uint32(currentMaxSeq))
+			log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
+		} else {
+			msgListToMongoNext = append(msgListToMongoNext, sMsg)
+			seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
+			log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
+		}
+	}
+
+	ctx := context.Background()
+	c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
+
+	if seqUid != "" {
+		filter := bson.M{"uid": seqUid}
+		log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo)
+		err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err()
+		if err != nil {
+			log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter)
+			return utils.Wrap(err, "")
+		}
+	}
+	if seqUidNext != "" {
+		filter := bson.M{"uid": seqUidNext}
+		sChat := UserChat{}
+		sChat.UID = seqUidNext
+		sChat.Msg = msgListToMongoNext
+		log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext)
+		if _, err = c.InsertOne(ctx, &sChat); err != nil {
+			log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
+			return utils.Wrap(err, "")
+		}
+	}
+	log.NewWarn(operationID, "batch mgo cost time ", getCurrentTimestampByMill()-newTime, userID, len(msgList))
+	return nil
+}
+
+// BatchInsertChat2Cache allocates seqs for msgList, writes the messages to the redis cache and
+// advances the user's max seq; the second return value is the max seq before this batch.
+func (d *DataBases) BatchInsertChat2Cache(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) (error, uint64) {
+	newTime := getCurrentTimestampByMill()
+	if len(msgList) > GetSingleGocMsgNum() {
+		return errors.New("too large"), 0
+	}
+	currentMaxSeq, err := d.GetUserMaxSeq(userID)
+	if err == nil {
+
+	} else if err == redis.ErrNil {
+		currentMaxSeq = 0
+	} else {
+		return utils.Wrap(err, ""), 0
+	}
+	lastMaxSeq := currentMaxSeq
+
+	for _, m := range msgList {
+		log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID)
+		currentMaxSeq++
+		sMsg := MsgInfo{}
+		sMsg.SendTime = m.MsgData.SendTime
+		m.MsgData.Seq = uint32(currentMaxSeq)
+	}
+	log.Debug(operationID, "SetMessageToCache ", userID, len(msgList))
+	err = d.SetMessageToCache(msgList, userID, operationID)
+	if err != nil {
+		log.Error(operationID, "setMessageToCache failed, continue ", err.Error(), len(msgList), userID)
+	}
+	log.NewWarn(operationID, "batch to redis cost time ", getCurrentTimestampByMill()-newTime, userID, len(msgList))
+	return utils.Wrap(d.SetUserMaxSeq(userID, uint64(currentMaxSeq)), ""), lastMaxSeq
+}
+
+// BatchInsertChatBoth writes the batch to the cache synchronously and to mongo asynchronously,
+// backing off while the process is already running too many goroutines.
+func (d *DataBases) BatchInsertChatBoth(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) error {
+	err, lastMaxSeq := d.BatchInsertChat2Cache(userID, msgList, operationID)
+	if err != nil {
+		log.Error(operationID, "BatchInsertChat2Cache failed ", err.Error(), userID, len(msgList))
+		return err
+	}
+	for {
+		if runtime.NumGoroutine() > 50000 {
+			log.NewWarn(operationID, "too many NumGoroutine ", runtime.NumGoroutine())
+			time.Sleep(10 * time.Millisecond)
+		} else {
+			break
+		}
+	}
+	go d.BatchInsertChat2DB(userID, msgList, operationID, lastMaxSeq)
+	return nil
+}
+
 func (d *DataBases) BatchInsertChat(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) error {
 	newTime := getCurrentTimestampByMill()
 	if len(msgList) > GetSingleGocMsgNum() {
@@ -104,3 +230,7 @@ func (d *DataBases) BatchInsertChat(userID string, msgList []*pbMsg.MsgDataToMQ,
 	log.NewWarn(operationID, "batch mgo cost time ", getCurrentTimestampByMill()-newTime, userID, len(msgList))
 	return utils.Wrap(d.SetUserMaxSeq(userID, uint64(currentMaxSeq)), "")
 }
+
+//func (d *DataBases)setMessageToCache(msgList []*pbMsg.MsgDataToMQ, uid string) (err error) {
+//
+//}
diff --git a/pkg/common/db/redisModel.go b/pkg/common/db/redisModel.go
index 41c7ea6c1..e98fe1ce0 100644
--- a/pkg/common/db/redisModel.go
+++ b/pkg/common/db/redisModel.go
@@ -264,25 +264,48 @@ func (d *DataBases) GetGroupMemberIDListFromCache(groupID string) ([]string, err
 	result, err := redis.Strings(d.Exec("SMEMBERS", groupCache+groupID))
 	return result, err
 }
+
+// GetMessageListBySeq reads the messages for the given seqs from the redis cache; any seq it
+// cannot serve is returned in failedSeqList so the caller can fall back to mongo for it.
+func (d *DataBases) GetMessageListBySeq(userID string, seqList []uint32, operationID string) (seqMsg []*pbCommon.MsgData, failedSeqList []uint32, errResult error) {
+	for _, v := range seqList {
+		key := messageCache + userID + "_" + strconv.Itoa(int(v))
+		result, err := redis.String(d.Exec("HGETALL", key))
+		if err != nil {
+			errResult = err
+			failedSeqList = append(failedSeqList, v)
+		} else {
+			msg := pbCommon.MsgData{}
+			err = json.Unmarshal([]byte(result), &msg)
+			if err != nil {
+				errResult = err
+				failedSeqList = append(failedSeqList, v)
+				log2.NewWarn(operationID, "Unmarshal err", result, err.Error())
+			} else {
+				log2.NewDebug(operationID, "redis get msg is ", msg.String())
+				seqMsg = append(seqMsg, &msg)
+			}
+
+		}
+	}
+	return seqMsg, failedSeqList, errResult
+}
 
-func (d *DataBases) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string) (err error) {
+func (d *DataBases) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) error {
 	var failedList []pbChat.MsgDataToMQ
 	for _, msg := range msgList {
 		key := messageCache + uid + "_" + strconv.Itoa(int(msg.MsgData.Seq))
 		m, err := utils.Pb2Map(msg.MsgData)
 		if err != nil {
-			log2.NewWarn("", utils.GetSelfFuncName(), "Pb2Map failed", *msg.MsgData, uid, err.Error())
+			log2.NewWarn(operationID, utils.GetSelfFuncName(), "Pb2Map failed", msg.MsgData.String(), uid, err.Error())
 			continue
 		}
-		log2.NewDebug("", "m", m)
+		log2.NewDebug(operationID, "convert map is ", m)
 		_, err = d.Exec("hmset", key, redis.Args{}.Add("TIMEOUT", config.Config.MsgCacheTimeout).AddFlat(m)...)
 		if err != nil {
-			log2.NewWarn("", utils.GetSelfFuncName(), "redis failed", "args:", key, *msg, uid, m)
+			log2.NewWarn(operationID, utils.GetSelfFuncName(), "redis failed", "args:", key, *msg, uid, m)
 			failedList = append(failedList, *msg)
 		}
 	}
 	if len(failedList) != 0 {
 		return errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %s", failedList))
 	}
-	return err
+	return nil
 }
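
A note on the cache layout the redis changes above rely on: every message is stored under its own key, `messageCache + userID + "_" + seq`, as a hash of the message's fields, and is expected to age out after `msgCacheTimeout`. The snippet below is a minimal, self-contained sketch of that round trip written directly against redigo; the key prefix, redis address, stand-in message struct, explicit EXPIRE call, and treating the timeout as seconds are assumptions made for the example rather than the project's own code.

```go
package main

import (
	"fmt"

	"github.com/garyburd/redigo/redis"
)

// cachedMsg is a stand-in for pbCommon.MsgData with just enough fields for the demo.
type cachedMsg struct {
	ClientMsgID string `redis:"clientMsgID"`
	Seq         uint32 `redis:"seq"`
	Content     string `redis:"content"`
}

const (
	messageCache    = "MESSAGE_CACHE:" // assumed key prefix
	msgCacheTimeout = 1800             // assumed to be seconds; mirrors the new config value
)

func main() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379") // assumed address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	m := cachedMsg{ClientMsgID: "cmid-1", Seq: 42, Content: "hello"}
	key := fmt.Sprintf("%s%s_%d", messageCache, "userID1", m.Seq)

	// Write: flatten the message into hash fields (roughly what Pb2Map + AddFlat do)
	// and bound the key's lifetime explicitly.
	if _, err := conn.Do("HMSET", redis.Args{}.Add(key).AddFlat(m)...); err != nil {
		panic(err)
	}
	if _, err := conn.Do("EXPIRE", key, msgCacheTimeout); err != nil {
		panic(err)
	}

	// Read: HGETALL on a missing key returns an empty reply rather than redis.ErrNil,
	// so a miss shows up as zero values and the caller can fall back to mongo for that seq.
	values, err := redis.Values(conn.Do("HGETALL", key))
	if err != nil {
		panic(err)
	}
	var out cachedMsg
	if err := redis.ScanStruct(values, &out); err != nil {
		panic(err)
	}
	fmt.Printf("cached copy: %+v\n", out)
}
```

This is the same per-seq key scheme the pull path depends on: seqs that cannot be served from the cache are collected into failedSeqList and only those are fetched through GetMsgBySeqListMongo2.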
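
The `blk0`/`remain` arithmetic in `BatchInsertChat2DB` decides how many messages of a batch still fit into the user's current mongo document before the rest spill into a new one. The sketch below replays that arithmetic with a small hypothetical block size standing in for `GetSingleGocMsgNum()`; the `blockUID` helper assumes the document id suffix is simply the seq divided by the block size, which is consistent with how the remainder logic groups seqs, but the real `getSeqUid` may differ.

```go
package main

import "fmt"

// singleDocMsgNum stands in for GetSingleGocMsgNum(); a small value keeps the output readable.
const singleDocMsgNum = 100

// remainingInCurrentDoc mirrors the remain/blk0 arithmetic in BatchInsertChat2DB: given a
// user's current max seq, how many more messages still fit into the mongo document that
// seq belongs to before the writer must open the next document.
func remainingInCurrentDoc(currentMaxSeq uint64) uint64 {
	blk0 := uint64(singleDocMsgNum - 1)
	if currentMaxSeq < uint64(singleDocMsgNum) {
		// Document 0 only ever holds seqs 1..singleDocMsgNum-1, since seq 0 is never assigned.
		return blk0 - currentMaxSeq
	}
	excludeBlk0 := currentMaxSeq - blk0
	return (uint64(singleDocMsgNum) - (excludeBlk0 % uint64(singleDocMsgNum))) % uint64(singleDocMsgNum)
}

// blockUID sketches a getSeqUid-style document id: the user ID plus the index of the block
// the seq falls into (assumed here to be seq divided by the block size).
func blockUID(userID string, seq uint32) string {
	return fmt.Sprintf("%s:%d", userID, seq/uint32(singleDocMsgNum))
}

func main() {
	for _, maxSeq := range []uint64{0, 42, 99, 100, 150, 199, 200} {
		fmt.Printf("maxSeq=%3d  room left in current doc=%2d  next msg lands in %s\n",
			maxSeq, remainingInCurrentDoc(maxSeq), blockUID("u1", uint32(maxSeq)+1))
	}
}
```

With a block size of 100, a user whose max seq is 150 has room for 49 more messages in document `u1:1`, while message 200 opens document `u1:2`; `BatchInsertChat2DB` pushes the first `remain` messages into the existing document and inserts the overflow as a fresh document.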