fix: graceful exit for kafka consumer of msgtransfer

Signed-off-by: rfyiamcool <rfyiamcool@163.com>
Branch: pull/1483/head
Author: rfyiamcool, 2 years ago
Parent: 35bac04f58
Commit: 630b75a1fb

@@ -15,17 +15,24 @@
 package msgtransfer
 
 import (
+	"context"
 	"errors"
 	"fmt"
-	"github.com/OpenIMSDK/tools/mw"
+	"net/http"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+	"time"
+
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/collectors"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
-	"log"
-	"net/http"
-	"sync"
+
+	"github.com/OpenIMSDK/tools/log"
+	"github.com/OpenIMSDK/tools/mw"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
@@ -50,18 +57,22 @@ func StartTransfer(prometheusPort int) error {
 	if err != nil {
 		return err
 	}
-	if err := db.AutoMigrate(&relationtb.ChatLogModel{}); err != nil {
+	if err = db.AutoMigrate(&relationtb.ChatLogModel{}); err != nil {
 		fmt.Printf("gorm: AutoMigrate ChatLogModel err: %v\n", err)
 	}
+
 	rdb, err := cache.NewRedis()
 	if err != nil {
 		return err
 	}
+
 	mongo, err := unrelation.NewMongo()
 	if err != nil {
 		return err
 	}
-	if err := mongo.CreateMsgIndex(); err != nil {
+
+	if err = mongo.CreateMsgIndex(); err != nil {
 		return err
 	}
+
 	client, err := kdisc.NewDiscoveryRegister(config.Config.Envs.Discovery)
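Note: the `:=` to `=` changes above reuse the err already declared earlier in StartTransfer instead of declaring a fresh err scoped to the if statement. Both forms compile, so this is cleanup rather than a behavior fix. A minimal sketch of the scoping difference (doWork is a hypothetical helper, not part of this commit):

	package main

	import "errors"

	func doWork() error { return errors.New("boom") }

	func main() {
		var err error
		if err := doWork(); err != nil { // := declares a new err that shadows the outer one
			println("inner err:", err.Error())
		}
		println("outer err is nil:", err == nil) // true: the outer err was never assigned

		if err = doWork(); err != nil { // = assigns to the outer err
			println("outer err:", err.Error())
		}
		println("outer err is nil:", err == nil) // false
	}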
@@ -72,9 +83,11 @@ func StartTransfer(prometheusPort int) error {
 	if err != nil {
 		return err
 	}
+
 	if err := client.CreateRpcRootNodes(config.Config.GetServiceNames()); err != nil {
 		return err
 	}
+
 	client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()))
 	msgModel := cache.NewMsgCacheModel(rdb)
 	msgDocModel := unrelation.NewMsgMongoDriver(mongo.GetDatabase())
@@ -98,35 +111,68 @@ func NewMsgTransfer(chatLogDatabase controller.ChatLogDatabase,
 }
 
 func (m *MsgTransfer) Start(prometheusPort int) error {
-	var wg sync.WaitGroup
-	wg.Add(1)
 	fmt.Println("start msg transfer", "prometheusPort:", prometheusPort)
 	if prometheusPort <= 0 {
 		return errors.New("prometheusPort not correct")
 	}
+
 	if config.Config.ChatPersistenceMysql {
 		// go m.persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(m.persistentCH)
 	} else {
 		fmt.Println("msg transfer not start mysql consumer")
 	}
-	go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyCH)
-	go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyMongoCH)
-	// go m.modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(m.modifyCH)
-	/*err := prome.StartPrometheusSrv(prometheusPort)
-	if err != nil {
-		return err
-	}*/
-	////////////////////////////
+
+	var wg sync.WaitGroup
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyCH)
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyMongoCH)
+	}()
+
 	if config.Config.Prometheus.Enable {
-		reg := prometheus.NewRegistry()
-		reg.MustRegister(
-			collectors.NewGoCollector(),
-		)
-		reg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer")...)
-		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
-		log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil))
+		go func() {
+			proreg := prometheus.NewRegistry()
+			proreg.MustRegister(
+				collectors.NewGoCollector(),
+			)
+			proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer")...)
+			http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
+			err := http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
+			if err != nil && err != http.ErrServerClosed {
+				panic(err)
+			}
+		}()
 	}
-	////////////////////////////////////////
-	wg.Wait()
+
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
+	<-sigs
+
+	// graceful close kafka client.
+	go m.historyCH.historyConsumerGroup.Close()
+	go m.historyMongoCH.historyConsumerGroup.Close()
+
+	done := make(chan struct{}, 1)
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+
+	select {
+	case <-done:
+		log.ZInfo(context.Background(), "msgtransfer exit successfully")
+	case <-time.After(15 * time.Second):
+		log.ZError(context.Background(), "msgtransfer force to exit, timeout 15s", nil)
+	}
+
 	return nil
 }
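Note: the rewritten Start() above is the standard Go graceful-shutdown shape: run each consumer in a goroutine tracked by a WaitGroup, block on signal.Notify, ask the consumers to close, then wait for the group with a deadline so a stuck consumer cannot hang the exit forever. A self-contained sketch of the same pattern (the stop channel stands in for the consumer groups' Close method and is not part of this commit):

	package main

	import (
		"fmt"
		"os"
		"os/signal"
		"sync"
		"syscall"
		"time"
	)

	func main() {
		var wg sync.WaitGroup
		stop := make(chan struct{}) // hypothetical stand-in for consumerGroup.Close()

		wg.Add(1)
		go func() {
			defer wg.Done()
			<-stop // a real consumer would block in its consume loop until closed
		}()

		// Block until SIGINT/SIGTERM/SIGQUIT, exactly as Start() does above.
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
		<-sigs

		close(stop) // ask workers to exit (the commit calls Close() on each consumer group)

		// Wait for the workers, but never longer than 15s.
		done := make(chan struct{})
		go func() {
			wg.Wait()
			close(done)
		}()

		select {
		case <-done:
			fmt.Println("exit successfully")
		case <-time.After(15 * time.Second):
			fmt.Println("force exit, timeout 15s")
		}
	}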

@@ -19,6 +19,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
@@ -430,16 +431,29 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
 	log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
 		claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
-	split := 1000
-	rwLock := new(sync.RWMutex)
-	messages := make([]*sarama.ConsumerMessage, 0, 1000)
-	ticker := time.NewTicker(time.Millisecond * 100)
+	var (
+		split    = 1000
+		rwLock   = new(sync.RWMutex)
+		messages = make([]*sarama.ConsumerMessage, 0, 1000)
+		ticker   = time.NewTicker(time.Millisecond * 100)
+
+		wg      = sync.WaitGroup{}
+		running = new(atomic.Bool)
+	)
+	running.Store(true)
 
+	wg.Add(1)
 	go func() {
+		defer wg.Done()
 		for {
 			select {
 			case <-ticker.C:
+				// if the buffer is empty and running is false, return loop.
 				if len(messages) == 0 {
+					if !running.Load() {
+						return
+					}
+
 					continue
 				}
@@ -472,7 +486,18 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
 		}
 	}()
 
-	for msg := range claim.Messages() {
-		if len(msg.Value) == 0 {
-			continue
-		}
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		for running.Load() {
+			select {
+			case msg, ok := <-claim.Messages():
+				if !ok {
+					running.Store(false)
+					return
+				}
+
+				if len(msg.Value) == 0 {
+					continue
+				}
@@ -482,7 +507,14 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
 			rwLock.Unlock()
 
 			sess.MarkMessage(msg, "")
+			case <-sess.Context().Done():
+				running.Store(false)
+				return
 			}
+		}
+	}()
+
+	wg.Wait()
 
 	return nil
 }
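Note: the reworked ConsumeClaim splits the work into a reader goroutine and a batching goroutine that share an atomic.Bool stop flag: the reader flips it off when the claim channel closes or the session context is cancelled, and the ticker goroutine keeps flushing until its buffer is empty before returning, so buffered messages are not dropped on shutdown. A standalone sketch of that handoff, with plain channels standing in for sarama's claim and session:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
		"time"
	)

	func main() {
		var (
			mu       sync.Mutex
			buf      []int
			wg       sync.WaitGroup
			running  atomic.Bool
			msgs     = make(chan int)      // stands in for claim.Messages()
			sessDone = make(chan struct{}) // stands in for sess.Context().Done()
		)
		running.Store(true)

		// Batcher: flush on a ticker; exit only when the buffer is empty AND running is false.
		wg.Add(1)
		go func() {
			defer wg.Done()
			ticker := time.NewTicker(100 * time.Millisecond)
			defer ticker.Stop()
			for range ticker.C {
				mu.Lock()
				n := len(buf)
				buf = buf[:0]
				mu.Unlock()
				if n == 0 {
					if !running.Load() {
						return
					}
					continue
				}
				fmt.Println("flushed", n, "messages")
			}
		}()

		// Reader: stop when the message channel closes or the session ends.
		wg.Add(1)
		go func() {
			defer wg.Done()
			for running.Load() {
				select {
				case m, ok := <-msgs:
					if !ok {
						running.Store(false)
						return
					}
					mu.Lock()
					buf = append(buf, m)
					mu.Unlock()
				case <-sessDone:
					running.Store(false)
					return
				}
			}
		}()

		for i := 0; i < 5; i++ {
			msgs <- i
		}
		close(msgs) // simulate rebalance/shutdown
		wg.Wait()
	}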

@@ -16,15 +16,19 @@ package kafka
 import (
 	"context"
+	"errors"
 
+	"github.com/IBM/sarama"
+
 	"github.com/OpenIMSDK/tools/log"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-
-	"github.com/IBM/sarama"
 )
 
 type MConsumerGroup struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+
 	sarama.ConsumerGroup
 	groupID string
 	topics  []string
@@ -51,7 +55,10 @@ func NewMConsumerGroup(consumerConfig *MConsumerGroupConfig, topics, addrs []str
 	if err != nil {
 		panic(err.Error())
 	}
+
+	ctx, cancel := context.WithCancel(context.Background())
 	return &MConsumerGroup{
+		ctx, cancel,
 		consumerGroup,
 		groupID,
 		topics,
@@ -64,11 +71,23 @@ func (mc *MConsumerGroup) GetContextFromMsg(cMsg *sarama.ConsumerMessage) contex
 
 func (mc *MConsumerGroup) RegisterHandleAndConsumer(handler sarama.ConsumerGroupHandler) {
 	log.ZDebug(context.Background(), "register consumer group", "groupID", mc.groupID)
-	ctx := context.Background()
 	for {
-		err := mc.ConsumerGroup.Consume(ctx, mc.topics, handler)
+		err := mc.ConsumerGroup.Consume(mc.ctx, mc.topics, handler)
+		if errors.Is(err, sarama.ErrClosedConsumerGroup) {
+			return
+		}
+
+		if mc.ctx.Err() != nil {
+			return
+		}
 		if err != nil {
-			panic(err.Error())
+			log.ZError(context.Background(), "kafka consume error", err)
+			return
 		}
 	}
 }
+
+func (mc *MConsumerGroup) Close() {
+	mc.cancel()
+	mc.ConsumerGroup.Close()
+}
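Note: storing ctx and cancel on MConsumerGroup turns shutdown into a two-step handshake: Close() cancels the context and closes the underlying sarama group, the blocking Consume call returns, and the loop in RegisterHandleAndConsumer exits through the ErrClosedConsumerGroup or ctx.Err() checks instead of panicking. A hedged sketch of how a caller drives that lifecycle (runConsumer and its wiring are illustrative, not part of this commit):

	// runConsumer assumes handler implements sarama.ConsumerGroupHandler,
	// mirroring how Start() drives the history consumer groups above.
	func runConsumer(group *MConsumerGroup, handler sarama.ConsumerGroupHandler) {
		var wg sync.WaitGroup

		wg.Add(1)
		go func() {
			defer wg.Done()
			// Blocks until Close() is called; Consume then returns
			// sarama.ErrClosedConsumerGroup or mc.ctx reports cancellation.
			group.RegisterHandleAndConsumer(handler)
		}()

		// ... block here until a shutdown signal arrives ...

		group.Close() // cancel the context and close the sarama consumer group
		wg.Wait()     // RegisterHandleAndConsumer has returned; safe to exit
	}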
