fix: graceful exit for kafka consumer of msgtransfer (#1483)

* fix: graceful exit for kafka consumer of msgtransfer

Signed-off-by: rfyiamcool <rfyiamcool@163.com>

* Update init.go

* Update init.go

---------

Signed-off-by: rfyiamcool <rfyiamcool@163.com>
Co-authored-by: OpenIM-Gordon <46924906+FGadvancer@users.noreply.github.com>
pull/1887/head
fengyun.rui authored 10 months ago, committed by GitHub
parent 0865eb65b1
commit 31381935f1

@@ -18,7 +18,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"log"
+	"os"
+	"os/signal"
+	"syscall"
+	"time"
 	"net/http"
 	"sync"
@@ -30,7 +33,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
+	"github.com/OpenIMSDK/tools/log"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
@@ -51,11 +54,13 @@ func StartTransfer(prometheusPort int) error {
 	if err != nil {
 		return err
 	}
+
 	mongo, err := unrelation.NewMongo()
 	if err != nil {
 		return err
 	}
-	if err := mongo.CreateMsgIndex(); err != nil {
+
+	if err = mongo.CreateMsgIndex(); err != nil {
 		return err
 	}
 	client, err := kdisc.NewDiscoveryRegister(config.Config.Envs.Discovery)
@@ -66,6 +71,7 @@ func StartTransfer(prometheusPort int) error {
 	if err != nil {
 		return err
 	}
+
 	if err := client.CreateRpcRootNodes(config.Config.GetServiceNames()); err != nil {
 		return err
 	}
@@ -103,26 +109,62 @@ func NewMsgTransfer(msgDatabase controller.CommonMsgDatabase, conversationRpcCli
 func (m *MsgTransfer) Start(prometheusPort int) error {
 	ctx := context.Background()
-	var wg sync.WaitGroup
-	wg.Add(1)
 	fmt.Println("start msg transfer", "prometheusPort:", prometheusPort)
 	if prometheusPort <= 0 {
 		return errs.Wrap(errors.New("prometheusPort not correct"))
 	}
-	go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyCH)
-	go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyMongoCH)
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyCH)
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyMongoCH)
+	}()
+
 	if config.Config.Prometheus.Enable {
-		reg := prometheus.NewRegistry()
-		reg.MustRegister(
-			collectors.NewGoCollector(),
-		)
-		reg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer")...)
-		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
-		log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil))
+		go func() {
+			proreg := prometheus.NewRegistry()
+			proreg.MustRegister(
+				collectors.NewGoCollector(),
+			)
+			proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer")...)
+			http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
+			err := http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
+			if err != nil && err != http.ErrServerClosed {
+				panic(err)
+			}
+		}()
 	}
-	////////////////////////////////////////
-	wg.Wait()
+
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
+	<-sigs
+
+	// graceful close kafka client.
+	go m.historyCH.historyConsumerGroup.Close()
+	go m.historyMongoCH.historyConsumerGroup.Close()
+
+	done := make(chan struct{}, 1)
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+
+	select {
+	case <-done:
+		log.ZInfo(context.Background(), "msgtransfer exit successfully")
+	case <-time.After(15 * time.Second):
+		log.ZError(context.Background(), "msgtransfer force to exit, timeout 15s", nil)
+	}
+
 	return nil
 }
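The exit path above is the standard bounded-wait idiom: wg.Wait cannot take a deadline, so it is wrapped in a goroutine that closes a channel, and a select races that channel against a timer. A minimal, self-contained sketch of the idiom, assuming only the standard library (waitTimeout is a hypothetical helper, not part of this commit):

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitTimeout blocks until wg is done or d elapses; it reports which happened.
func waitTimeout(wg *sync.WaitGroup, d time.Duration) bool {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		return true // all goroutines finished in time
	case <-time.After(d):
		return false // timed out; goroutines may still be running
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(100 * time.Millisecond) // stands in for consumer shutdown
	}()
	fmt.Println("clean exit:", waitTimeout(&wg, 15*time.Second))
}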

@@ -19,6 +19,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
@ -431,16 +432,29 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset", log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
split := 1000 var (
rwLock := new(sync.RWMutex) split = 1000
messages := make([]*sarama.ConsumerMessage, 0, 1000) rwLock = new(sync.RWMutex)
ticker := time.NewTicker(time.Millisecond * 100) messages = make([]*sarama.ConsumerMessage, 0, 1000)
ticker = time.NewTicker(time.Millisecond * 100)
wg = sync.WaitGroup{}
running = new(atomic.Bool)
)
wg.Add(1)
go func() { go func() {
defer wg.Done()
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
// if the buffer is empty and running is false, return loop.
if len(messages) == 0 { if len(messages) == 0 {
if !running.Load() {
return
}
continue continue
} }
@@ -473,7 +487,18 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
 		}
 	}()
 
-	for msg := range claim.Messages() {
-		if len(msg.Value) == 0 {
-			continue
-		}
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for running.Load() {
+			select {
+			case msg, ok := <-claim.Messages():
+				if !ok {
+					running.Store(false)
+					return
+				}
+				if len(msg.Value) == 0 {
+					continue
+				}
@@ -483,7 +508,14 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
-		rwLock.Unlock()
-		sess.MarkMessage(msg, "")
-	}
+				rwLock.Unlock()
+				sess.MarkMessage(msg, "")
+			case <-sess.Context().Done():
+				running.Store(false)
+				return
+			}
+		}
+	}()
+
+	wg.Wait()
 	return nil
 }
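Replacing `for msg := range claim.Messages()` with a select matters because a plain range blocks until Kafka closes the channel; the select also watches the session context, so a rebalance or shutdown interrupts a blocked receive. An isolated sketch of the loop, with msgs and ctx standing in for claim.Messages() and sess.Context():

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

func readLoop(ctx context.Context, msgs <-chan string, running *atomic.Bool) {
	for running.Load() {
		select {
		case m, ok := <-msgs:
			if !ok { // channel closed upstream: stop the loop
				running.Store(false)
				return
			}
			fmt.Println("got:", m)
		case <-ctx.Done(): // session cancelled: stop instead of blocking forever
			running.Store(false)
			return
		}
	}
}

func main() {
	msgs := make(chan string)
	ctx, cancel := context.WithCancel(context.Background())

	var running atomic.Bool
	running.Store(true)

	go func() {
		msgs <- "hello"
		time.Sleep(50 * time.Millisecond)
		cancel() // simulate shutdown while the reader is blocked on receive
	}()
	readLoop(ctx, msgs, &running)
}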

@@ -16,18 +16,18 @@ package kafka
 import (
 	"context"
+	"errors"
+
+	"github.com/IBM/sarama"
 	"strings"
 
 	"github.com/OpenIMSDK/tools/errs"
 	"github.com/OpenIMSDK/tools/log"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-
-	"github.com/IBM/sarama"
 )
 
 type MConsumerGroup struct {
+	ctx    context.Context
+	cancel context.CancelFunc
 	sarama.ConsumerGroup
 	groupID string
 	topics  []string
@@ -54,7 +54,10 @@ func NewMConsumerGroup(consumerConfig *MConsumerGroupConfig, topics, addrs []str
 	if err != nil {
 		return nil, errs.Wrap(err, strings.Join(topics, ","), strings.Join(addrs, ","), groupID, config.Config.Kafka.Username, config.Config.Kafka.Password)
 	}
+
+	ctx, cancel := context.WithCancel(context.Background())
 	return &MConsumerGroup{
+		ctx, cancel,
 		consumerGroup,
 		groupID,
 		topics,
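Keeping a context/cancel pair on the struct is the usual way to let a later Close() interrupt the group's long-running Consume loop. A generic sketch of that ownership pattern, independent of sarama (Worker is illustrative, not from this PR):

package main

import (
	"context"
	"fmt"
	"time"
)

// Worker owns its lifecycle: Run blocks on w.ctx, Close cancels it.
type Worker struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func NewWorker() *Worker {
	ctx, cancel := context.WithCancel(context.Background())
	return &Worker{ctx: ctx, cancel: cancel}
}

func (w *Worker) Run() {
	<-w.ctx.Done() // stands in for a blocking loop such as Consume
	fmt.Println("run loop exited:", w.ctx.Err())
}

// Close may be called from any goroutine; it unblocks Run.
func (w *Worker) Close() { w.cancel() }

func main() {
	w := NewWorker()
	go func() {
		time.Sleep(50 * time.Millisecond)
		w.Close()
	}()
	w.Run()
}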
@@ -68,7 +71,14 @@ func (mc *MConsumerGroup) GetContextFromMsg(cMsg *sarama.ConsumerMessage) contex
 func (mc *MConsumerGroup) RegisterHandleAndConsumer(ctx context.Context, handler sarama.ConsumerGroupHandler) {
 	log.ZDebug(context.Background(), "register consumer group", "groupID", mc.groupID)
 	for {
-		err := mc.ConsumerGroup.Consume(ctx, mc.topics, handler)
+		err := mc.ConsumerGroup.Consume(mc.ctx, mc.topics, handler)
+		if errors.Is(err, sarama.ErrClosedConsumerGroup) {
+			return
+		}
+		if mc.ctx.Err() != nil {
+			return
+		}
 		if err != nil {
 			log.ZWarn(ctx, "consume err", err, "topic", mc.topics, "groupID", mc.groupID)
 		}
@@ -77,3 +87,8 @@ func (mc *MConsumerGroup) RegisterHandleAndConsumer(ctx context.Context, handler
 		}
 	}
 }
+
+func (mc *MConsumerGroup) Close() {
+	mc.cancel()
+	mc.ConsumerGroup.Close()
+}
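Close() cancels the stored context and closes the underlying group; a Consume call blocked inside RegisterHandleAndConsumer then returns sarama.ErrClosedConsumerGroup, which the loop treats as a clean stop rather than an error. A usage sketch against the real github.com/IBM/sarama API (broker address, topic, and group name are placeholders):

package main

import (
	"context"
	"errors"
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/IBM/sarama"
)

// noopHandler satisfies sarama.ConsumerGroupHandler with minimal behavior.
type noopHandler struct{}

func (noopHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (noopHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (noopHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "") // commit offset for each message
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_0_0_0
	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigs
		cancel()
		group.Close() // unblocks Consume with ErrClosedConsumerGroup
	}()

	for {
		err := group.Consume(ctx, []string{"example-topic"}, noopHandler{})
		if errors.Is(err, sarama.ErrClosedConsumerGroup) || ctx.Err() != nil {
			return // clean shutdown, mirroring RegisterHandleAndConsumer above
		}
		if err != nil {
			log.Println("consume err:", err)
		}
	}
}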
