fix: process add errors wrap. (#1862)

OpenIM-Gordon authored 5 months ago, committed by GitHub · commit c55e03dc70 · parent 55ca661d13

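Every hunk below applies the same pattern: errors that were previously returned bare, logged-then-panicked, or swallowed inside constructors are now wrapped with errs.Wrap from github.com/OpenIMSDK/tools/errs and propagated to the caller. A minimal sketch of the idiom, assuming only the variadic signature visible in the hunks (the openPort stub and its message are hypothetical):

package main

import (
	"errors"
	"fmt"

	"github.com/OpenIMSDK/tools/errs"
)

// openPort is a hypothetical stand-in for the fallible setup calls
// touched by this commit.
func openPort(port int) error {
	if port == 0 {
		return errors.New("port is empty")
	}
	return nil
}

func run() error {
	if err := openPort(0); err != nil {
		// errs.Wrap records the failure with optional context strings,
		// replacing the old bare `return err` and panic paths.
		return errs.Wrap(err, "openPort failed")
	}
	return nil
}

func main() {
	fmt.Println(run())
}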
@@ -17,6 +17,7 @@ package main
import (
"context"
"fmt"
"github.com/OpenIMSDK/tools/errs"
"net"
"net/http"
_ "net/http/pprof"
@@ -28,8 +29,6 @@ import (
"github.com/OpenIMSDK/protocol/constant"
"github.com/OpenIMSDK/tools/discoveryregistry"
"github.com/OpenIMSDK/tools/log"
"github.com/openimsdk/open-im-server/v3/internal/api"
"github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
@@ -44,55 +43,42 @@ func main() {
apiCmd.AddPortFlag()
apiCmd.AddApi(run)
if err := apiCmd.Execute(); err != nil {
log.ZError(context.Background(), "API command execution failed", err)
panic(err.Error())
}
}
func run(port int, proPort int) error {
log.ZInfo(context.Background(), "Openim api port:", "port", port, "proPort", proPort)
if port == 0 || proPort == 0 {
err := "port or proPort is empty:" + strconv.Itoa(port) + "," + strconv.Itoa(proPort)
log.ZError(context.Background(), err, nil)
return fmt.Errorf(err)
return errs.Wrap(fmt.Errorf(err))
}
rdb, err := cache.NewRedis()
if err != nil {
log.ZError(context.Background(), "Failed to initialize Redis", err)
return err
}
log.ZInfo(context.Background(), "api start init discov client")
var client discoveryregistry.SvcDiscoveryRegistry
// Determine whether zk is passed according to whether it is a clustered deployment
client, err = kdisc.NewDiscoveryRegister(config.Config.Envs.Discovery)
if err != nil {
log.ZError(context.Background(), "Failed to initialize discovery register", err)
return err
return errs.Wrap(err, "register discovery err")
}
if err = client.CreateRpcRootNodes(config.Config.GetServiceNames()); err != nil {
log.ZError(context.Background(), "Failed to create RPC root nodes", err)
return err
return errs.Wrap(err, "create rpc root nodes error")
}
log.ZInfo(context.Background(), "api register public config to discov")
if err = client.RegisterConf2Registry(constant.OpenIMCommonConfigKey, config.Config.EncodeConfig()); err != nil {
log.ZError(context.Background(), "Failed to register public config to discov", err)
return err
}
log.ZInfo(context.Background(), "api register public config to discov success")
router := api.NewGinRouter(client, rdb)
if config.Config.Prometheus.Enable {
p := ginprom.NewPrometheus("app", prommetrics.GetGinCusMetrics("Api"))
p.SetListenAddress(fmt.Sprintf(":%d", proPort))
p.Use(router)
}
log.ZInfo(context.Background(), "api init router success")
var address string
if config.Config.Api.ListenIP != "" {
@@ -100,13 +86,11 @@ func run(port int, proPort int) error {
} else {
address = net.JoinHostPort("0.0.0.0", strconv.Itoa(port))
}
log.ZInfo(context.Background(), "start api server", "address", address, "OpenIM version", config.Version)
server := http.Server{Addr: address, Handler: router}
go func() {
err = server.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
log.ZError(context.Background(), "api run failed", err, "address", address)
os.Exit(1)
}
}()
@@ -120,7 +104,6 @@ func run(port int, proPort int) error {
// graceful shutdown operation.
if err := server.Shutdown(ctx); err != nil {
log.ZError(context.Background(), "failed to api-server shutdown", err)
return err
}

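A note on the shutdown hunk above: ListenAndServe always returns a non-nil error, and http.ErrServerClosed is its expected result once Shutdown is called, which is why only other errors terminate the process. A self-contained sketch of the same flow, with the signal handling assumed rather than shown in the hunk:

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	server := &http.Server{Addr: ":8080"}

	go func() {
		// http.ErrServerClosed is the normal result after Shutdown;
		// anything else is a real failure (the hunk calls os.Exit(1)).
		if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Printf("api run failed: %v", err)
			os.Exit(1)
		}
	}()

	// Block until a termination signal, then drain gracefully.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	<-sigs

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		log.Printf("failed to api-server shutdown: %v", err)
	}
}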
@@ -15,8 +15,10 @@
package msgtransfer
import (
"context"
"errors"
"fmt"
"github.com/OpenIMSDK/tools/errs"
"log"
"net/http"
"sync"
@@ -69,40 +71,47 @@ func StartTransfer(prometheusPort int) error {
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
msgModel := cache.NewMsgCacheModel(rdb)
msgDocModel := unrelation.NewMsgMongoDriver(mongo.GetDatabase())
msgDatabase := controller.NewCommonMsgDatabase(msgDocModel, msgModel)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel)
if err != nil {
return err
}
conversationRpcClient := rpcclient.NewConversationRpcClient(client)
groupRpcClient := rpcclient.NewGroupRpcClient(client)
msgTransfer := NewMsgTransfer(msgDatabase, &conversationRpcClient, &groupRpcClient)
msgTransfer, err := NewMsgTransfer(msgDatabase, &conversationRpcClient, &groupRpcClient)
if err != nil {
return err
}
return msgTransfer.Start(prometheusPort)
}
func NewMsgTransfer(msgDatabase controller.CommonMsgDatabase, conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) *MsgTransfer {
return &MsgTransfer{
historyCH: NewOnlineHistoryRedisConsumerHandler(msgDatabase, conversationRpcClient, groupRpcClient),
historyMongoCH: NewOnlineHistoryMongoConsumerHandler(msgDatabase),
func NewMsgTransfer(msgDatabase controller.CommonMsgDatabase, conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*MsgTransfer, error) {
historyCH, err := NewOnlineHistoryRedisConsumerHandler(msgDatabase, conversationRpcClient, groupRpcClient)
if err != nil {
return nil, err
}
historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(msgDatabase)
if err != nil {
return nil, err
}
return &MsgTransfer{
historyCH: historyCH,
historyMongoCH: historyMongoCH,
}, nil
}
func (m *MsgTransfer) Start(prometheusPort int) error {
ctx := context.Background()
var wg sync.WaitGroup
wg.Add(1)
fmt.Println("start msg transfer", "prometheusPort:", prometheusPort)
if prometheusPort <= 0 {
return errors.New("prometheusPort not correct")
}
if config.Config.ChatPersistenceMysql {
// go m.persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(m.persistentCH)
} else {
fmt.Println("msg transfer not start mysql consumer")
return errs.Wrap(errors.New("prometheusPort not correct"))
}
go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyCH)
go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyMongoCH)
// go m.modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(m.modifyCH)
/*err := prome.StartPrometheusSrv(prometheusPort)
if err != nil {
return err
}*/
////////////////////////////
go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyCH)
go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyMongoCH)
if config.Config.Prometheus.Enable {
reg := prometheus.NewRegistry()
reg.MustRegister(

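The hunks above turn constructors that used to panic on a bad Kafka setup into the idiomatic (value, error) form, so StartTransfer can simply propagate the failure. A reduced sketch of the shape, with hypothetical stand-in types:

package main

import (
	"errors"
	"fmt"
)

type redisHandler struct{}
type mongoHandler struct{}

// Hypothetical stand-ins for NewOnlineHistoryRedisConsumerHandler and
// NewOnlineHistoryMongoConsumerHandler, which can now fail.
func newRedisHandler(ok bool) (*redisHandler, error) {
	if !ok {
		return nil, errors.New("kafka consumer group init failed")
	}
	return &redisHandler{}, nil
}

func newMongoHandler() (*mongoHandler, error) { return &mongoHandler{}, nil }

type msgTransfer struct {
	historyCH      *redisHandler
	historyMongoCH *mongoHandler
}

// newMsgTransfer mirrors NewMsgTransfer: each fallible dependency is
// built first, and the first error aborts construction.
func newMsgTransfer() (*msgTransfer, error) {
	historyCH, err := newRedisHandler(true)
	if err != nil {
		return nil, err
	}
	historyMongoCH, err := newMongoHandler()
	if err != nil {
		return nil, err
	}
	return &msgTransfer{historyCH: historyCH, historyMongoCH: historyMongoCH}, nil
}

func main() {
	m, err := newMsgTransfer()
	fmt.Println(m, err)
}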
@@ -87,7 +87,7 @@ func NewOnlineHistoryRedisConsumerHandler(
database controller.CommonMsgDatabase,
conversationRpcClient *rpcclient.ConversationRpcClient,
groupRpcClient *rpcclient.GroupRpcClient,
) *OnlineHistoryRedisConsumerHandler {
) (*OnlineHistoryRedisConsumerHandler, error) {
var och OnlineHistoryRedisConsumerHandler
och.msgDatabase = database
och.msgDistributionCh = make(chan Cmd2Value) // no buffer channel
@@ -98,14 +98,15 @@ func NewOnlineHistoryRedisConsumerHandler(
}
och.conversationRpcClient = conversationRpcClient
och.groupRpcClient = groupRpcClient
och.historyConsumerGroup = kafka.NewMConsumerGroup(&kafka.MConsumerGroupConfig{
var err error
och.historyConsumerGroup, err = kafka.NewMConsumerGroup(&kafka.MConsumerGroupConfig{
KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false,
}, []string{config.Config.Kafka.LatestMsgToRedis.Topic},
config.Config.Kafka.Addr, config.Config.Kafka.ConsumerGroupID.MsgToRedis)
// statistics.NewStatistics(&och.singleMsgSuccessCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d
// second singleMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
return &och
return &och, err
}
func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {

@@ -34,16 +34,21 @@ type OnlineHistoryMongoConsumerHandler struct {
msgDatabase controller.CommonMsgDatabase
}
func NewOnlineHistoryMongoConsumerHandler(database controller.CommonMsgDatabase) *OnlineHistoryMongoConsumerHandler {
func NewOnlineHistoryMongoConsumerHandler(database controller.CommonMsgDatabase) (*OnlineHistoryMongoConsumerHandler, error) {
historyConsumerGroup, err := kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{
KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false,
}, []string{config.Config.Kafka.MsgToMongo.Topic},
config.Config.Kafka.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMongo)
if err != nil {
return nil, err
}
mc := &OnlineHistoryMongoConsumerHandler{
historyConsumerGroup: kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{
KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false,
}, []string{config.Config.Kafka.MsgToMongo.Topic},
config.Config.Kafka.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMongo),
msgDatabase: database,
historyConsumerGroup: historyConsumerGroup,
msgDatabase: database,
}
return mc
return mc, nil
}
func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(

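One detail worth calling out in the hunk above: the consumer group used to be constructed inline inside the composite literal, which leaves no place to check an error. The commit hoists the call out, checks it, and only then assembles the struct. A minimal sketch of that refactor (group and handler are hypothetical stand-ins):

package main

import (
	"errors"
	"fmt"
)

type group struct{}

// newGroup stands in for kfk.NewMConsumerGroup, which now returns an error.
func newGroup(ok bool) (*group, error) {
	if !ok {
		return nil, errors.New("sarama.NewConsumerGroup failed")
	}
	return &group{}, nil
}

type handler struct {
	historyConsumerGroup *group
}

func newHandler() (*handler, error) {
	// Before: the literal held `historyConsumerGroup: newGroup(...)`,
	// with no way to inspect the error. After: hoist, check, then build.
	g, err := newGroup(true)
	if err != nil {
		return nil, err
	}
	return &handler{historyConsumerGroup: g}, nil
}

func main() {
	h, err := newHandler()
	fmt.Println(h, err)
}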
@@ -14,19 +14,24 @@
package push
import "context"
type Consumer struct {
pushCh ConsumerHandler
successCount uint64
}
func NewConsumer(pusher *Pusher) *Consumer {
return &Consumer{
pushCh: *NewConsumerHandler(pusher),
func NewConsumer(pusher *Pusher) (*Consumer, error) {
c, err := NewConsumerHandler(pusher)
if err != nil {
return nil, err
}
return &Consumer{
pushCh: *c,
}, nil
}
func (c *Consumer) Start() {
// statistics.NewStatistics(&c.successCount, config.Config.ModuleName.PushName, fmt.Sprintf("%d second push to
// msg_gateway count", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
go c.pushCh.pushConsumerGroup.RegisterHandleAndConsumer(&c.pushCh)
go c.pushCh.pushConsumerGroup.RegisterHandleAndConsumer(context.Background(), &c.pushCh)
}

@@ -35,15 +35,19 @@ type ConsumerHandler struct {
pusher *Pusher
}
func NewConsumerHandler(pusher *Pusher) *ConsumerHandler {
func NewConsumerHandler(pusher *Pusher) (*ConsumerHandler, error) {
var consumerHandler ConsumerHandler
consumerHandler.pusher = pusher
consumerHandler.pushConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{
var err error
consumerHandler.pushConsumerGroup, err = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{
KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false,
}, []string{config.Config.Kafka.MsgToPush.Topic}, config.Config.Kafka.Addr,
config.Config.Kafka.ConsumerGroupID.MsgToPush)
return &consumerHandler
if err != nil {
return nil, err
}
return &consumerHandler, nil
}
func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {

@@ -66,9 +66,12 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
pusher: pusher,
})
}()
consumer, err := NewConsumer(pusher)
if err != nil {
return err
}
go func() {
defer wg.Done()
consumer := NewConsumer(pusher)
consumer.Start()
}()
wg.Wait()

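The ordering in the hunk above is the point: an error cannot be returned from inside a go func, so NewConsumer is now called before the goroutine is spawned and Start fails fast. A compact sketch, with stub types in place of the pusher and consumer:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type consumer struct{}

func (c *consumer) Start() {}

// newConsumer is a hypothetical stand-in for NewConsumer(pusher).
func newConsumer(ok bool) (*consumer, error) {
	if !ok {
		return nil, errors.New("push consumer handler init failed")
	}
	return &consumer{}, nil
}

func start() error {
	var wg sync.WaitGroup
	wg.Add(1)
	// Construct (and fail) before the goroutine exists; the goroutine
	// itself has no return path for an error.
	c, err := newConsumer(true)
	if err != nil {
		return err
	}
	go func() {
		defer wg.Done()
		c.Start()
	}()
	wg.Wait()
	return nil
}

func main() { fmt.Println(start()) }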
@@ -80,7 +80,10 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
userRpcClient := rpcclient.NewUserRpcClient(client)
groupRpcClient := rpcclient.NewGroupRpcClient(client)
friendRpcClient := rpcclient.NewFriendRpcClient(client)
msgDatabase := controller.NewCommonMsgDatabase(msgDocModel, cacheModel)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, cacheModel)
if err != nil {
return err
}
s := &msgServer{
Conversation: &conversationClient,
User: &userRpcClient,

@@ -17,6 +17,7 @@ package tools
import (
"context"
"fmt"
"github.com/OpenIMSDK/tools/errs"
"os"
"os/signal"
"syscall"
@@ -25,14 +26,13 @@ import (
"github.com/redis/go-redis/v9"
"github.com/robfig/cron/v3"
"github.com/OpenIMSDK/tools/log"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
)
func StartTask() error {
fmt.Println("cron task start, config", config.Config.ChatRecordsClearTime)
msgTool, err := InitMsgTool()
if err != nil {
return err
@@ -47,18 +47,16 @@ func StartTask() error {
// register cron tasks
var crontab = cron.New()
log.ZInfo(context.Background(), "start chatRecordsClearTime cron task", "cron config", config.Config.ChatRecordsClearTime)
fmt.Println("start chatRecordsClearTime cron task", "cron config", config.Config.ChatRecordsClearTime)
_, err = crontab.AddFunc(config.Config.ChatRecordsClearTime, cronWrapFunc(rdb, "cron_clear_msg_and_fix_seq", msgTool.AllConversationClearMsgAndFixSeq))
if err != nil {
log.ZError(context.Background(), "start allConversationClearMsgAndFixSeq cron failed", err)
panic(err)
return errs.Wrap(err)
}
log.ZInfo(context.Background(), "start msgDestruct cron task", "cron config", config.Config.MsgDestructTime)
fmt.Println("start msgDestruct cron task", "cron config", config.Config.MsgDestructTime)
_, err = crontab.AddFunc(config.Config.MsgDestructTime, cronWrapFunc(rdb, "cron_conversations_destruct_msgs", msgTool.ConversationsDestructMsgs))
if err != nil {
log.ZError(context.Background(), "start conversationsDestructMsgs cron failed", err)
panic(err)
return errs.Wrap(err)
}
// start crontab

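For the cron hunks above: AddFunc fails when the spec string cannot be parsed, and that error is now wrapped and returned instead of logged and panicked. A minimal sketch against the same robfig/cron/v3 API (the invalid spec is deliberate):

package main

import (
	"fmt"

	"github.com/OpenIMSDK/tools/errs"
	"github.com/robfig/cron/v3"
)

func startTask(spec string, job func()) error {
	crontab := cron.New()
	// A bad cron spec surfaces here; wrap and return it to the caller.
	if _, err := crontab.AddFunc(spec, job); err != nil {
		return errs.Wrap(err)
	}
	crontab.Start()
	return nil
}

func main() {
	fmt.Println(startTask("not a cron spec", func() {}))
}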
@@ -84,7 +84,10 @@ func InitMsgTool() (*MsgTool, error) {
if err != nil {
return nil, err
}
msgDatabase := controller.InitCommonMsgDatabase(rdb, mongo.GetDatabase())
msgDatabase, err := controller.InitCommonMsgDatabase(rdb, mongo.GetDatabase())
if err != nil {
return nil, err
}
userMongoDB := unrelation.NewUserMongoDriver(mongo.GetDatabase())
ctxTx := tx.NewMongo(mongo.GetClient())
userDatabase := controller.NewUserDatabase(

@@ -49,7 +49,7 @@ func NewRedis() (redis.UniversalClient, error) {
overrideConfigFromEnv()
if len(config.Config.Redis.Address) == 0 {
return nil, errors.New("redis address is empty")
return nil, errs.Wrap(errors.New("redis address is empty"))
}
specialerror.AddReplace(redis.Nil, errs.ErrRecordNotFound)
var rdb redis.UniversalClient
@@ -77,7 +77,7 @@ func NewRedis() (redis.UniversalClient, error) {
defer cancel()
err = rdb.Ping(ctx).Err()
if err != nil {
return nil, fmt.Errorf("redis ping %w", err)
return nil, errs.Wrap(fmt.Errorf("redis ping %w", err))
}
redisClient = rdb

@@ -126,21 +126,32 @@ type CommonMsgDatabase interface {
ConvertMsgsDocLen(ctx context.Context, conversationIDs []string)
}
func NewCommonMsgDatabase(msgDocModel unrelationtb.MsgDocModelInterface, cacheModel cache.MsgModel) CommonMsgDatabase {
func NewCommonMsgDatabase(msgDocModel unrelationtb.MsgDocModelInterface, cacheModel cache.MsgModel) (CommonMsgDatabase, error) {
producerToRedis, err := kafka.NewKafkaProducer(config.Config.Kafka.Addr, config.Config.Kafka.LatestMsgToRedis.Topic)
if err != nil {
return nil, err
}
producerToMongo, err := kafka.NewKafkaProducer(config.Config.Kafka.Addr, config.Config.Kafka.MsgToMongo.Topic)
if err != nil {
return nil, err
}
producerToPush, err := kafka.NewKafkaProducer(config.Config.Kafka.Addr, config.Config.Kafka.MsgToPush.Topic)
if err != nil {
return nil, err
}
return &commonMsgDatabase{
msgDocDatabase: msgDocModel,
cache: cacheModel,
producer: kafka.NewKafkaProducer(config.Config.Kafka.Addr, config.Config.Kafka.LatestMsgToRedis.Topic),
producerToMongo: kafka.NewKafkaProducer(config.Config.Kafka.Addr, config.Config.Kafka.MsgToMongo.Topic),
producerToPush: kafka.NewKafkaProducer(config.Config.Kafka.Addr, config.Config.Kafka.MsgToPush.Topic),
}
producer: producerToRedis,
producerToMongo: producerToMongo,
producerToPush: producerToPush,
}, nil
}
func InitCommonMsgDatabase(rdb redis.UniversalClient, database *mongo.Database) CommonMsgDatabase {
func InitCommonMsgDatabase(rdb redis.UniversalClient, database *mongo.Database) (CommonMsgDatabase, error) {
cacheModel := cache.NewMsgCacheModel(rdb)
msgDocModel := unrelation.NewMsgMongoDriver(database)
CommonMsgDatabase := NewCommonMsgDatabase(msgDocModel, cacheModel)
return CommonMsgDatabase
return NewCommonMsgDatabase(msgDocModel, cacheModel)
}
type commonMsgDatabase struct {

@@ -16,6 +16,7 @@ package mgo
import (
"context"
"github.com/OpenIMSDK/tools/errs"
"time"
"github.com/OpenIMSDK/protocol/constant"
@@ -38,7 +39,7 @@ func NewConversationMongo(db *mongo.Database) (*ConversationMgo, error) {
Options: options.Index().SetUnique(true),
})
if err != nil {
return nil, err
return nil, errs.Wrap(err)
}
return &ConversationMgo{coll: coll}, nil
}

@@ -16,6 +16,7 @@ package mgo
import (
"context"
"github.com/OpenIMSDK/tools/errs"
"time"
"github.com/OpenIMSDK/tools/mgoutil"
@@ -36,7 +37,7 @@ func NewGroupMongo(db *mongo.Database) (relation.GroupModelInterface, error) {
Options: options.Index().SetUnique(true),
})
if err != nil {
return nil, err
return nil, errs.Wrap(err)
}
return &GroupMgo{coll: coll}, nil
}

@@ -16,6 +16,7 @@ package mgo
import (
"context"
"github.com/OpenIMSDK/tools/errs"
"github.com/OpenIMSDK/protocol/constant"
"github.com/OpenIMSDK/tools/mgoutil"
@@ -37,7 +38,7 @@ func NewGroupMember(db *mongo.Database) (relation.GroupMemberModelInterface, err
Options: options.Index().SetUnique(true),
})
if err != nil {
return nil, err
return nil, errs.Wrap(err)
}
return &GroupMemberMgo{coll: coll}, nil
}

@@ -16,6 +16,7 @@ package mgo
import (
"context"
"github.com/OpenIMSDK/tools/errs"
"github.com/OpenIMSDK/tools/mgoutil"
"github.com/OpenIMSDK/tools/pagination"
@@ -36,7 +37,7 @@ func NewGroupRequestMgo(db *mongo.Database) (relation.GroupRequestModelInterface
Options: options.Index().SetUnique(true),
})
if err != nil {
return nil, err
return nil, errs.Wrap(err)
}
return &GroupRequestMgo{coll: coll}, nil
}

@@ -40,7 +40,7 @@ func NewUserMongo(db *mongo.Database) (relation.UserModelInterface, error) {
Options: options.Index().SetUnique(true),
})
if err != nil {
return nil, err
return nil, errs.Wrap(err)
}
return &UserMgo{coll: coll}, nil
}

@@ -27,8 +27,6 @@ import (
"github.com/OpenIMSDK/tools/errs"
"github.com/OpenIMSDK/tools/mw/specialerror"
"github.com/OpenIMSDK/tools/utils"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/unrelation"
)
@@ -63,9 +61,9 @@ func NewMongo() (*Mongo, error) {
time.Sleep(time.Second) // exponential backoff could be implemented here
continue
}
return nil, err
return nil, errs.Wrap(err)
}
return nil, err
return nil, errs.Wrap(err)
}
func buildMongoURI() string {
@@ -150,7 +148,7 @@ func (m *Mongo) createMongoIndex(collection string, isUnique bool, keys ...strin
_, err := indexView.CreateOne(context.Background(), index, opts)
if err != nil {
return utils.Wrap(err, "CreateIndex")
return errs.Wrap(err, "CreateIndex")
}
return nil
}

@@ -16,6 +16,8 @@ package kafka
import (
"context"
"github.com/OpenIMSDK/tools/errs"
"strings"
"github.com/OpenIMSDK/tools/log"
@@ -36,7 +38,7 @@ type MConsumerGroupConfig struct {
IsReturnErr bool
}
func NewMConsumerGroup(consumerConfig *MConsumerGroupConfig, topics, addrs []string, groupID string) *MConsumerGroup {
func NewMConsumerGroup(consumerConfig *MConsumerGroupConfig, topics, addrs []string, groupID string) (*MConsumerGroup, error) {
consumerGroupConfig := sarama.NewConfig()
consumerGroupConfig.Version = consumerConfig.KafkaVersion
consumerGroupConfig.Consumer.Offsets.Initial = consumerConfig.OffsetsInitial
@@ -49,26 +51,28 @@ func NewMConsumerGroup(consumerConfig *MConsumerGroupConfig, topics, addrs []str
SetupTLSConfig(consumerGroupConfig)
consumerGroup, err := sarama.NewConsumerGroup(addrs, groupID, consumerGroupConfig)
if err != nil {
panic(err.Error())
return nil, errs.Wrap(err, strings.Join(topics, ","), strings.Join(addrs, ","), groupID)
}
return &MConsumerGroup{
consumerGroup,
groupID,
topics,
}
}, nil
}
func (mc *MConsumerGroup) GetContextFromMsg(cMsg *sarama.ConsumerMessage) context.Context {
return GetContextWithMQHeader(cMsg.Headers)
}
func (mc *MConsumerGroup) RegisterHandleAndConsumer(handler sarama.ConsumerGroupHandler) {
func (mc *MConsumerGroup) RegisterHandleAndConsumer(ctx context.Context, handler sarama.ConsumerGroupHandler) {
log.ZDebug(context.Background(), "register consumer group", "groupID", mc.groupID)
ctx := context.Background()
for {
err := mc.ConsumerGroup.Consume(ctx, mc.topics, handler)
if err != nil {
panic(err.Error())
log.ZWarn(ctx, "consume err", err, "topic", mc.topics, "groupID", mc.groupID)
}
if ctx.Err() != nil {
return
}
}
}

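The reworked loop above changes two behaviors: a Consume error is logged and retried instead of panicking, and a cancelled caller context now ends the loop (Consume returns once the context is done, after which ctx.Err() is non-nil). A self-contained sketch of the loop shape, with a stub in place of ConsumerGroup.Consume:

package main

import (
	"context"
	"errors"
	"log"
	"time"
)

// consumeLoop mirrors the new RegisterHandleAndConsumer body.
func consumeLoop(ctx context.Context, consume func(context.Context) error) {
	for {
		if err := consume(ctx); err != nil {
			log.Printf("consume err: %v", err) // previously panic(err.Error())
		}
		if ctx.Err() != nil {
			return // caller cancelled: stop instead of re-consuming
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	consumeLoop(ctx, func(ctx context.Context) error {
		time.Sleep(30 * time.Millisecond) // stands in for one Consume session
		return errors.New("transient rebalance")
	})
}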
@@ -18,6 +18,7 @@ import (
"bytes"
"context"
"errors"
"github.com/OpenIMSDK/tools/errs"
"strings"
"time"
@@ -44,7 +45,7 @@ type Producer struct {
}
// NewKafkaProducer initializes a new Kafka producer.
func NewKafkaProducer(addr []string, topic string) *Producer {
func NewKafkaProducer(addr []string, topic string) (*Producer, error) {
p := Producer{
addr: addr,
topic: topic,
@@ -87,17 +88,17 @@ func NewKafkaProducer(addr []string, topic string) *Producer {
for i := 0; i <= maxRetry; i++ {
p.producer, err = sarama.NewSyncProducer(p.addr, p.config)
if err == nil {
return &p
return &p, nil
}
time.Sleep(1 * time.Second) // Wait before retrying
}
// Panic if unable to create producer after retries
if err != nil {
panic("Failed to create Kafka producer: " + err.Error())
return nil, errs.Wrap(errors.New("failed to create Kafka producer: " + err.Error()))
}
return &p
return &p, nil
}
// configureProducerAck configures the producer's acknowledgement level.

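The producer constructor above keeps its bounded retry loop but now hands the final failure back to the caller. A sketch of the pattern, with a hypothetical dial stub in place of sarama.NewSyncProducer and the one-second pause taken from the hunk:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/OpenIMSDK/tools/errs"
)

// dial is a hypothetical stand-in for sarama.NewSyncProducer.
func dial() (string, error) { return "", errors.New("broker unreachable") }

func newProducer(maxRetry int) (string, error) {
	var err error
	for i := 0; i <= maxRetry; i++ {
		var p string
		if p, err = dial(); err == nil {
			return p, nil
		}
		time.Sleep(1 * time.Second) // wait before retrying
	}
	// Retries exhausted: wrap and return instead of the old panic.
	return "", errs.Wrap(err, "failed to create Kafka producer")
}

func main() {
	_, err := newProducer(1)
	fmt.Println(err)
}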
@@ -17,6 +17,7 @@ package startrpc
import (
"errors"
"fmt"
"github.com/OpenIMSDK/tools/errs"
"log"
"net"
"net/http"
@@ -43,7 +44,6 @@ import (
"github.com/OpenIMSDK/tools/discoveryregistry"
"github.com/OpenIMSDK/tools/mw"
"github.com/OpenIMSDK/tools/network"
"github.com/OpenIMSDK/tools/utils"
)
// Start rpc server.
@@ -61,20 +61,20 @@ func Start(
net.JoinHostPort(network.GetListenIP(config.Config.Rpc.ListenIP), strconv.Itoa(rpcPort)),
)
if err != nil {
return err
return errs.Wrap(err, network.GetListenIP(config.Config.Rpc.ListenIP), strconv.Itoa(rpcPort))
}
defer listener.Close()
client, err := kdisc.NewDiscoveryRegister(config.Config.Envs.Discovery)
if err != nil {
return utils.Wrap1(err)
return errs.Wrap(err)
}
defer client.Close()
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
registerIP, err := network.GetRpcRegisterIP(config.Config.Rpc.RegisterIP)
if err != nil {
return err
return errs.Wrap(err)
}
var reg *prometheus.Registry
@@ -96,7 +96,7 @@ func Start(
err = rpcFn(client, srv)
if err != nil {
return utils.Wrap1(err)
return errs.Wrap(err)
}
err = client.Register(
rpcRegisterName,
@@ -105,7 +105,7 @@ func Start(
grpc.WithTransportCredentials(insecure.NewCredentials()),
)
if err != nil {
return utils.Wrap1(err)
return errs.Wrap(err)
}
var wg errgroup.Group
@@ -123,7 +123,7 @@ func Start(
})
wg.Go(func() error {
return utils.Wrap1(srv.Serve(listener))
return errs.Wrap(srv.Serve(listener))
})
sigs := make(chan os.Signal, 1)
@@ -146,7 +146,7 @@ func Start(
return gerr
case <-time.After(15 * time.Second):
return utils.Wrap1(errors.New("timeout exit"))
return errs.Wrap(errors.New("timeout exit"))
}
}

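At the tail of Start above, shutdown waits on the errgroup but caps the wait at fifteen seconds, and the timeout error is now wrapped with errs instead of utils.Wrap1. A reduced sketch of that select (the serve stub is hypothetical):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/OpenIMSDK/tools/errs"
	"golang.org/x/sync/errgroup"
)

func main() {
	var wg errgroup.Group
	wg.Go(func() error {
		time.Sleep(50 * time.Millisecond) // stands in for srv.Serve(listener)
		return nil
	})

	done := make(chan error, 1)
	go func() { done <- wg.Wait() }()

	select {
	case gerr := <-done:
		fmt.Println("server stopped:", gerr)
	case <-time.After(15 * time.Second):
		// Mirrors the hunk: a wrapped timeout error for the caller.
		fmt.Println(errs.Wrap(errors.New("timeout exit")))
	}
}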