update loadConfig and discovery logic for Kubernetes deployments.

pull/2920/head
Monet Lee 10 months ago
parent c9e13e645e
commit 0d8f6ddb22

@@ -5,6 +5,9 @@ etcd:
   username: ''
   password: ''
+kubernetes:
+  namespace: default
 rpcService:
   user: user-rpc-service
   friend: friend-rpc-service
@@ -15,5 +18,3 @@ rpcService:
   auth: auth-rpc-service
   conversation: conversation-rpc-service
   third: third-rpc-service

@@ -17,9 +17,6 @@ package api
 import (
 	"context"
 	"fmt"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	"github.com/openimsdk/tools/utils/datautil"
-	"github.com/openimsdk/tools/utils/network"
 	"net"
 	"net/http"
 	"os"
@@ -28,6 +25,11 @@ import (
 	"syscall"
 	"time"

+	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+	"github.com/openimsdk/tools/utils/datautil"
+	"github.com/openimsdk/tools/utils/network"
+	"github.com/openimsdk/tools/utils/runtimeenv"
+
 	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
 	"github.com/openimsdk/tools/discovery"
@@ -40,6 +42,8 @@ type Config struct {
 	API       config.API
 	Share     config.Share
 	Discovery config.Discovery
+	RuntimeEnv string
 }

 func Start(ctx context.Context, index int, config *Config) error {
@@ -48,10 +52,12 @@ func Start(ctx context.Context, index int, config *Config) error {
 		return err
 	}

+	config.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
+
 	var client discovery.SvcDiscoveryRegistry

 	// Determine whether zk is passed according to whether it is a clustered deployment
-	client, err = kdisc.NewDiscoveryRegister(&config.Discovery)
+	client, err = kdisc.NewDiscoveryRegister(&config.Discovery, config.RuntimeEnv)
 	if err != nil {
 		return errs.WrapMsg(err, "failed to register discovery service")
 	}
@@ -81,7 +87,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 	address := net.JoinHostPort(network.GetListenIP(config.API.Api.ListenIP), strconv.Itoa(apiPort))

 	server := http.Server{Addr: address, Handler: router}

-	log.CInfo(ctx, "API server is initializing", "address", address, "apiPort", apiPort, "prometheusPort", prometheusPort)
+	log.CInfo(ctx, "API server is initializing", "runtimeEnv", config.RuntimeEnv, "address", address, "apiPort", apiPort, "prometheusPort", prometheusPort)
 	go func() {
 		err = server.ListenAndServe()
 		if err != nil && err != http.ErrServerClosed {

@@ -16,11 +16,13 @@ package msggateway
 import (
 	"context"
+	"time"

 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/utils/datautil"
-	"time"
+	"github.com/openimsdk/tools/utils/runtimeenv"

 	"github.com/openimsdk/tools/log"
 )
@@ -31,11 +33,15 @@ type Config struct {
 	RedisConfig    config.Redis
 	WebhooksConfig config.Webhooks
 	Discovery      config.Discovery
+	RuntimeEnv     string
 }

 // Start run ws server.
 func Start(ctx context.Context, index int, conf *Config) error {
-	log.CInfo(ctx, "MSG-GATEWAY server is initializing", "autoSetPorts", conf.MsgGateway.RPC.AutoSetPorts,
+	conf.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
+
+	log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", conf.RuntimeEnv, "autoSetPorts", conf.MsgGateway.RPC.AutoSetPorts,
 		"rpcPorts", conf.MsgGateway.RPC.Ports,
 		"wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
 	wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index)

@@ -29,6 +29,7 @@ import (
 	"github.com/openimsdk/tools/db/mongoutil"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/utils/datautil"
+	"github.com/openimsdk/tools/utils/runtimeenv"

 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
@@ -51,6 +52,8 @@ type MsgTransfer struct {
 	historyMongoCH *OnlineHistoryMongoConsumerHandler
 	ctx            context.Context
 	cancel         context.CancelFunc
+	runTimeEnv     string
 }

 type Config struct {
@@ -64,7 +67,9 @@ type Config struct {
 }

 func Start(ctx context.Context, index int, config *Config) error {
-	log.CInfo(ctx, "MSG-TRANSFER server is initializing", "prometheusPorts",
+	runTimeEnv := runtimeenv.PrintRuntimeEnvironment()
+
+	log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runTimeEnv, "prometheusPorts",
 		config.MsgTransfer.Prometheus.Ports, "index", index)
 	mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
@@ -75,7 +80,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 	if err != nil {
 		return err
 	}
-	client, err := discRegister.NewDiscoveryRegister(&config.Discovery)
+	client, err := discRegister.NewDiscoveryRegister(&config.Discovery, runTimeEnv)
 	if err != nil {
 		return err
 	}
@@ -115,6 +120,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 	msgTransfer := &MsgTransfer{
 		historyCH:      historyCH,
 		historyMongoCH: historyMongoCH,
+		runTimeEnv:     runTimeEnv,
 	}

 	return msgTransfer.Start(index, config)
 }

@@ -14,6 +14,7 @@ import (
 	"github.com/openimsdk/tools/mcontext"
 	"github.com/openimsdk/tools/mw"
+	"github.com/openimsdk/tools/utils/runtimeenv"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
@@ -26,14 +27,18 @@ type CronTaskConfig struct {
 	CronTask   config.CronTask
 	Share      config.Share
 	Discovery  config.Discovery
+	runTimeEnv string
 }

 func Start(ctx context.Context, config *CronTaskConfig) error {
-	log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.CronExecuteTime, "msgDestructTime", config.CronTask.RetainChatRecords)
+	config.runTimeEnv = runtimeenv.PrintRuntimeEnvironment()
+
+	log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", config.runTimeEnv, "chatRecordsClearTime", config.CronTask.CronExecuteTime, "msgDestructTime", config.CronTask.RetainChatRecords)
+
 	if config.CronTask.RetainChatRecords < 1 {
 		return errs.New("msg destruct time must be greater than 1").Wrap()
 	}
-	client, err := kdisc.NewDiscoveryRegister(&config.Discovery)
+	client, err := kdisc.NewDiscoveryRegister(&config.Discovery, config.runTimeEnv)
 	if err != nil {
 		return errs.WrapMsg(err, "failed to register discovery service")
 	}

@@ -16,12 +16,12 @@ package cmd
 import (
 	"fmt"
-	"path/filepath"

 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/version"
 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
+	"github.com/openimsdk/tools/utils/runtimeenv"
 	"github.com/spf13/cobra"
 )
@@ -105,18 +105,19 @@ func (r *RootCmd) initializeConfiguration(cmd *cobra.Command, opts *CmdOpts) err
 	if err != nil {
 		return err
 	}

+	runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
+
 	// Load common configuration file
 	//opts.configMap[ShareFileName] = StructEnvPrefix{EnvPrefix: shareEnvPrefix, ConfigStruct: &r.share}
 	for configFileName, configStruct := range opts.configMap {
-		err := config.LoadConfig(filepath.Join(configDirectory, configFileName),
-			ConfigEnvPrefixMap[configFileName], configStruct)
+		err := config.Load(configDirectory, configFileName, ConfigEnvPrefixMap[configFileName], runtimeEnv, configStruct)
 		if err != nil {
 			return err
 		}
 	}
 	// Load common log configuration file
-	return config.LoadConfig(filepath.Join(configDirectory, LogConfigFileName),
-		ConfigEnvPrefixMap[LogConfigFileName], &r.log)
+	return config.Load(configDirectory, LogConfigFileName, ConfigEnvPrefixMap[LogConfigFileName], runtimeEnv, &r.log)
 }

 func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {

@@ -15,10 +15,11 @@
 package config

 import (
-	"github.com/openimsdk/tools/s3/aws"
 	"strings"
 	"time"

+	"github.com/openimsdk/tools/s3/aws"

 	"github.com/openimsdk/tools/db/mongoutil"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/mq/kafka"
@@ -474,9 +475,14 @@ type ZooKeeper struct {
 type Discovery struct {
 	Enable     string     `mapstructure:"enable"`
 	Etcd       Etcd       `mapstructure:"etcd"`
+	Kubernetes Kubernetes `mapstructure:"kubernetes"`
 	RpcService RpcService `mapstructure:"rpcService"`
 }

+type Kubernetes struct {
+	Namespace string `mapstructure:"namespace"`
+}
+
 type Etcd struct {
 	RootDirectory string   `mapstructure:"rootDirectory"`
 	Address       []string `mapstructure:"address"`
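
For reference, a minimal standalone sketch (not part of the commit) of how the new kubernetes.namespace key decodes into the Discovery struct through viper's mapstructure tags; the trimmed-down structs and YAML snippet here are illustrative only:

package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

// Trimmed-down copies of the config structs added in this commit, for illustration.
type Kubernetes struct {
	Namespace string `mapstructure:"namespace"`
}

type Discovery struct {
	Enable     string     `mapstructure:"enable"`
	Kubernetes Kubernetes `mapstructure:"kubernetes"`
}

func main() {
	yml := []byte("enable: kubernetes\nkubernetes:\n  namespace: default\n")

	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewReader(yml)); err != nil {
		panic(err)
	}

	var d Discovery
	if err := v.Unmarshal(&d); err != nil { // Unmarshal honors the mapstructure tags
		panic(err)
	}
	fmt.Println(d.Enable, d.Kubernetes.Namespace) // kubernetes default
}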

@@ -16,6 +16,12 @@ package config
 const ConfKey = "conf"

+const (
+	MountConfigFilePath = "CONFIG_PATH"
+	DeploymentType      = "DEPLOYMENT_TYPE"
+	KUBERNETES          = "kubernetes"
+)
+
 const (
 	// DefaultDirPerm is used for creating general directories, allowing the owner to read, write, and execute,
 	// while the group and others can only read and execute.

@@ -1,13 +1,29 @@
 package config

 import (
+	"os"
+	"path/filepath"
+	"strings"

 	"github.com/mitchellh/mapstructure"
 	"github.com/openimsdk/tools/errs"
 	"github.com/spf13/viper"
-	"strings"
 )

-func LoadConfig(path string, envPrefix string, config any) error {
+func Load(configDirectory string, configFileName string, envPrefix string, runtimeEnv string, config any) error {
+	if runtimeEnv == KUBERNETES {
+		mountPath := os.Getenv(MountConfigFilePath)
+		if mountPath == "" {
+			return errs.ErrArgs.WrapMsg(MountConfigFilePath + " env is empty")
+		}
+
+		return loadConfig(filepath.Join(mountPath, configFileName), envPrefix, config)
+	}
+
+	return loadConfig(filepath.Join(configDirectory, configFileName), envPrefix, config)
+}
+
+func loadConfig(path string, envPrefix string, config any) error {
 	v := viper.New()
 	v.SetConfigFile(path)
 	v.SetEnvPrefix(envPrefix)
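
For context, a minimal usage sketch of the new Load signature (not part of the commit): when the runtime environment is Kubernetes, the configDirectory argument is ignored and files are read from the mount path in the CONFIG_PATH env var instead. The file name, env prefix, and mount path below are illustrative assumptions:

package main

import (
	"fmt"
	"os"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/tools/utils/runtimeenv"
)

func main() {
	// Assumed ConfigMap mount point; CONFIG_PATH is the MountConfigFilePath constant added above.
	os.Setenv("CONFIG_PATH", "/config")

	var share config.Share
	runtimeEnv := runtimeenv.PrintRuntimeEnvironment()

	// In Kubernetes this reads /config/share.yml; otherwise ./config/share.yml.
	if err := config.Load("./config", "share.yml", "IMENV_SHARE", runtimeEnv, &share); err != nil {
		fmt.Println("load failed:", err)
	}
}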

@@ -1,27 +1,51 @@
 package config

 import (
-	"github.com/stretchr/testify/assert"
+	"os"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )

 func TestLoadLogConfig(t *testing.T) {
 	var log Log
-	err := LoadConfig("../../../config/log.yml", "IMENV_LOG", &log)
+	os.Setenv("IMENV_LOG_REMAINLOGLEVEL", "5")
+	err := Load("../../../config/", "log.yml", "IMENV_LOG", "", &log)
+	assert.Nil(t, err)
+	t.Log(log.RemainLogLevel)
+	// assert.Equal(t, "../../../../logs/", log.StorageLocation)
+}
+
+func TestLoadMongoConfig(t *testing.T) {
+	var mongo Mongo
+	// os.Setenv("DEPLOYMENT_TYPE", "kubernetes")
+	os.Setenv("IMENV_MONGODB_PASSWORD", "openIM1231231")
+	// os.Setenv("IMENV_MONGODB_URI", "openIM123")
+	// os.Setenv("IMENV_MONGODB_USERNAME", "openIM123")
+	err := Load("../../../config/", "mongodb.yml", "IMENV_MONGODB", "", &mongo)
+	// err := LoadConfig("../../../config/mongodb.yml", "IMENV_MONGODB", &mongo)
 	assert.Nil(t, err)
-	assert.Equal(t, "../../../../logs/", log.StorageLocation)
+	t.Log(mongo.Password)
+	// assert.Equal(t, "openIM123", mongo.Password)
+	t.Log(os.Getenv("IMENV_MONGODB_PASSWORD"))
+	t.Log(mongo)
+	// //export IMENV_OPENIM_RPC_USER_RPC_LISTENIP="0.0.0.0"
+	// assert.Equal(t, "0.0.0.0", user.RPC.ListenIP)
+	// //export IMENV_OPENIM_RPC_USER_RPC_PORTS="10110,10111,10112"
+	// assert.Equal(t, []int{10110, 10111, 10112}, user.RPC.Ports)
 }

 func TestLoadMinioConfig(t *testing.T) {
 	var storageConfig Minio
-	err := LoadConfig("../../../config/minio.yml", "IMENV_MINIO", &storageConfig)
+	err := Load("../../../config/", "minio.yml", "IMENV_MINIO", "", &storageConfig)
 	assert.Nil(t, err)
 	assert.Equal(t, "openim", storageConfig.Bucket)
 }

 func TestLoadWebhooksConfig(t *testing.T) {
 	var webhooks Webhooks
-	err := LoadConfig("../../../config/webhooks.yml", "IMENV_WEBHOOKS", &webhooks)
+	err := Load("../../../config/", "webhooks.yml", "IMENV_WEBHOOKS", "", &webhooks)
 	assert.Nil(t, err)
 	assert.Equal(t, 5, webhooks.BeforeAddBlack.Timeout)
@@ -29,7 +53,7 @@ func TestLoadWebhooksConfig(t *testing.T) {

 func TestLoadOpenIMRpcUserConfig(t *testing.T) {
 	var user User
-	err := LoadConfig("../../../config/openim-rpc-user.yml", "IMENV_OPENIM_RPC_USER", &user)
+	err := Load("../../../config/", "openim-rpc-user.yml", "IMENV_OPENIM_RPC_USER", "", &user)
 	assert.Nil(t, err)
 	//export IMENV_OPENIM_RPC_USER_RPC_LISTENIP="0.0.0.0"
 	assert.Equal(t, "0.0.0.0", user.RPC.ListenIP)
@@ -39,14 +63,14 @@ func TestLoadOpenIMRpcUserConfig(t *testing.T) {

 func TestLoadNotificationConfig(t *testing.T) {
 	var noti Notification
-	err := LoadConfig("../../../config/notification.yml", "IMENV_NOTIFICATION", &noti)
+	err := Load("../../../config/", "notification.yml", "IMENV_NOTIFICATION", "", &noti)
 	assert.Nil(t, err)
 	assert.Equal(t, "Your friend's profile has been changed", noti.FriendRemarkSet.OfflinePush.Title)
 }

 func TestLoadOpenIMThirdConfig(t *testing.T) {
 	var third Third
-	err := LoadConfig("../../../config/openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", &third)
+	err := Load("../../../config/", "openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", "", &third)
 	assert.Nil(t, err)
 	assert.Equal(t, "enabled", third.Object.Enable)
 	assert.Equal(t, "https://oss-cn-chengdu.aliyuncs.com", third.Object.Oss.Endpoint)

@@ -15,19 +15,26 @@
 package discoveryregister

 import (
+	"time"

 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister/kubernetes"
 	"github.com/openimsdk/tools/discovery"
+	"github.com/openimsdk/tools/discovery/kubernetes"
 	"github.com/openimsdk/tools/discovery/etcd"
 	"github.com/openimsdk/tools/errs"
-	"time"
 )

 // NewDiscoveryRegister creates a new service discovery and registry client based on the provided environment type.
-func NewDiscoveryRegister(discovery *config.Discovery) (discovery.SvcDiscoveryRegistry, error) {
+func NewDiscoveryRegister(discovery *config.Discovery, runtimeEnv string) (discovery.SvcDiscoveryRegistry, error) {
+	if runtimeEnv == "kubernetes" {
+		discovery.Enable = "kubernetes"
+	}
+
 	switch discovery.Enable {
-	case "k8s":
-		return kubernetes.NewK8sDiscoveryRegister(discovery.RpcService.MessageGateway)
+	case "kubernetes":
+		return kubernetes.NewKubernetesConnManager(discovery.Kubernetes.Namespace)
 	case "etcd":
 		return etcd.NewSvcDiscoveryRegistry(
 			discovery.Etcd.RootDirectory,
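
An illustrative caller (not from the commit) showing the effect of the new runtimeEnv argument: when the process detects a Kubernetes runtime, the backend is forced to "kubernetes" regardless of discovery.enable in the YAML, otherwise the configured backend (etcd here) is used. The namespace and service name below are assumptions:

package main

import (
	"context"
	"fmt"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
	"github.com/openimsdk/tools/utils/runtimeenv"
)

func main() {
	disc := &config.Discovery{Enable: "etcd"}
	disc.Kubernetes.Namespace = "openim" // assumed namespace

	runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
	client, err := kdisc.NewDiscoveryRegister(disc, runtimeEnv)
	if err != nil {
		fmt.Println("discovery init failed:", err)
		return
	}

	// "user-rpc-service" matches the rpcService.user value added to discovery config in this commit.
	conn, err := client.GetConn(context.Background(), "user-rpc-service")
	fmt.Println(conn, err)
}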

@@ -16,184 +16,281 @@ package kubernetes
 import (
 	"context"
-	"errors"
 	"fmt"
-	"os"
-	"strconv"
-	"strings"
+	"sync"
+	"time"

-	"github.com/openimsdk/tools/discovery"
-	"github.com/openimsdk/tools/log"
-	"github.com/stathat/consistent"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
 )

-// K8sDR represents the Kubernetes service discovery and registration client.
-type K8sDR struct {
-	options               []grpc.DialOption
-	rpcRegisterAddr       string
-	gatewayHostConsistent *consistent.Consistent
-	gatewayName           string
-}
-
-func NewK8sDiscoveryRegister(gatewayName string) (discovery.SvcDiscoveryRegistry, error) {
-	gatewayConsistent := consistent.New()
-	gatewayHosts := getMsgGatewayHost(context.Background(), gatewayName)
-	for _, v := range gatewayHosts {
-		gatewayConsistent.Add(v)
-	}
-	return &K8sDR{gatewayHostConsistent: gatewayConsistent}, nil
-}
-
-func (cli *K8sDR) Register(serviceName, host string, port int, opts ...grpc.DialOption) error {
-	if serviceName != cli.gatewayName {
-		cli.rpcRegisterAddr = serviceName
-	} else {
-		cli.rpcRegisterAddr = getSelfHost(context.Background(), cli.gatewayName)
-	}
-
-	return nil
-}
-
-func (cli *K8sDR) UnRegister() error {
-	return nil
-}
-
-func (cli *K8sDR) CreateRpcRootNodes(serviceNames []string) error {
-	return nil
-}
-
-func (cli *K8sDR) RegisterConf2Registry(key string, conf []byte) error {
-	return nil
-}
-
-func (cli *K8sDR) GetConfFromRegistry(key string) ([]byte, error) {
-	return nil, nil
-}
-
-func (cli *K8sDR) GetUserIdHashGatewayHost(ctx context.Context, userId string) (string, error) {
-	host, err := cli.gatewayHostConsistent.Get(userId)
-	if err != nil {
-		log.ZError(ctx, "GetUserIdHashGatewayHost error", err)
-	}
-
-	return host, err
-}
-
-func getSelfHost(ctx context.Context, gatewayName string) string {
-	port := 88
-	instance := "openimserver"
-	selfPodName := os.Getenv("MY_POD_NAME")
-	ns := os.Getenv("MY_POD_NAMESPACE")
-	statefuleIndex := 0
-	gatewayEnds := strings.Split(gatewayName, ":")
-	if len(gatewayEnds) != 2 {
-		log.ZError(ctx, "msggateway RpcRegisterName is error:config.RpcRegisterName.OpenImMessageGatewayName", errors.New("config error"))
-	} else {
-		port, _ = strconv.Atoi(gatewayEnds[1])
-	}
-	podInfo := strings.Split(selfPodName, "-")
-	instance = podInfo[0]
-	count := len(podInfo)
-	statefuleIndex, _ = strconv.Atoi(podInfo[count-1])
-	host := fmt.Sprintf("%s-openim-msggateway-%d.%s-openim-msggateway-headless.%s.svc.cluster.local:%d", instance, statefuleIndex, instance, ns, port)
-
-	return host
-}
-
-// like openimserver-openim-msggateway-0.openimserver-openim-msggateway-headless.openim-lin.svc.cluster.local:88.
-// Replica set in kubernetes environment
-func getMsgGatewayHost(ctx context.Context, gatewayName string) []string {
-	port := 88
-	instance := "openimserver"
-	selfPodName := os.Getenv("MY_POD_NAME")
-	replicas := os.Getenv("MY_MSGGATEWAY_REPLICACOUNT")
-	ns := os.Getenv("MY_POD_NAMESPACE")
-	gatewayEnds := strings.Split(gatewayName, ":")
-	if len(gatewayEnds) != 2 {
-		log.ZError(ctx, "msggateway RpcRegisterName is error:config.RpcRegisterName.OpenImMessageGatewayName", errors.New("config error"))
-	} else {
-		port, _ = strconv.Atoi(gatewayEnds[1])
-	}
-	nReplicas, _ := strconv.Atoi(replicas)
-	podInfo := strings.Split(selfPodName, "-")
-	instance = podInfo[0]
-	var ret []string
-	for i := 0; i < nReplicas; i++ {
-		host := fmt.Sprintf("%s-openim-msggateway-%d.%s-openim-msggateway-headless.%s.svc.cluster.local:%d", instance, i, instance, ns, port)
-		ret = append(ret, host)
-	}
-	log.ZDebug(ctx, "getMsgGatewayHost", "instance", instance, "selfPodName", selfPodName, "replicas", replicas, "ns", ns, "ret", ret)
-
-	return ret
-}
-
-// GetConns returns the gRPC client connections to the specified service.
-func (cli *K8sDR) GetConns(ctx context.Context, serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error) {
-
-	// This conditional checks if the serviceName is not the OpenImMessageGatewayName.
-	// It seems to handle a special case for the OpenImMessageGateway.
-	if serviceName != cli.gatewayName {
-		// DialContext creates a client connection to the given target (serviceName) using the specified context.
-		// 'cli.options' are likely default or common options for all connections in this struct.
-		// 'opts...' allows for additional gRPC dial options to be passed and used.
-		conn, err := grpc.DialContext(ctx, serviceName, append(cli.options, opts...)...)
-
-		// The function returns a slice of client connections with the new connection, or an error if occurred.
-		return []*grpc.ClientConn{conn}, err
-	} else {
-		// This block is executed if the serviceName is OpenImMessageGatewayName.
-		// 'ret' will accumulate the connections to return.
-		var ret []*grpc.ClientConn
-		// getMsgGatewayHost presumably retrieves hosts for the message gateway service.
-		// The context is passed, likely for cancellation and timeout control.
-		gatewayHosts := getMsgGatewayHost(ctx, cli.gatewayName)
-		// Iterating over the retrieved gateway hosts.
-		for _, host := range gatewayHosts {
-			// Establishes a connection to each host.
-			// Again, appending cli.options with any additional opts provided.
-			conn, err := grpc.DialContext(ctx, host, append(cli.options, opts...)...)
-			// If there's an error while dialing any host, the function returns immediately with the error.
-			if err != nil {
-				return nil, err
-			} else {
-				// If the connection is successful, it is added to the 'ret' slice.
-				ret = append(ret, conn)
-			}
-		}
-		// After all hosts are processed, the slice of connections is returned.
-		return ret, nil
-	}
-}
-
-func (cli *K8sDR) GetConn(ctx context.Context, serviceName string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
-	return grpc.DialContext(ctx, serviceName, append(cli.options, opts...)...)
-}
-
-func (cli *K8sDR) GetSelfConnTarget() string {
-	return cli.rpcRegisterAddr
-}
-
-func (cli *K8sDR) AddOption(opts ...grpc.DialOption) {
-	cli.options = append(cli.options, opts...)
-}
-
-func (cli *K8sDR) CloseConn(conn *grpc.ClientConn) {
-	conn.Close()
-}
-
-// do not use this method for call rpc.
-func (cli *K8sDR) GetClientLocalConns() map[string][]*grpc.ClientConn {
-	log.ZError(context.Background(), "should not call this function!", nil)
-
-	return nil
-}
-
-func (cli *K8sDR) Close() {
-}
+type KubernetesConnManager struct {
+	clientset   *kubernetes.Clientset
+	namespace   string
+	dialOptions []grpc.DialOption
+
+	selfTarget string
+
+	mu      sync.RWMutex
+	connMap map[string][]*grpc.ClientConn
+}
+
+// NewKubernetesConnManager creates a new connection manager that uses Kubernetes services for service discovery.
+func NewKubernetesConnManager(namespace string, options ...grpc.DialOption) (*KubernetesConnManager, error) {
+	config, err := rest.InClusterConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create in-cluster config: %v", err)
+	}
+
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create clientset: %v", err)
+	}
+
+	k := &KubernetesConnManager{
+		clientset:   clientset,
+		namespace:   namespace,
+		dialOptions: options,
+		connMap:     make(map[string][]*grpc.ClientConn),
+	}
+
+	go k.watchEndpoints()
+
+	return k, nil
+}
+
+func (k *KubernetesConnManager) initializeConns(serviceName string) error {
+	port, err := k.getServicePort(serviceName)
+	if err != nil {
+		return err
+	}
+
+	endpoints, err := k.clientset.CoreV1().Endpoints(k.namespace).Get(context.Background(), serviceName, metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to get endpoints for service %s: %v", serviceName, err)
+	}
+
+	var conns []*grpc.ClientConn
+	for _, subset := range endpoints.Subsets {
+		for _, address := range subset.Addresses {
+			target := fmt.Sprintf("%s:%d", address.IP, port)
+			conn, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
+			if err != nil {
+				return fmt.Errorf("failed to dial endpoint %s: %v", target, err)
+			}
+			conns = append(conns, conn)
+		}
+	}
+
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.connMap[serviceName] = conns
+
+	// go k.watchEndpoints(serviceName)
+
+	return nil
+}
+
+// GetConns returns gRPC client connections for a given Kubernetes service name.
+func (k *KubernetesConnManager) GetConns(ctx context.Context, serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error) {
+	k.mu.RLock()
+	conns, exists := k.connMap[serviceName]
+	defer k.mu.RUnlock()
+
+	if exists {
+		return conns, nil
+	}
+
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	// Check if another goroutine has already initialized the connections when we released the read lock
+	conns, exists = k.connMap[serviceName]
+	if exists {
+		return conns, nil
+	}
+
+	if err := k.initializeConns(serviceName); err != nil {
+		return nil, fmt.Errorf("failed to initialize connections for service %s: %v", serviceName, err)
+	}
+
+	return k.connMap[serviceName], nil
+}
+
+// GetConn returns a single gRPC client connection for a given Kubernetes service name.
+func (k *KubernetesConnManager) GetConn(ctx context.Context, serviceName string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	port, err := k.getServicePort(serviceName)
+	if err != nil {
+		return nil, err
+	}
+
+	fmt.Println("SVC port:", port)
+
+	target := fmt.Sprintf("%s.%s.svc.cluster.local:%d", serviceName, k.namespace, port)
+
+	fmt.Println("SVC target:", target)
+
+	return grpc.DialContext(
+		ctx,
+		target,
+		append([]grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}, k.dialOptions...)...,
+	)
+}
+
+// GetSelfConnTarget returns the connection target for the current service.
+func (k *KubernetesConnManager) GetSelfConnTarget() string {
+	return k.selfTarget
+}
+
+// AddOption appends gRPC dial options to the existing options.
+func (k *KubernetesConnManager) AddOption(opts ...grpc.DialOption) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dialOptions = append(k.dialOptions, opts...)
+}
+
+// CloseConn closes a given gRPC client connection.
+func (k *KubernetesConnManager) CloseConn(conn *grpc.ClientConn) {
+	conn.Close()
+}
+
+// Close closes all gRPC connections managed by KubernetesConnManager.
+func (k *KubernetesConnManager) Close() {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	for _, conns := range k.connMap {
+		for _, conn := range conns {
+			_ = conn.Close()
+		}
+	}
+	k.connMap = make(map[string][]*grpc.ClientConn)
+}
+
+func (k *KubernetesConnManager) Register(serviceName, host string, port int, opts ...grpc.DialOption) error {
+	return nil
+}
+
+func (k *KubernetesConnManager) UnRegister() error {
+	return nil
+}
+
+func (k *KubernetesConnManager) GetUserIdHashGatewayHost(ctx context.Context, userId string) (string, error) {
+	return "", nil
+}
+
+func (k *KubernetesConnManager) getServicePort(serviceName string) (int32, error) {
+	svc, err := k.clientset.CoreV1().Services(k.namespace).Get(context.Background(), serviceName, metav1.GetOptions{})
+	if err != nil {
+		fmt.Print("namespace:", k.namespace)
+		return 0, fmt.Errorf("failed to get service %s: %v", serviceName, err)
+	}
+
+	if len(svc.Spec.Ports) == 0 {
+		return 0, fmt.Errorf("service %s has no ports defined", serviceName)
+	}
+
+	return svc.Spec.Ports[0].Port, nil
+}
+
+// watchEndpoints listens for changes in Pod resources.
+func (k *KubernetesConnManager) watchEndpoints() {
+	informerFactory := informers.NewSharedInformerFactory(k.clientset, time.Minute*10)
+	informer := informerFactory.Core().V1().Pods().Informer()
+
+	// Watch for Pod changes (add, update, delete)
+	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			k.handleEndpointChange(obj)
+		},
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			k.handleEndpointChange(newObj)
+		},
+		DeleteFunc: func(obj interface{}) {
+			k.handleEndpointChange(obj)
+		},
+	})
+
+	informerFactory.Start(context.Background().Done())
+
+	<-context.Background().Done() // Block forever
+}
+
+func (k *KubernetesConnManager) handleEndpointChange(obj interface{}) {
+	endpoint, ok := obj.(*v1.Endpoints)
+	if !ok {
+		return
+	}
+	serviceName := endpoint.Name
+	if err := k.initializeConns(serviceName); err != nil {
+		fmt.Printf("Error initializing connections for %s: %v\n", serviceName, err)
+	}
+}
+
+// =================
+
+// initEndpoints initializes connections by fetching all available endpoints in the specified namespace.
+// func (k *KubernetesConnManager) initEndpoints() error {
+// 	k.mu.Lock()
+// 	defer k.mu.Unlock()
+
+// 	pods, err := k.clientset.CoreV1().Pods(k.namespace).List(context.TODO(), metav1.ListOptions{})
+// 	if err != nil {
+// 		return fmt.Errorf("failed to list pods: %v", err)
+// 	}
+
+// 	for _, pod := range pods.Items {
+// 		if pod.Status.Phase == v1.PodRunning {
+// 			target := fmt.Sprintf("%s:%d", address.IP, port)
+// 			conn, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
+// 			conn, err := k.createGRPCConnection(pod)
+// 			if err != nil {
+// 				return fmt.Errorf("failed to create GRPC connection for pod %s: %v", pod.Name, err)
+// 			}
+// 			k.connMap[pod.Name] = append(k.connMap[pod.Name], conn)
+// 		}
+// 	}
+
+// 	return nil
+// }
+
+// -----
+
+// func (k *KubernetesConnManager) watchEndpoints1(serviceName string) {
+// 	// watch for changes to the service's endpoints
+// 	informerFactory := informers.NewSharedInformerFactory(k.clientset, time.Minute)
+// 	endpointsInformer := informerFactory.Core().V1().Endpoints().Informer()
+
+// 	endpointsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+// 		AddFunc: func(obj interface{}) {
+// 			eps := obj.(*v1.Endpoints)
+// 			if eps.Name == serviceName {
+// 				k.initializeConns(serviceName)
+// 			}
+// 		},
+// 		UpdateFunc: func(oldObj, newObj interface{}) {
+// 			eps := newObj.(*v1.Endpoints)
+// 			if eps.Name == serviceName {
+// 				k.initializeConns(serviceName)
+// 			}
+// 		},
+// 		DeleteFunc: func(obj interface{}) {
+// 			eps := obj.(*v1.Endpoints)
+// 			if eps.Name == serviceName {
+// 				k.mu.Lock()
+// 				defer k.mu.Unlock()
+// 				for _, conn := range k.connMap[serviceName] {
+// 					_ = conn.Close()
+// 				}
+// 				delete(k.connMap, serviceName)
+// 			}
+// 		},
+// 	})
+
+// 	informerFactory.Start(wait.NeverStop)
+// 	informerFactory.WaitForCacheSync(wait.NeverStop)
+// }
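
A hedged usage sketch (not from the commit): inside the cluster, GetConns fans out one connection per ready endpoint of a Service, while GetConn dials the ClusterIP DNS name once. The import path assumes the rewritten file stays at pkg/common/discoveryregister/kubernetes, and the namespace and service name are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister/kubernetes"
)

func main() {
	mgr, err := kubernetes.NewKubernetesConnManager("openim") // requires in-cluster credentials
	if err != nil {
		fmt.Println("manager init failed:", err)
		return
	}
	defer mgr.Close()

	conns, err := mgr.GetConns(context.Background(), "messagegateway-rpc-service")
	if err != nil {
		fmt.Println("get conns failed:", err)
		return
	}
	for _, conn := range conns {
		fmt.Println("endpoint target:", conn.Target()) // one conn per pod behind the service
	}
}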

@@ -18,9 +18,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	"github.com/openimsdk/tools/utils/datautil"
-	"google.golang.org/grpc/status"
 	"net"
 	"net/http"
 	"os"
@@ -28,6 +25,13 @@ import (
 	"syscall"
 	"time"

+	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+	"github.com/openimsdk/tools/utils/datautil"
+	"github.com/openimsdk/tools/utils/runtimeenv"
+	"google.golang.org/grpc/status"
+
+	"strconv"
+
 	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
 	"github.com/openimsdk/tools/discovery"
@@ -37,7 +41,6 @@ import (
 	"github.com/openimsdk/tools/utils/network"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
-	"strconv"
 )

 // Start rpc server.
@@ -51,6 +54,8 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 		netErr error
 	)

+	runTimeEnv := runtimeenv.PrintRuntimeEnvironment()
+
 	if !autoSetPorts {
 		rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
 		if err != nil {
@@ -107,11 +112,11 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 	registerIP = network.GetListenIP(registerIP)

 	port, _ := strconv.Atoi(portStr)
-	log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", portStr,
+	log.CInfo(ctx, "RPC server is initializing", "runTimeEnv", runTimeEnv, "rpcRegisterName", rpcRegisterName, "rpcPort", portStr,
 		"prometheusPorts", prometheusConfig.Ports)

 	defer listener.Close()
-	client, err := kdisc.NewDiscoveryRegister(discovery)
+	client, err := kdisc.NewDiscoveryRegister(discovery, runTimeEnv)
 	if err != nil {
 		return err
 	}

@@ -33,6 +33,7 @@ import (
 	"github.com/openimsdk/tools/mq/kafka"
 	"github.com/openimsdk/tools/s3/minio"
 	"github.com/openimsdk/tools/system/program"
+	"github.com/openimsdk/tools/utils/runtimeenv"
 )

 const maxRetry = 180
@@ -78,35 +79,37 @@ func initConfig(configDir string) (*config.Mongo, *config.Redis, *config.Kafka,
 		discovery   = &config.Discovery{}
 		thirdConfig = &config.Third{}
 	)

-	err := config.LoadConfig(filepath.Join(configDir, cmd.MongodbConfigFileName), cmd.ConfigEnvPrefixMap[cmd.MongodbConfigFileName], mongoConfig)
+	runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
+
+	err := config.Load(configDir, cmd.MongodbConfigFileName, cmd.ConfigEnvPrefixMap[cmd.MongodbConfigFileName], runtimeEnv, mongoConfig)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}

-	err = config.LoadConfig(filepath.Join(configDir, cmd.RedisConfigFileName), cmd.ConfigEnvPrefixMap[cmd.RedisConfigFileName], redisConfig)
+	err = config.Load(configDir, cmd.RedisConfigFileName, cmd.ConfigEnvPrefixMap[cmd.RedisConfigFileName], runtimeEnv, redisConfig)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}

-	err = config.LoadConfig(filepath.Join(configDir, cmd.KafkaConfigFileName), cmd.ConfigEnvPrefixMap[cmd.KafkaConfigFileName], kafkaConfig)
+	err = config.Load(configDir, cmd.KafkaConfigFileName, cmd.ConfigEnvPrefixMap[cmd.KafkaConfigFileName], runtimeEnv, kafkaConfig)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}

-	err = config.LoadConfig(filepath.Join(configDir, cmd.OpenIMRPCThirdCfgFileName), cmd.ConfigEnvPrefixMap[cmd.OpenIMRPCThirdCfgFileName], thirdConfig)
+	err = config.Load(configDir, cmd.OpenIMRPCThirdCfgFileName, cmd.ConfigEnvPrefixMap[cmd.OpenIMRPCThirdCfgFileName], runtimeEnv, thirdConfig)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}

 	if thirdConfig.Object.Enable == "minio" {
-		err = config.LoadConfig(filepath.Join(configDir, cmd.MinioConfigFileName), cmd.ConfigEnvPrefixMap[cmd.MinioConfigFileName], minioConfig)
+		err = config.Load(configDir, cmd.MinioConfigFileName, cmd.ConfigEnvPrefixMap[cmd.MinioConfigFileName], runtimeEnv, minioConfig)
 		if err != nil {
 			return nil, nil, nil, nil, nil, err
 		}
 	} else {
 		minioConfig = nil
 	}
-	err = config.LoadConfig(filepath.Join(configDir, cmd.DiscoveryConfigFilename), cmd.ConfigEnvPrefixMap[cmd.DiscoveryConfigFilename], discovery)
+	err = config.Load(configDir, cmd.DiscoveryConfigFilename, cmd.ConfigEnvPrefixMap[cmd.DiscoveryConfigFilename], runtimeEnv, discovery)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}
