fix: fix the print format

pull/1885/head
luhaoling 2 years ago
commit b482cdfe51

@@ -17,6 +17,7 @@ package main
 import (
 	"fmt"
 	"os"
+	"path/filepath"
 
 	"github.com/openimsdk/open-im-server/v3/internal/rpc/auth"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
@@ -31,7 +32,9 @@ func main() {
 		panic(err.Error())
 	}
 	if err := authCmd.StartSvr(config.Config.RpcRegisterName.OpenImAuthName, auth.Start); err != nil {
-		fmt.Fprintf(os.Stderr, "\n\nexit -1: \n%+v\n\n", err)
+		progName := filepath.Base(os.Args[0])
+		fmt.Fprintf(os.Stderr, "\n\n%s exit -1: \n%+v\n\n", progName, err)
 		os.Exit(-1)
 	}
 }
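Note: filepath.Base(os.Args[0]) strips the directory from the invocation path, so each binary built from this template now prefixes its failure output with its own name. A minimal, self-contained sketch of the pattern (the "boom" error is illustrative only):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	progName := filepath.Base(os.Args[0]) // e.g. "openim-rpc-auth"
	err := fmt.Errorf("boom")             // stand-in for a real startup error
	fmt.Fprintf(os.Stderr, "\n\n%s exit -1: \n%+v\n\n", progName, err)
	os.Exit(-1)
}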

@@ -4,7 +4,7 @@ go 1.19
 require (
 	firebase.google.com/go v3.13.0+incompatible
-	github.com/OpenIMSDK/protocol v0.0.48
+	github.com/OpenIMSDK/protocol v0.0.55
 	github.com/OpenIMSDK/tools v0.0.33
 	github.com/bwmarrin/snowflake v0.3.0 // indirect
 	github.com/dtm-labs/rockscache v0.1.1
@@ -155,5 +155,3 @@ require (
 	golang.org/x/crypto v0.17.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 )
-
-replace github.com/OpenIMSDK/protocol v0.0.47 => github.com/AndrewZuo01/protocol v0.0.0-20240112093520-fd9c53e27b94

@@ -18,8 +18,8 @@ firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIw
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/IBM/sarama v1.41.3 h1:MWBEJ12vHC8coMjdEXFq/6ftO6DUZnQlFYcxtOJFa7c=
 github.com/IBM/sarama v1.41.3/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ=
-github.com/OpenIMSDK/protocol v0.0.48 h1:8MIMjyzJRsruYhVv2ZKArFiOveroaofDOb3dlAdgjsw=
-github.com/OpenIMSDK/protocol v0.0.48/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
+github.com/OpenIMSDK/protocol v0.0.55 h1:eBjg8DyuhxGmuCUjpoZjg6MJJJXU/xJ3xJwFhrn34yA=
+github.com/OpenIMSDK/protocol v0.0.55/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
 github.com/OpenIMSDK/tools v0.0.33 h1:rvFCxXaXxLv1MJFC4qcoWRGwKBnV+hR68UN2N0/zZhE=
 github.com/OpenIMSDK/tools v0.0.33/go.mod h1:wBfR5CYmEyvxl03QJbTkhz1CluK6J4/lX0lviu8JAjE=
 github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=

@@ -33,6 +33,10 @@ func (o *AuthApi) UserToken(c *gin.Context) {
 	a2r.Call(auth.AuthClient.UserToken, o.Client, c)
 }
 
+func (o *AuthApi) GetUserToken(c *gin.Context) {
+	a2r.Call(auth.AuthClient.GetUserToken, o.Client, c)
+}
+
 func (o *AuthApi) ParseToken(c *gin.Context) {
 	a2r.Call(auth.AuthClient.ParseToken, o.Client, c)
 }

@@ -150,6 +150,7 @@ func NewGinRouter(discov discoveryregistry.SvcDiscoveryRegistry, rdb redis.Unive
 	{
 		a := NewAuthApi(*authRpc)
 		authRouterGroup.POST("/user_token", a.UserToken)
+		authRouterGroup.POST("/get_user_token", ParseToken, a.GetUserToken)
 		authRouterGroup.POST("/parse_token", a.ParseToken)
 		authRouterGroup.POST("/force_logout", ParseToken, a.ForceLogout)
 	}
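Note: unlike /user_token, the new /get_user_token route is registered behind the ParseToken middleware, so only an already-authenticated caller (an admin, per the RPC-side CheckAdmin below) can reach the handler. A standalone sketch of gin's middleware-then-handler chaining, with a hypothetical requireToken standing in for ParseToken:

package main

import "github.com/gin-gonic/gin"

// requireToken is a hypothetical stand-in for the ParseToken middleware.
func requireToken(c *gin.Context) {
	if c.GetHeader("token") == "" {
		c.AbortWithStatusJSON(401, gin.H{"errMsg": "token required"})
		return
	}
	c.Next() // token present: fall through to the handler
}

func main() {
	r := gin.New()
	authGroup := r.Group("/auth")
	// As in the diff: the middleware runs first, then the handler.
	authGroup.POST("/get_user_token", requireToken, func(c *gin.Context) {
		c.JSON(200, gin.H{"token": "..."})
	})
	r.Run(":8080")
}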

@@ -18,7 +18,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"log"
+	"os"
+	"os/signal"
+	"syscall"
+	"time"
 
 	"net/http"
 	"sync"
@@ -30,7 +33,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
+	"github.com/OpenIMSDK/tools/log"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
@@ -51,11 +54,13 @@ func StartTransfer(prometheusPort int) error {
 	if err != nil {
 		return err
 	}
+
 	mongo, err := unrelation.NewMongo()
 	if err != nil {
 		return err
 	}
-	if err := mongo.CreateMsgIndex(); err != nil {
+
+	if err = mongo.CreateMsgIndex(); err != nil {
 		return err
 	}
 	client, err := kdisc.NewDiscoveryRegister(config.Config.Envs.Discovery)
@@ -66,6 +71,7 @@ func StartTransfer(prometheusPort int) error {
 	if err != nil {
 		return err
 	}
+
 	if err := client.CreateRpcRootNodes(config.Config.GetServiceNames()); err != nil {
 		return err
 	}
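Note: the CreateMsgIndex check changes from `if err := ...` to `if err = ...`, reusing the err already declared earlier in StartTransfer instead of shadowing it with a new variable. The standard-library "log" import is also dropped in favor of the OpenIMSDK/tools structured logger.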
@@ -103,26 +109,62 @@ func NewMsgTransfer(msgDatabase controller.CommonMsgDatabase, conversationRpcCli
 func (m *MsgTransfer) Start(prometheusPort int) error {
 	ctx := context.Background()
-	var wg sync.WaitGroup
-	wg.Add(1)
 	fmt.Println("start msg transfer", "prometheusPort:", prometheusPort)
 	if prometheusPort <= 0 {
 		return errs.Wrap(errors.New("prometheusPort not correct"))
 	}
-	go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyCH)
-	go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyMongoCH)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyCH)
+	}()
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(ctx, m.historyMongoCH)
+	}()
 	if config.Config.Prometheus.Enable {
-		reg := prometheus.NewRegistry()
-		reg.MustRegister(
-			collectors.NewGoCollector(),
-		)
-		reg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer")...)
-		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
-		log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil))
+		go func() {
+			proreg := prometheus.NewRegistry()
+			proreg.MustRegister(
+				collectors.NewGoCollector(),
+			)
+			proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer")...)
+			http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
+			err := http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
+			if err != nil && err != http.ErrServerClosed {
+				panic(err)
+			}
+		}()
 	}
-	////////////////////////////////////////
-	wg.Wait()
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
+	<-sigs
+
+	// graceful close kafka client.
+	go m.historyCH.historyConsumerGroup.Close()
+	go m.historyMongoCH.historyConsumerGroup.Close()
+
+	done := make(chan struct{}, 1)
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+
+	select {
+	case <-done:
+		log.ZInfo(context.Background(), "msgtrasfer exit successfully")
+	case <-time.After(15 * time.Second):
+		log.ZError(context.Background(), "msgtransfer force to exit, timeout 15s", nil)
+	}
 	return nil
 }
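The restructured Start replaces log.Fatal (which would kill the whole process on any metrics-server error) with a panic guarded by http.ErrServerClosed, then blocks on SIGINT/SIGTERM/SIGQUIT before closing the Kafka consumers and waiting at most 15 seconds for the WaitGroup to drain. A stripped-down, runnable sketch of the same wait-signal-close-drain pattern (the worker and stop channel here are illustrative, not OpenIM APIs):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"
)

func main() {
	var wg sync.WaitGroup
	stop := make(chan struct{})

	wg.Add(1)
	go func() { // stands in for a Kafka consumer loop
		defer wg.Done()
		<-stop
	}()

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	<-sigs

	close(stop) // stands in for historyConsumerGroup.Close()

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		fmt.Println("exit successfully")
	case <-time.After(15 * time.Second):
		fmt.Println("force exit after 15s timeout")
	}
}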

@@ -19,6 +19,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
@@ -431,16 +432,29 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
 	log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
 		claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
-	split := 1000
-	rwLock := new(sync.RWMutex)
-	messages := make([]*sarama.ConsumerMessage, 0, 1000)
-	ticker := time.NewTicker(time.Millisecond * 100)
+	var (
+		split    = 1000
+		rwLock   = new(sync.RWMutex)
+		messages = make([]*sarama.ConsumerMessage, 0, 1000)
+		ticker   = time.NewTicker(time.Millisecond * 100)
+
+		wg      = sync.WaitGroup{}
+		running = new(atomic.Bool)
+	)
+
+	wg.Add(1)
 	go func() {
+		defer wg.Done()
 		for {
 			select {
 			case <-ticker.C:
+				// if the buffer is empty and running is false, return loop.
 				if len(messages) == 0 {
+					if !running.Load() {
+						return
+					}
+
 					continue
 				}
@@ -473,17 +487,35 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
 		}
 	}()
 
-	for msg := range claim.Messages() {
-		if len(msg.Value) == 0 {
-			continue
-		}
-		rwLock.Lock()
-		messages = append(messages, msg)
-		rwLock.Unlock()
-		sess.MarkMessage(msg, "")
-	}
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		for running.Load() {
+			select {
+			case msg, ok := <-claim.Messages():
+				if !ok {
+					running.Store(false)
+					return
+				}
+
+				if len(msg.Value) == 0 {
+					continue
+				}
+
+				rwLock.Lock()
+				messages = append(messages, msg)
+				rwLock.Unlock()
+				sess.MarkMessage(msg, "")
+			case <-sess.Context().Done():
+				running.Store(false)
+				return
+			}
+		}
+	}()
+
+	wg.Wait()
 	return nil
 }
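The consume loop now runs in its own goroutine guarded by an atomic.Bool, so ConsumeClaim can block on wg.Wait() and return only after both the reader and the 100ms flush ticker have drained. One caveat worth noting: len(messages) in the ticker branch is read without taking rwLock. A self-contained illustration of the ticker-drained buffer pattern (it assumes running is set to true before the loops start, as the surrounding code presumably does; the int payloads stand in for sarama messages):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	var (
		mu       sync.Mutex
		messages []int // stands in for []*sarama.ConsumerMessage
		running  atomic.Bool
		wg       sync.WaitGroup
	)
	running.Store(true)
	ticker := time.NewTicker(100 * time.Millisecond)

	wg.Add(1)
	go func() { // drain loop: flush the buffer on every tick
		defer wg.Done()
		for range ticker.C {
			mu.Lock()
			batch := messages
			messages = nil
			mu.Unlock()
			if len(batch) == 0 {
				if !running.Load() {
					return // buffer empty and producer stopped: exit
				}
				continue
			}
			fmt.Println("flush", len(batch), "messages")
		}
	}()

	wg.Add(1)
	go func() { // producer loop: stands in for the claim.Messages() reader
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			mu.Lock()
			messages = append(messages, i)
			mu.Unlock()
		}
		running.Store(false) // signal the drain loop to finish
	}()

	wg.Wait()
}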

@@ -80,6 +80,28 @@ func (s *authServer) UserToken(ctx context.Context, req *pbauth.UserTokenReq) (*
 	return &resp, nil
 }
 
+func (s *authServer) GetUserToken(ctx context.Context, req *pbauth.GetUserTokenReq) (*pbauth.GetUserTokenResp, error) {
+	if err := authverify.CheckAdmin(ctx); err != nil {
+		return nil, err
+	}
+
+	resp := pbauth.GetUserTokenResp{}
+
+	if authverify.IsManagerUserID(req.UserID) {
+		return nil, errs.ErrNoPermission.Wrap("don't get Admin token")
+	}
+
+	if _, err := s.userRpcClient.GetUserInfo(ctx, req.UserID); err != nil {
+		return nil, err
+	}
+
+	token, err := s.authDatabase.CreateToken(ctx, req.UserID, int(req.PlatformID))
+	if err != nil {
+		return nil, err
+	}
+	resp.Token = token
+	resp.ExpireTimeSeconds = config.Config.TokenPolicy.Expire * 24 * 60 * 60
+	return &resp, nil
+}
+
 func (s *authServer) parseToken(ctx context.Context, tokensString string) (claims *tokenverify.Claims, err error) {
 	claims, err = tokenverify.GetClaimFromToken(tokensString, authverify.Secret())
 	if err != nil {
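Note: GetUserToken is admin-only (CheckAdmin) and additionally refuses to mint a token when the target user ID is itself a manager/admin account; it also verifies the target user exists before creating the token. ExpireTimeSeconds converts the configured expiry from days to seconds (Expire * 24 * 60 * 60).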

@@ -51,6 +51,11 @@ type conversationServer struct {
 	conversationNotificationSender *notification.ConversationNotificationSender
 }
 
+func (c *conversationServer) GetConversationNotReceiveMessageUserIDs(ctx context.Context, req *pbconversation.GetConversationNotReceiveMessageUserIDsReq) (*pbconversation.GetConversationNotReceiveMessageUserIDsResp, error) {
+	//TODO implement me
+	panic("implement me")
+}
+
 func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error {
 	rdb, err := cache.NewRedis()
 	if err != nil {

@@ -108,6 +108,11 @@ type groupServer struct {
 	msgRpcClient rpcclient.MessageRpcClient
 }
 
+func (s *groupServer) GetJoinedGroupIDs(ctx context.Context, req *pbgroup.GetJoinedGroupIDsReq) (*pbgroup.GetJoinedGroupIDsResp, error) {
+	//TODO implement me
+	panic("implement me")
+}
+
 func (s *groupServer) NotificationUserInfoUpdate(ctx context.Context, req *pbgroup.NotificationUserInfoUpdateReq) (*pbgroup.NotificationUserInfoUpdateResp, error) {
 	defer log.ZDebug(ctx, "NotificationUserInfoUpdate return")
 	members, err := s.db.FindGroupMemberUser(ctx, nil, req.UserID)

@@ -61,6 +61,11 @@ type userServer struct {
 	RegisterCenter registry.SvcDiscoveryRegistry
 }
 
+func (s *userServer) GetGroupOnlineUser(ctx context.Context, req *pbuser.GetGroupOnlineUserReq) (*pbuser.GetGroupOnlineUserResp, error) {
+	//TODO implement me
+	panic("implement me")
+}
+
 func Start(client registry.SvcDiscoveryRegistry, server *grpc.Server) error {
 	rdb, err := cache.NewRedis()
 	if err != nil {

@@ -48,8 +48,7 @@ func CheckAccessV3(ctx context.Context, ownerUserID string) (err error) {
 }
 
 func IsAppManagerUid(ctx context.Context) bool {
-	return (len(config.Config.Manager.UserID) > 0 && utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.Manager.UserID)) ||
-		utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.IMAdmin.UserID)
+	return (len(config.Config.Manager.UserID) > 0 && utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.Manager.UserID)) || utils.IsContain(mcontext.GetOpUserID(ctx), config.Config.IMAdmin.UserID)
 }
 
 func CheckAdmin(ctx context.Context) error {

@@ -16,18 +16,18 @@ package kafka
 
 import (
 	"context"
+	"errors"
+
+	"github.com/IBM/sarama"
+
 	"strings"
 
 	"github.com/OpenIMSDK/tools/errs"
 	"github.com/OpenIMSDK/tools/log"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	"github.com/IBM/sarama"
 )
 
 type MConsumerGroup struct {
+	ctx    context.Context
+	cancel context.CancelFunc
 	sarama.ConsumerGroup
 	groupID string
 	topics  []string
@@ -54,7 +54,10 @@ func NewMConsumerGroup(consumerConfig *MConsumerGroupConfig, topics, addrs []str
 	if err != nil {
 		return nil, errs.Wrap(err, strings.Join(topics, ","), strings.Join(addrs, ","), groupID, config.Config.Kafka.Username, config.Config.Kafka.Password)
 	}
+
+	ctx, cancel := context.WithCancel(context.Background())
 	return &MConsumerGroup{
+		ctx, cancel,
 		consumerGroup,
 		groupID,
 		topics,
@@ -68,7 +71,14 @@ func (mc *MConsumerGroup) GetContextFromMsg(cMsg *sarama.ConsumerMessage) contex
 func (mc *MConsumerGroup) RegisterHandleAndConsumer(ctx context.Context, handler sarama.ConsumerGroupHandler) {
 	log.ZDebug(context.Background(), "register consumer group", "groupID", mc.groupID)
 	for {
-		err := mc.ConsumerGroup.Consume(ctx, mc.topics, handler)
+		err := mc.ConsumerGroup.Consume(mc.ctx, mc.topics, handler)
+		if errors.Is(err, sarama.ErrClosedConsumerGroup) {
+			return
+		}
+		if mc.ctx.Err() != nil {
+			return
+		}
 		if err != nil {
 			log.ZWarn(ctx, "consume err", err, "topic", mc.topics, "groupID", mc.groupID)
 		}
@@ -77,3 +87,8 @@ func (mc *MConsumerGroup) RegisterHandleAndConsumer(ctx context.Context, handler
 		}
 	}
 }
+
+func (mc *MConsumerGroup) Close() {
+	mc.cancel()
+	mc.ConsumerGroup.Close()
+}
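The group now owns a private context: Close() cancels it and closes the underlying sarama group, and the consume loop treats both sarama.ErrClosedConsumerGroup and a canceled context as a clean exit rather than a retryable error. A minimal sketch of the same lifecycle with sarama stubbed out so it runs standalone (errClosed, group, consume, and run are illustrative stand-ins, not sarama APIs):

package main

import (
	"context"
	"errors"
	"fmt"
)

// errClosed stands in for sarama.ErrClosedConsumerGroup.
var errClosed = errors.New("consumer group closed")

type group struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func newGroup() *group {
	ctx, cancel := context.WithCancel(context.Background())
	return &group{ctx: ctx, cancel: cancel}
}

// consume stands in for sarama's ConsumerGroup.Consume: it returns the
// "closed" error once the group's own context has been canceled.
func (g *group) consume() error {
	if g.ctx.Err() != nil {
		return errClosed
	}
	return nil // a real Consume would block until rebalance or close
}

// run mirrors RegisterHandleAndConsumer's retry loop.
func (g *group) run() {
	for {
		err := g.consume()
		if errors.Is(err, errClosed) {
			return // closed explicitly: exit cleanly
		}
		if g.ctx.Err() != nil {
			return // context canceled: exit cleanly
		}
	}
}

// Close cancels the group's context, unblocking the retry loop.
func (g *group) Close() { g.cancel() }

func main() {
	g := newGroup()
	g.Close() // cancel first so run() exits on its first iteration
	g.run()
	fmt.Println("consumer group shut down")
}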

@@ -23,6 +23,7 @@ import (
 	"net/http"
 	"os"
 	"os/signal"
+	"path/filepath"
 	"strconv"
 	"sync"
 	"syscall"
@@ -134,10 +135,10 @@ func Start(
 	sigs := make(chan os.Signal, 1)
 	signal.Notify(sigs, syscall.SIGUSR1)
 	select {
 	case <-sigs:
-		print("receive process terminal SIGUSR1 exit\n")
+		progName := filepath.Base(os.Args[0])
+		print("\n\n%s receive process terminal SIGUSR1 exit 0\n\n", progName)
 		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
 		defer cancel()
 		if err := gracefulStopWithCtx(ctx, srv.GracefulStop); err != nil {
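One caveat: Go's predeclared print builtin does not interpret format verbs, so the %s above is written out literally and progName is printed after it as a separate argument. If formatted output was intended, a fmt-based variant would look like this (a sketch, not what the commit ships):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	progName := filepath.Base(os.Args[0])
	// fmt.Fprintf interprets %s; the builtin print does not.
	fmt.Fprintf(os.Stderr, "\n\n%s receive process terminal SIGUSR1 exit 0\n\n", progName)
}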

@@ -131,16 +131,24 @@ openim::log::error_exit() {
   exit "${code}"
 }
 
 # Log an error but keep going. Don't dump the stack or exit.
 openim::log::error() {
+  # Define red color
+  red='\033[0;31m'
+  # No color (reset)
+  nc='\033[0m' # No Color
+
   timestamp=$(date +"[%Y-%m-%d %H:%M:%S %Z]")
-  echo_log "!!! ${timestamp} ${1-}" >&2
+  # Apply red color for error message
+  echo_log "${red}!!! ${timestamp} ${1-}${nc}" >&2
   shift
   for message; do
-    echo_log "    ${message}" >&2
+    # Apply red color for subsequent lines of the error message
+    echo_log "${red}    ${message}${nc}" >&2
   done
 }
 
 # Print an usage message to stderr. The arguments are printed directly.
 openim::log::usage() {
   echo_log >&2

@@ -486,7 +486,7 @@ openim::util::stop_services_on_ports() {
     local pid=$(echo $line | awk '{print $2}')
 
     # Try to stop the service by killing its process.
-    if kill -TERM $pid; then
+    if kill -10 $pid; then
       stopped+=($port)
     else
       not_stopped+=($port)
@@ -563,7 +563,7 @@ openim::util::stop_services_with_name() {
     # If there's a Process ID, it means the service with the name is running.
     if [[ -n $pid ]]; then
       # Try to stop the service by killing its process.
-      if kill -TERM $pid 2>/dev/null; then
+      if kill -10 $pid 2>/dev/null; then
         stopped_this_time=true
       fi
     fi
@@ -1541,12 +1541,8 @@ openim::util::check_ports() {
   if [[ "$OSTYPE" == "linux-gnu"* ]]; then
     if command -v ss > /dev/null 2>&1; then
       info=$(ss -ltnp | grep ":$port" || true)
-      openim::color::echo $COLOR_RED "!!!!!!!! port=$port"
-      openim::color::echo $COLOR_RED "!!!!!!!! info=$info"
     else
       info=$(netstat -ltnp | grep ":$port" || true)
-      openim::color::echo $COLOR_RED "!!!!!!!! port=$port"
-      openim::color::echo $COLOR_RED "!!!!!!!! info=$info"
     fi
   elif [[ "$OSTYPE" == "darwin"* ]]; then
     # For macOS, use lsof
@@ -1726,7 +1722,7 @@ openim::util::stop_services_on_ports() {
     local pid=$(echo $line | awk '{print $2}')
 
     # Try to stop the service by killing its process.
-    if kill -TERM $pid; then
+    if kill -10 $pid; then
       stopped+=($port)
     else
       not_stopped+=($port)
@@ -1803,7 +1799,7 @@ openim::util::stop_services_with_name() {
     # If there's a Process ID, it means the service with the name is running.
     if [[ -n $pid ]]; then
       # Try to stop the service by killing its process.
-      if kill -TERM $pid 2>/dev/null; then
+      if kill -10 $pid 2>/dev/null; then
         stopped_this_time=true
       fi
     fi
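These script changes swap SIGTERM for signal number 10, which is SIGUSR1 on x86 and ARM Linux; that matches the syscall.SIGUSR1 handler registered in startrpc above, so stopping a service now triggers the graceful-stop path instead of a hard termination. A quick way to confirm the numbering (prints 10 on x86/ARM Linux; other platforms, e.g. macOS or MIPS, use different numbers):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	fmt.Println(int(syscall.SIGUSR1)) // 10 on x86/ARM Linux
}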

@@ -82,4 +82,4 @@ execute_scripts
 
 openim::log::info "\n## Post Starting OpenIM services"
 ${TOOLS_START_SCRIPTS_PATH} openim::tools::post-start
-openim::log::success "✨  All OpenIM services have been successfully started!"
+openim::color::echo $COLOR_BLUE "✨  All OpenIM services have been successfully started!"