Merge branch 'openimsdk:main' into main

pull/2910/head
commit 8a3d692838, authored by chao and committed via GitHub 10 months ago

@@ -0,0 +1,78 @@
name: Release Changelog

on:
  release:
    types: [released]

permissions:
  contents: write
  pull-requests: write

jobs:
  update-changelog:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Run Go Changelog Generator
        run: |
          # Run the Go changelog generator, passing the release tag if available
          if [ "${{ github.event.release.tag_name }}" = "latest" ]; then
            go run tools/changelog/changelog.go > "${{ github.event.release.tag_name }}-changelog.md"
          else
            go run tools/changelog/changelog.go "${{ github.event.release.tag_name }}" > "${{ github.event.release.tag_name }}-changelog.md"
          fi

      - name: Handle changelog files
        run: |
          # Ensure that the CHANGELOG directory exists
          mkdir -p CHANGELOG

          # Extract Major.Minor version by removing the 'v' prefix from the tag name
          TAG_NAME=${{ github.event.release.tag_name }}
          CHANGELOG_VERSION_NUMBER=$(echo "$TAG_NAME" | sed 's/^v//' | grep -oP '^\d+\.\d+')

          # Define the new changelog file path
          CHANGELOG_FILENAME="CHANGELOG-$CHANGELOG_VERSION_NUMBER.md"
          CHANGELOG_PATH="CHANGELOG/$CHANGELOG_FILENAME"

          # Check if the changelog file for the current release already exists
          if [ -f "$CHANGELOG_PATH" ]; then
            # If the file exists, append the new changelog to the existing one
            cat "$CHANGELOG_PATH" >> "${TAG_NAME}-changelog.md"
            # Overwrite the existing changelog with the updated content
            mv "${TAG_NAME}-changelog.md" "$CHANGELOG_PATH"
          else
            # If the changelog file doesn't exist, rename the temp changelog file to the new changelog file
            mv "${TAG_NAME}-changelog.md" "$CHANGELOG_PATH"

            # Ensure that README.md exists
            if [ ! -f "CHANGELOG/README.md" ]; then
              echo -e "# CHANGELOGs\n\n" > CHANGELOG/README.md
            fi

            # Add the new changelog entry at the top of the README.md
            if ! grep -q "\[$CHANGELOG_FILENAME\]" CHANGELOG/README.md; then
              sed -i "3i- [$CHANGELOG_FILENAME](./$CHANGELOG_FILENAME)" CHANGELOG/README.md
              # Remove the extra newline character added by sed
              # sed -i '4d' CHANGELOG/README.md
            fi
          fi

      - name: Clean up
        run: |
          # Remove any temporary files that were created during the process
          rm -f "${{ github.event.release.tag_name }}-changelog.md"

      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v7.0.5
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: "Update CHANGELOG for release ${{ github.event.release.tag_name }}"
          title: "Update CHANGELOG for release ${{ github.event.release.tag_name }}"
          body: "This PR updates the CHANGELOG files for release ${{ github.event.release.tag_name }}"
          branch: changelog-${{ github.event.release.tag_name }}
          base: main
          delete-branch: true
          labels: changelog
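A note on the "Handle changelog files" step above: it derives the target file name from the release tag by stripping the leading "v" and keeping only the major.minor part. Below is a minimal Go sketch of that same derivation; the tag value v3.8.3 is hypothetical, and the workflow itself does this with sed and grep rather than Go.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Hypothetical release tag; the workflow reads it from github.event.release.tag_name.
	tag := "v3.8.3"

	// Mirror the shell pipeline `sed 's/^v//' | grep -oP '^\d+\.\d+'`:
	// strip the leading "v", then keep only the leading "major.minor".
	version := strings.TrimPrefix(tag, "v")
	majorMinor := regexp.MustCompile(`^\d+\.\d+`).FindString(version)

	// The workflow then writes the generated notes to CHANGELOG/CHANGELOG-<major.minor>.md.
	fmt.Printf("CHANGELOG/CHANGELOG-%s.md\n", majorMinor) // prints: CHANGELOG/CHANGELOG-3.8.md
}
```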

@ -149,7 +149,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
go_version: ["1.21"] go_version: ["1.22"]
steps: steps:
- name: Checkout Repository - name: Checkout Repository

@ -13,8 +13,8 @@ prometheus:
ports: [ 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12182, 12183, 12184, 12185, 12186 ] ports: [ 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12182, 12183, 12184, 12185, 12186 ]
maxConcurrentWorkers: 3 maxConcurrentWorkers: 3
#Use geTui for offline push notifications, or choose fcm or jpush; corresponding configuration settings must be specified. #Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified.
enable: geTui enable:
geTui: geTui:
pushUrl: https://restapi.getui.com/v2/$appId pushUrl: https://restapi.getui.com/v2/$appId
masterSecret: masterSecret:

@@ -15,15 +15,4 @@ imAdminUserID: [ imAdmin ]
 # 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time
 multiLogin:
   policy: 1
   maxNumOneEnd: 30
-  customizeLoginNum:
-    ios: 1
-    android: 1
-    windows: 1
-    osx: 1
-    web: 1
-    miniWeb: 1
-    linux: 1
-    aPad: 1
-    iPad: 1
-    admin: 1

@ -10,11 +10,11 @@ require (
github.com/gin-gonic/gin v1.9.1 github.com/gin-gonic/gin v1.9.1
github.com/go-playground/validator/v10 v10.20.0 github.com/go-playground/validator/v10 v10.20.0
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang-jwt/jwt/v4 v4.5.1
github.com/gorilla/websocket v1.5.1 github.com/gorilla/websocket v1.5.1
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure v1.5.0
github.com/openimsdk/protocol v0.0.72-alpha.54 github.com/openimsdk/protocol v0.0.72-alpha.55
github.com/openimsdk/tools v0.0.50-alpha.32 github.com/openimsdk/tools v0.0.50-alpha.32
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_golang v1.18.0

@@ -158,8 +158,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -319,8 +319,8 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
 github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
 github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
 github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
-github.com/openimsdk/protocol v0.0.72-alpha.54 h1:opato7N4QjjRq/SHD54bDSVBpOEEDp1VLWVk5Os2A9s=
-github.com/openimsdk/protocol v0.0.72-alpha.54/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
+github.com/openimsdk/protocol v0.0.72-alpha.55 h1:9PPWPHvkFk3neBSbNr+IoOdKIFjxTvEqUfMK/TEq1+8=
+github.com/openimsdk/protocol v0.0.72-alpha.55/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
 github.com/openimsdk/tools v0.0.50-alpha.32 h1:JEsUFHFnaYg230TG+Ke3SUnaA2h44t4kABAzEdv5VZw=
 github.com/openimsdk/tools v0.0.50-alpha.32/go.mod h1:r5U6RbxcR4xhKb2fhTmKGC9Yt5LcErHBVt3lhXQIHSo=
 github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=

@@ -74,7 +74,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
     case BestSpeed:
         r.Use(gzip.Gzip(gzip.BestSpeed))
     }
-    r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
+    r.Use(prommetricsGin(), gin.RecoveryWithWriter(gin.DefaultErrorWriter, mw.GinPanicErr), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
     u := NewUserApi(*userRpc)
     m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID)
     j := jssdk.NewJSSdkApi(userRpc.Client, friendRpc.Client, groupRpc.Client, messageRpc.Client, conversationRpc.Client)

@@ -18,6 +18,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "github.com/openimsdk/tools/mw"
     "runtime/debug"
     "sync"
     "sync/atomic"
@@ -375,6 +376,11 @@ func (c *Client) writeBinaryMsg(resp Resp) error {
 func (c *Client) activeHeartbeat(ctx context.Context) {
     if c.PlatformID == constant.WebPlatformID {
         go func() {
+            defer func() {
+                if r := recover(); r != nil {
+                    mw.PanicStackToLog(ctx, r)
+                }
+            }()
             log.ZDebug(ctx, "server initiative send heartbeat start.")
             ticker := time.NewTicker(pingPeriod)
             defer ticker.Stop()
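Several hunks in this commit add the same guard: a deferred recover at the top of a newly started goroutine, reported through mw.PanicStackToLog from openimsdk/tools. The motivation is that an unrecovered panic in any goroutine terminates the whole process, and recover only catches panics raised on the same goroutine's stack. A minimal, self-contained sketch of the pattern follows, using the standard log package instead of the openimsdk middleware:

```go
package main

import (
	"log"
	"runtime/debug"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(1)

	go func() {
		defer wg.Done()
		// Without this deferred recover, the panic below would crash the
		// entire process; a recover in main would not help, because recover
		// only works on the goroutine where the panic occurred.
		defer func() {
			if r := recover(); r != nil {
				log.Printf("recovered panic: %v\n%s", r, debug.Stack())
			}
		}()
		panic("boom")
	}()

	wg.Wait()
	log.Println("process still alive")
}
```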

@@ -136,6 +136,11 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
     if config.MsgTransfer.Prometheus.Enable {
         go func() {
+            defer func() {
+                if r := recover(); r != nil {
+                    mw.PanicStackToLog(m.ctx, r)
+                }
+            }()
             prometheusPort, err := datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
             if err != nil {
                 netErr = err

@@ -19,6 +19,7 @@ import (
     "encoding/json"
     "errors"
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
+    "github.com/openimsdk/tools/mw"
     "strconv"
     "strings"
     "sync"
@@ -346,6 +347,12 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con
     }
 }
 func (och *OnlineHistoryRedisConsumerHandler) HandleUserHasReadSeqMessages(ctx context.Context) {
+    defer func() {
+        if r := recover(); r != nil {
+            mw.PanicStackToLog(ctx, r)
+        }
+    }()
     defer och.wg.Done()
     for msg := range och.conversationUserHasReadChan {

@@ -29,5 +29,6 @@ type Dummy struct {
 func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error {
     log.ZDebug(ctx, "dummy push")
+    log.ZWarn(ctx, "Dummy push", nil, "ps", "The offline push is not configured. To configure it, please go to config/openim-push.yml.")
     return nil
 }

@@ -23,8 +23,6 @@ import (
     "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
-    "github.com/openimsdk/tools/log"
-    "github.com/openimsdk/tools/mcontext"
     "strings"
 )
@@ -51,7 +49,6 @@ func NewOfflinePusher(pushConf *config.Push, cache cache.ThirdCache, fcmConfigPa
         offlinePusher = jpush.NewClient(pushConf)
     default:
         offlinePusher = dummy.NewClient()
-        log.ZWarn(mcontext.WithMustInfoCtx([]string{"push start", "admin", "admin", ""}), "Unknown push config", nil)
     }
     return offlinePusher, nil
 }

@@ -166,17 +166,21 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *
             return nil
         }
     }
-    offlinePushUserID := []string{msg.RecvID}
+    needOfflinePushUserID := []string{msg.RecvID}
+    var offlinePushUserID []string
     //receiver offline push
-    if err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush,
-        offlinePushUserID, msg, nil); err != nil {
+    if err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserID, msg, &offlinePushUserID); err != nil {
         return err
     }
     log.ZInfo(ctx, "webhookBeforeOfflinePush end")
-    err = c.offlinePushMsg(ctx, msg, offlinePushUserID)
+    if len(offlinePushUserID) > 0 {
+        needOfflinePushUserID = offlinePushUserID
+    }
+    err = c.offlinePushMsg(ctx, msg, needOfflinePushUserID)
     if err != nil {
-        log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePushUserID", offlinePushUserID, "msg", msg)
+        log.ZWarn(ctx, "offlinePushMsg failed", err, "needOfflinePushUserID", needOfflinePushUserID, "msg", msg)
         return nil
     }

@@ -16,6 +16,7 @@ package auth
 import (
     "context"
+    "errors"
     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
@@ -66,6 +67,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
             config.Share.Secret,
             config.RpcConfig.TokenPolicy.Expire,
             config.Share.MultiLogin,
+            config.Share.IMAdminUserID,
         ),
         config: config,
     })
@@ -129,6 +131,10 @@ func (s *authServer) parseToken(ctx context.Context, tokensString string) (claim
     if err != nil {
         return nil, errs.Wrap(err)
     }
+    isAdmin := authverify.IsManagerUserID(claims.UserID, s.config.Share.IMAdminUserID)
+    if isAdmin {
+        return claims, nil
+    }
     m, err := s.authDatabase.GetTokensWithoutError(ctx, claims.UserID, claims.PlatformID)
     if err != nil {
         return nil, err
@@ -190,7 +196,7 @@ func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID
     }
     m, err := s.authDatabase.GetTokensWithoutError(ctx, userID, int(platformID))
-    if err != nil && err != redis.Nil {
+    if err != nil && errors.Is(err, redis.Nil) {
         return err
     }
     for k := range m {
@@ -208,7 +214,7 @@ func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID
 func (s *authServer) InvalidateToken(ctx context.Context, req *pbauth.InvalidateTokenReq) (*pbauth.InvalidateTokenResp, error) {
     m, err := s.authDatabase.GetTokensWithoutError(ctx, req.UserID, int(req.PlatformID))
-    if err != nil && err != redis.Nil {
+    if err != nil && errors.Is(err, redis.Nil) {
         return nil, err
     }
     if m == nil {

@@ -16,6 +16,7 @@ package msg
 import (
     "context"
+    "errors"
     cbapi "github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
     "github.com/openimsdk/protocol/constant"
@@ -108,7 +109,7 @@ func (m *msgServer) MarkMsgsAsRead(ctx context.Context, req *msg.MarkMsgsAsReadR
         return nil, err
     }
     currentHasReadSeq, err := m.MsgDatabase.GetHasReadSeq(ctx, req.UserID, req.ConversationID)
-    if err != nil && errs.Unwrap(err) != redis.Nil {
+    if err != nil && !errors.Is(err, redis.Nil) {
         return nil, err
     }
     if hasReadSeq > currentHasReadSeq {
@@ -136,7 +137,7 @@ func (m *msgServer) MarkConversationAsRead(ctx context.Context, req *msg.MarkCon
         return nil, err
     }
     hasReadSeq, err := m.MsgDatabase.GetHasReadSeq(ctx, req.UserID, req.ConversationID)
-    if err != nil && errs.Unwrap(err) != redis.Nil {
+    if err != nil && errors.Is(err, redis.Nil) {
         return nil, err
     }
     var seqs []int64
@@ -180,14 +181,23 @@ func (m *msgServer) MarkConversationAsRead(ctx context.Context, req *msg.MarkCon
         req.UserID, seqs, hasReadSeq)
     }
-    reqCall := &cbapi.CallbackGroupMsgReadReq{
-        SendID:       conversation.OwnerUserID,
-        ReceiveID:    req.UserID,
-        UnreadMsgNum: req.HasReadSeq,
-        ContentType:  int64(conversation.ConversationType),
-    }
-    m.webhookAfterGroupMsgRead(ctx, &m.config.WebhooksConfig.AfterGroupMsgRead, reqCall)
+    if conversation.ConversationType == constant.SingleChatType {
+        reqCall := &cbapi.CallbackSingleMsgReadReq{
+            ConversationID: conversation.ConversationID,
+            UserID:         conversation.OwnerUserID,
+            Seqs:           req.Seqs,
+            ContentType:    conversation.ConversationType,
+        }
+        m.webhookAfterSingleMsgRead(ctx, &m.config.WebhooksConfig.AfterSingleMsgRead, reqCall)
+    } else if conversation.ConversationType == constant.ReadGroupChatType {
+        reqCall := &cbapi.CallbackGroupMsgReadReq{
+            SendID:       conversation.OwnerUserID,
+            ReceiveID:    req.UserID,
+            UnreadMsgNum: req.HasReadSeq,
+            ContentType:  int64(conversation.ConversationType),
+        }
+        m.webhookAfterGroupMsgRead(ctx, &m.config.WebhooksConfig.AfterGroupMsgRead, reqCall)
+    }
     return &msg.MarkConversationAsReadResp{}, nil
 }

@@ -16,6 +16,7 @@ package msg
 import (
     "context"
+    "github.com/openimsdk/tools/mw"
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
     "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
@@ -83,8 +84,15 @@ func (m *msgServer) sendMsgGroupChat(ctx context.Context, req *pbmsg.SendMsgReq)
 }
 func (m *msgServer) setConversationAtInfo(nctx context.Context, msg *sdkws.MsgData) {
     log.ZDebug(nctx, "setConversationAtInfo", "msg", msg)
+    defer func() {
+        if r := recover(); r != nil {
+            mw.PanicStackToLog(nctx, r)
+        }
+    }()
     ctx := mcontext.NewCtx("@@@" + mcontext.GetOperationID(nctx))
     var atUserID []string
@@ -171,9 +179,6 @@ func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq
         prommetrics.SingleChatMsgProcessFailedCounter.Inc()
         return nil, nil
     } else {
-        if err = m.webhookBeforeSendSingleMsg(ctx, &m.config.WebhooksConfig.BeforeSendSingleMsg, req); err != nil {
-            return nil, err
-        }
         if err := m.webhookBeforeMsgModify(ctx, &m.config.WebhooksConfig.BeforeMsgModify, req); err != nil {
             return nil, err
         }

@@ -16,15 +16,15 @@ package msg
 import (
     "context"
+    "errors"
     pbmsg "github.com/openimsdk/protocol/msg"
-    "github.com/openimsdk/tools/errs"
     "github.com/redis/go-redis/v9"
     "sort"
 )
 func (m *msgServer) GetConversationMaxSeq(ctx context.Context, req *pbmsg.GetConversationMaxSeqReq) (*pbmsg.GetConversationMaxSeqResp, error) {
     maxSeq, err := m.MsgDatabase.GetMaxSeq(ctx, req.ConversationID)
-    if err != nil && errs.Unwrap(err) != redis.Nil {
+    if err != nil && !errors.Is(err, redis.Nil) {
        return nil, err
     }
     return &pbmsg.GetConversationMaxSeqResp{MaxSeq: maxSeq}, nil
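Several hunks in this commit replace comparisons such as err != redis.Nil or errs.Unwrap(err) != redis.Nil with errors.Is, which still matches the sentinel after it has been wrapped with additional context. A small self-contained sketch of the difference follows, using a stand-in sentinel and a made-up conversation ID rather than go-redis:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for a sentinel error such as redis.Nil ("key does not exist").
var errNotFound = errors.New("not found")

func lookup() error {
	// Wrapping adds context but breaks plain == comparisons against the sentinel.
	return fmt.Errorf("get max seq for conversation %q: %w", "si_demo", errNotFound)
}

func main() {
	err := lookup()
	fmt.Println(err == errNotFound)          // false: the sentinel is wrapped
	fmt.Println(errors.Is(err, errNotFound)) // true: errors.Is walks the wrap chain
}
```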

@@ -59,6 +59,9 @@ func (m *msgServer) messageVerification(ctx context.Context, data *msg.SendMsgRe
         data.MsgData.ContentType >= constant.NotificationBegin {
         return nil
     }
+    if err := m.webhookBeforeSendSingleMsg(ctx, &m.config.WebhooksConfig.BeforeSendSingleMsg, data); err != nil {
+        return err
+    }
     black, err := m.FriendLocalCache.IsBlack(ctx, data.MsgData.SendID, data.MsgData.RecvID)
     if err != nil {
         return err

@@ -369,20 +369,8 @@ type Share struct {
 }
 type MultiLogin struct {
     Policy       int `mapstructure:"policy"`
     MaxNumOneEnd int `mapstructure:"maxNumOneEnd"`
-    CustomizeLoginNum struct {
-        IOS     int `mapstructure:"ios"`
-        Android int `mapstructure:"android"`
-        Windows int `mapstructure:"windows"`
-        OSX     int `mapstructure:"osx"`
-        Web     int `mapstructure:"web"`
-        MiniWeb int `mapstructure:"miniWeb"`
-        Linux   int `mapstructure:"linux"`
-        APad    int `mapstructure:"aPad"`
-        IPad    int `mapstructure:"iPad"`
-        Admin   int `mapstructure:"admin"`
-    } `mapstructure:"customizeLoginNum"`
 }
 type RpcRegisterName struct {

@@ -38,7 +38,7 @@ const (
 func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, opts *rockscache.Options, db database.Conversation) cache.ConversationCache {
     batchHandler := NewBatchDeleterRedis(rdb, opts, []string{localCache.Conversation.Topic})
     c := localCache.Conversation
-    log.ZDebug(context.Background(), "black local cache init", "Topic", c.Topic, "SlotNum", c.SlotNum, "SlotSize", c.SlotSize, "enable", c.Enable())
+    log.ZDebug(context.Background(), "conversation local cache init", "Topic", c.Topic, "SlotNum", c.SlotNum, "SlotSize", c.SlotSize, "enable", c.Enable())
     return &ConversationRedisCache{
         BatchDeleter: batchHandler,
         rcClient:     rockscache.NewClient(rdb, *opts),

@@ -25,9 +25,8 @@ type AuthDatabase interface {
 }
 type multiLoginConfig struct {
     Policy       int
     MaxNumOneEnd int
-    CustomizeLoginNum map[int]int
 }
 type authDatabase struct {
@@ -35,25 +34,16 @@ type authDatabase struct {
     accessSecret string
     accessExpire int64
     multiLogin   multiLoginConfig
+    adminUserIDs []string
 }
-func NewAuthDatabase(cache cache.TokenModel, accessSecret string, accessExpire int64, multiLogin config.MultiLogin) AuthDatabase {
+func NewAuthDatabase(cache cache.TokenModel, accessSecret string, accessExpire int64, multiLogin config.MultiLogin, adminUserIDs []string) AuthDatabase {
     return &authDatabase{cache: cache, accessSecret: accessSecret, accessExpire: accessExpire, multiLogin: multiLoginConfig{
         Policy:       multiLogin.Policy,
         MaxNumOneEnd: multiLogin.MaxNumOneEnd,
-        CustomizeLoginNum: map[int]int{
-            constant.IOSPlatformID:        multiLogin.CustomizeLoginNum.IOS,
-            constant.AndroidPlatformID:    multiLogin.CustomizeLoginNum.Android,
-            constant.WindowsPlatformID:    multiLogin.CustomizeLoginNum.Windows,
-            constant.OSXPlatformID:        multiLogin.CustomizeLoginNum.OSX,
-            constant.WebPlatformID:        multiLogin.CustomizeLoginNum.Web,
-            constant.MiniWebPlatformID:    multiLogin.CustomizeLoginNum.MiniWeb,
-            constant.LinuxPlatformID:      multiLogin.CustomizeLoginNum.Linux,
-            constant.AndroidPadPlatformID: multiLogin.CustomizeLoginNum.APad,
-            constant.IPadPlatformID:       multiLogin.CustomizeLoginNum.IPad,
-            constant.AdminPlatformID:      multiLogin.CustomizeLoginNum.Admin,
-        },
-    }}
+    },
+        adminUserIDs: adminUserIDs,
+    }
 }
 // If the result is empty.
@@ -90,27 +80,31 @@ func (a *authDatabase) BatchSetTokenMapByUidPid(ctx context.Context, tokens []st
 // Create Token.
 func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformID int) (string, error) {
-    tokens, err := a.cache.GetAllTokensWithoutError(ctx, userID)
-    if err != nil {
-        return "", err
-    }
-    deleteTokenKey, kickedTokenKey, err := a.checkToken(ctx, tokens, platformID)
-    if err != nil {
-        return "", err
-    }
-    if len(deleteTokenKey) != 0 {
-        err = a.cache.DeleteTokenByUidPid(ctx, userID, platformID, deleteTokenKey)
-        if err != nil {
-            return "", err
-        }
-    }
-    if len(kickedTokenKey) != 0 {
-        for _, k := range kickedTokenKey {
-            err := a.cache.SetTokenFlagEx(ctx, userID, platformID, k, constant.KickedToken)
-            if err != nil {
-                return "", err
-            }
-            log.ZDebug(ctx, "kicked token in create token", "token", k)
-        }
-    }
+    isAdmin := authverify.IsManagerUserID(userID, a.adminUserIDs)
+    if !isAdmin {
+        tokens, err := a.cache.GetAllTokensWithoutError(ctx, userID)
+        if err != nil {
+            return "", err
+        }
+        deleteTokenKey, kickedTokenKey, err := a.checkToken(ctx, tokens, platformID)
+        if err != nil {
+            return "", err
+        }
+        if len(deleteTokenKey) != 0 {
+            err = a.cache.DeleteTokenByUidPid(ctx, userID, platformID, deleteTokenKey)
+            if err != nil {
+                return "", err
+            }
+        }
+        if len(kickedTokenKey) != 0 {
+            for _, k := range kickedTokenKey {
+                err := a.cache.SetTokenFlagEx(ctx, userID, platformID, k, constant.KickedToken)
+                if err != nil {
+                    return "", err
+                }
+                log.ZDebug(ctx, "kicked token in create token", "token", k)
+            }
+        }
+    }
@@ -121,9 +115,12 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI
         return "", errs.WrapMsg(err, "token.SignedString")
     }
-    if err = a.cache.SetTokenFlagEx(ctx, userID, platformID, tokenString, constant.NormalToken); err != nil {
-        return "", err
+    if !isAdmin {
+        if err = a.cache.SetTokenFlagEx(ctx, userID, platformID, tokenString, constant.NormalToken); err != nil {
+            return "", err
+        }
     }
     return tokenString, nil
 }
@@ -226,16 +223,16 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string
         return nil, nil, errs.New("unknown multiLogin policy").Wrap()
     }
-    var adminTokenMaxNum = a.multiLogin.MaxNumOneEnd
-    if a.multiLogin.Policy == constant.Customize {
-        adminTokenMaxNum = a.multiLogin.CustomizeLoginNum[constant.AdminPlatformID]
-    }
-    l := len(adminToken)
-    if platformID == constant.AdminPlatformID {
-        l++
-    }
-    if l > adminTokenMaxNum {
-        kickToken = append(kickToken, adminToken[:l-adminTokenMaxNum]...)
-    }
+    //var adminTokenMaxNum = a.multiLogin.MaxNumOneEnd
+    //if a.multiLogin.Policy == constant.Customize {
+    //    adminTokenMaxNum = a.multiLogin.CustomizeLoginNum[constant.AdminPlatformID]
+    //}
+    //l := len(adminToken)
+    //if platformID == constant.AdminPlatformID {
+    //    l++
+    //}
+    //if l > adminTokenMaxNum {
+    //    kickToken = append(kickToken, adminToken[:l-adminTokenMaxNum]...)
+    //}
     return deleteToken, kickToken, nil
 }

@@ -372,7 +372,7 @@ func (db *commonMsgDatabase) getMsgBySeqsRange(ctx context.Context, userID strin
 // This ensures that their message retrieval starts from the point they joined.
 func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (int64, int64, []*sdkws.MsgData, error) {
     userMinSeq, err := db.seqUser.GetUserMinSeq(ctx, conversationID, userID)
-    if err != nil && errs.Unwrap(err) != redis.Nil {
+    if err != nil && !errors.Is(err, redis.Nil) {
         return 0, 0, nil, err
     }
     minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
@@ -490,7 +490,7 @@ func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, co
     }
     successMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, newSeqs)
     if err != nil {
-        if err != redis.Nil {
+        if errors.Is(err, redis.Nil) {
             log.ZError(ctx, "get message from redis exception", err, "failedSeqs", failedSeqs, "conversationID", conversationID)
         }
     }

@@ -17,12 +17,18 @@ package rpccache
 import (
     "context"
     "encoding/json"
+    "github.com/openimsdk/tools/mw"
     "github.com/openimsdk/tools/log"
     "github.com/redis/go-redis/v9"
 )
 func subscriberRedisDeleteCache(ctx context.Context, client redis.UniversalClient, channel string, del func(ctx context.Context, key ...string)) {
+    defer func() {
+        if r := recover(); r != nil {
+            mw.PanicStackToLog(ctx, r)
+        }
+    }()
     for message := range client.Subscribe(ctx, channel).Channel() {
         log.ZDebug(ctx, "subscriberRedisDeleteCache", "channel", channel, "payload", message.Payload)
         var keys []string

@@ -0,0 +1,198 @@
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"regexp"
	"strings"
)

// You can specify a tag as a command line argument to generate the changelog for a specific version.
// Example: go run tools/changelog/changelog.go v0.0.33
// If no tag is provided, the latest release will be used.

// Setting repo owner and repo name by generate changelog
const (
	repoOwner = "openimsdk"
	repoName  = "open-im-server"
)

// GitHubRepo struct represents the repo details.
type GitHubRepo struct {
	Owner         string
	Repo          string
	FullChangelog string
}

// ReleaseData represents the JSON structure for release data.
type ReleaseData struct {
	TagName   string `json:"tag_name"`
	Body      string `json:"body"`
	HtmlUrl   string `json:"html_url"`
	Published string `json:"published_at"`
}

// Method to classify and format release notes.
func (g *GitHubRepo) classifyReleaseNotes(body string) map[string][]string {
	result := map[string][]string{
		"feat":     {},
		"fix":      {},
		"chore":    {},
		"refactor": {},
		"build":    {},
		"other":    {},
	}

	// Regular expression to extract PR number and URL (case insensitive)
	rePR := regexp.MustCompile(`(?i)in (https://github\.com/[^\s]+/pull/(\d+))`)

	// Split the body into individual lines.
	lines := strings.Split(body, "\n")

	for _, line := range lines {
		// Skip lines that contain "deps: Merge"
		if strings.Contains(strings.ToLower(line), "deps: merge #") {
			continue
		}

		// Use a regular expression to extract Full Changelog link and its title (case insensitive).
		if strings.Contains(strings.ToLower(line), "**full changelog**") {
			matches := regexp.MustCompile(`(?i)\*\*full changelog\*\*: (https://github\.com/[^\s]+/compare/([^\s]+))`).FindStringSubmatch(line)
			if len(matches) > 2 {
				// Format the Full Changelog link with title
				g.FullChangelog = fmt.Sprintf("[%s](%s)", matches[2], matches[1])
			}
			continue // Skip further processing for this line.
		}

		if strings.HasPrefix(line, "*") {
			var category string

			// Use strings.ToLower to make the matching case insensitive
			lowerLine := strings.ToLower(line)

			// Determine the category based on the prefix (case insensitive).
			if strings.HasPrefix(lowerLine, "* feat") {
				category = "feat"
			} else if strings.HasPrefix(lowerLine, "* fix") {
				category = "fix"
			} else if strings.HasPrefix(lowerLine, "* chore") {
				category = "chore"
			} else if strings.HasPrefix(lowerLine, "* refactor") {
				category = "refactor"
			} else if strings.HasPrefix(lowerLine, "* build") {
				category = "build"
			} else {
				category = "other"
			}

			// Extract PR number and URL (case insensitive)
			matches := rePR.FindStringSubmatch(line)
			if len(matches) == 3 {
				prURL := matches[1]
				prNumber := matches[2]
				// Format the line with the PR link and use original content for the final result
				formattedLine := fmt.Sprintf("* %s [#%s](%s)", strings.Split(line, " by ")[0][2:], prNumber, prURL)
				result[category] = append(result[category], formattedLine)
			} else {
				// If no PR link is found, just add the line as is
				result[category] = append(result[category], line)
			}
		}
	}

	return result
}

// Method to generate the final changelog.
func (g *GitHubRepo) generateChangelog(tag, date, htmlURL, body string) string {
	sections := g.classifyReleaseNotes(body)

	// Convert ISO 8601 date to simpler format (YYYY-MM-DD)
	formattedDate := date[:10]

	// Changelog header with tag, date, and links.
	changelog := fmt.Sprintf("## [%s](%s) \t(%s)\n\n", tag, htmlURL, formattedDate)

	if len(sections["feat"]) > 0 {
		changelog += "### New Features\n" + strings.Join(sections["feat"], "\n") + "\n\n"
	}
	if len(sections["fix"]) > 0 {
		changelog += "### Bug Fixes\n" + strings.Join(sections["fix"], "\n") + "\n\n"
	}
	if len(sections["chore"]) > 0 {
		changelog += "### Chores\n" + strings.Join(sections["chore"], "\n") + "\n\n"
	}
	if len(sections["refactor"]) > 0 {
		changelog += "### Refactors\n" + strings.Join(sections["refactor"], "\n") + "\n\n"
	}
	if len(sections["build"]) > 0 {
		changelog += "### Builds\n" + strings.Join(sections["build"], "\n") + "\n\n"
	}
	if len(sections["other"]) > 0 {
		changelog += "### Others\n" + strings.Join(sections["other"], "\n") + "\n\n"
	}

	if g.FullChangelog != "" {
		changelog += fmt.Sprintf("**Full Changelog**: %s\n", g.FullChangelog)
	}

	return changelog
}

// Method to fetch release data from GitHub API.
func (g *GitHubRepo) fetchReleaseData(version string) (*ReleaseData, error) {
	var apiURL string
	if version == "" {
		// Fetch the latest release.
		apiURL = fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", g.Owner, g.Repo)
	} else {
		// Fetch a specific version.
		apiURL = fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/tags/%s", g.Owner, g.Repo, version)
	}

	resp, err := http.Get(apiURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var releaseData ReleaseData
	err = json.Unmarshal(body, &releaseData)
	if err != nil {
		return nil, err
	}

	return &releaseData, nil
}

func main() {
	repo := &GitHubRepo{Owner: repoOwner, Repo: repoName}

	// Get the version from command line arguments, if provided
	var version string // Default is use latest
	if len(os.Args) > 1 {
		version = os.Args[1] // Use the provided version
	}

	// Fetch release data (either for latest or specific version)
	releaseData, err := repo.fetchReleaseData(version)
	if err != nil {
		fmt.Println("Error fetching release data:", err)
		return
	}

	// Generate and print the formatted changelog
	changelog := repo.generateChangelog(releaseData.TagName, releaseData.Published, releaseData.HtmlUrl, releaseData.Body)
	fmt.Println(changelog)
}
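To illustrate what classifyReleaseNotes does to each bullet, here is a small standalone sketch that applies the same regex and formatting to a made-up release-notes line; the PR number, author, and title are hypothetical.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Hypothetical auto-generated release-notes line, as produced by GitHub.
	line := "* fix: correct read-seq handling by @someone in https://github.com/openimsdk/open-im-server/pull/1234"

	// Same pattern the changelog tool uses to pull out the PR URL and number.
	rePR := regexp.MustCompile(`(?i)in (https://github\.com/[^\s]+/pull/(\d+))`)

	if m := rePR.FindStringSubmatch(line); len(m) == 3 {
		prURL, prNumber := m[1], m[2]
		// Drop the " by @author in <url>" tail and append a markdown PR link.
		formatted := fmt.Sprintf("* %s [#%s](%s)", strings.Split(line, " by ")[0][2:], prNumber, prURL)
		fmt.Println(formatted)
		// Output: * fix: correct read-seq handling [#1234](https://github.com/openimsdk/open-im-server/pull/1234)
	}
}
```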

@@ -1,308 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"log"
"os"
"os/exec"
"regexp"
"sort"
"strings"
)
var (
mergeRequest = regexp.MustCompile(`Merge pull request #([\d]+)`)
webconsoleBump = regexp.MustCompile(regexp.QuoteMeta("bump(github.com/openshift/origin-web-console): ") + `([\w]+)`)
upstreamKube = regexp.MustCompile(`^UPSTREAM: (\d+)+:(.+)`)
upstreamRepo = regexp.MustCompile(`^UPSTREAM: ([\w/-]+): (\d+)+:(.+)`)
prefix = regexp.MustCompile(`^[\w-]: `)
assignments = []prefixAssignment{
{"cluster up", "cluster"},
{" pv ", "storage"},
{"haproxy", "router"},
{"router", "router"},
{"route", "route"},
{"authoriz", "auth"},
{"rbac", "auth"},
{"authent", "auth"},
{"reconcil", "auth"},
{"auth", "auth"},
{"role", "auth"},
{" dc ", "deploy"},
{"deployment", "deploy"},
{"rolling", "deploy"},
{"security context constr", "security"},
{"scc", "security"},
{"pipeline", "build"},
{"build", "build"},
{"registry", "registry"},
{"registries", "image"},
{"image", "image"},
{" arp ", "network"},
{" cni ", "network"},
{"egress", "network"},
{"network", "network"},
{"oc ", "cli"},
{"template", "template"},
{"etcd", "server"},
{"pod", "node"},
{"scripts/", "hack"},
{"e2e", "test"},
{"integration", "test"},
{"cluster", "cluster"},
{"master", "server"},
{"packages", "hack"},
{"api", "server"},
}
)
type prefixAssignment struct {
term string
prefix string
}
type commit struct {
short string
parents []string
message string
}
func contains(arr []string, value string) bool {
for _, s := range arr {
if s == value {
return true
}
}
return false
}
func main() {
log.SetFlags(0)
if len(os.Args) != 3 {
log.Fatalf("Must specify two arguments, FROM and TO")
}
from := os.Args[1]
to := os.Args[2]
out, err := exec.Command("git", "log", "--topo-order", "--pretty=tformat:%h %p|%s", "--reverse", fmt.Sprintf("%s..%s", from, to)).CombinedOutput()
if err != nil {
log.Fatal(err)
}
hide := make(map[string]struct{})
var apiChanges []string
var webconsole []string
var commits []commit
var upstreams []commit
var bumps []commit
for _, line := range strings.Split(string(out), "\n") {
if len(strings.TrimSpace(line)) == 0 {
continue
}
parts := strings.SplitN(line, "|", 2)
hashes := strings.Split(parts[0], " ")
c := commit{short: hashes[0], parents: hashes[1:], message: parts[1]}
if strings.HasPrefix(c.message, "UPSTREAM: ") {
hide[c.short] = struct{}{}
upstreams = append(upstreams, c)
}
if strings.HasPrefix(c.message, "bump(") {
hide[c.short] = struct{}{}
bumps = append(bumps, c)
}
if len(c.parents) == 1 {
commits = append(commits, c)
continue
}
matches := mergeRequest.FindStringSubmatch(line)
if len(matches) == 0 {
// this may have been a human pressing the merge button, we'll just record this as a direct push
continue
}
// split the accumulated commits into any that are force merges (assumed to be the initial set due
// to --topo-order) from the PR commits as soon as we see any of our merge parents. Then print
// any of the force merges
var first int
for i := range commits {
first = i
if contains(c.parents, commits[i].short) {
first++
break
}
}
individual := commits[:first]
merged := commits[first:]
for _, commit := range individual {
if len(commit.parents) > 1 {
continue
}
if _, ok := hide[commit.short]; ok {
continue
}
fmt.Printf("force-merge: %s %s\n", commit.message, commit.short)
}
// try to find either the PR title or the first commit title from the merge commit
out, err := exec.Command("git", "show", "--pretty=tformat:%b", c.short).CombinedOutput()
if err != nil {
log.Fatal(err)
}
var message string
para := strings.Split(string(out), "\n\n")
if len(para) > 0 && strings.HasPrefix(para[0], "Automatic merge from submit-queue") {
para = para[1:]
}
// this is no longer necessary with the submit queue in place
if len(para) > 0 && strings.HasPrefix(para[0], "Merged by ") {
para = para[1:]
}
// post submit-queue, the merge bot will add the PR title, which is usually pretty good
if len(para) > 0 {
message = strings.Split(para[0], "\n")[0]
}
if len(message) == 0 && len(merged) > 0 {
message = merged[0].message
}
if len(message) > 0 && len(merged) == 1 && message == merged[0].message {
merged = nil
}
// try to calculate a prefix based on the diff
if len(message) > 0 && !prefix.MatchString(message) {
prefix, ok := findPrefixFor(message, merged)
if ok {
message = prefix + ": " + message
}
}
// github merge
// has api changes
display := fmt.Sprintf("%s [\\#%s](https://github.com/openimsdk/Open-IM-Server/pull/%s)", message, matches[1], matches[1])
if hasFileChanges(c.short, "pkg/apistruct/") {
apiChanges = append(apiChanges, display)
}
var filtered []commit
for _, commit := range merged {
if _, ok := hide[commit.short]; ok {
continue
}
filtered = append(filtered, commit)
}
if len(filtered) > 0 {
fmt.Printf("- %s\n", display)
for _, commit := range filtered {
fmt.Printf(" - %s (%s)\n", commit.message, commit.short)
}
}
// stick the merge commit in at the beginning of the next list so we can anchor the previous parent
commits = []commit{c}
}
// chunk the bumps
var lines []string
for _, commit := range bumps {
if m := webconsoleBump.FindStringSubmatch(commit.message); len(m) > 0 {
webconsole = append(webconsole, m[1])
continue
}
lines = append(lines, commit.message)
}
lines = sortAndUniq(lines)
for _, line := range lines {
fmt.Printf("- %s\n", line)
}
// chunk the upstreams
lines = nil
for _, commit := range upstreams {
lines = append(lines, commit.message)
}
lines = sortAndUniq(lines)
for _, line := range lines {
fmt.Printf("- %s\n", upstreamLinkify(line))
}
if len(webconsole) > 0 {
fmt.Printf("- web: from %s^..%s\n", webconsole[0], webconsole[len(webconsole)-1])
}
for _, apiChange := range apiChanges {
fmt.Printf(" - %s\n", apiChange)
}
}
func findPrefixFor(message string, commits []commit) (string, bool) {
message = strings.ToLower(message)
for _, m := range assignments {
if strings.Contains(message, m.term) {
return m.prefix, true
}
}
for _, c := range commits {
if prefix, ok := findPrefixFor(c.message, nil); ok {
return prefix, ok
}
}
return "", false
}
func hasFileChanges(commit string, prefixes ...string) bool {
out, err := exec.Command("git", "diff", "--name-only", fmt.Sprintf("%s^..%s", commit, commit)).CombinedOutput()
if err != nil {
log.Fatal(err)
}
for _, file := range strings.Split(string(out), "\n") {
for _, prefix := range prefixes {
if strings.HasPrefix(file, prefix) {
return true
}
}
}
return false
}
func sortAndUniq(lines []string) []string {
sort.Strings(lines)
out := make([]string, 0, len(lines))
last := ""
for _, s := range lines {
if last == s {
continue
}
last = s
out = append(out, s)
}
return out
}
func upstreamLinkify(line string) string {
if m := upstreamKube.FindStringSubmatch(line); len(m) > 0 {
return fmt.Sprintf("UPSTREAM: [#%s](https://github.com/openimsdk/open-im-server/pull/%s):%s", m[1], m[1], m[2])
}
if m := upstreamRepo.FindStringSubmatch(line); len(m) > 0 {
return fmt.Sprintf("UPSTREAM: [%s#%s](https://github.com/%s/pull/%s):%s", m[1], m[2], m[1], m[2], m[3])
}
return line
}