Merge branch 'OpenIMSDK:v2.3.0release' into v2.3.0release

pull/352/head
soasurs 3 years ago committed by GitHub
commit ca394b642f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1,3 +1,4 @@
USER=root USER=root
PASSWORD=openIM123 PASSWORD=openIM123
MINIO_ENDPOINT=http://127.0.0.1:10005
DATA_DIR=./ DATA_DIR=./

@ -26,8 +26,6 @@ By deployment of the Open-IM-Server on the customer's server, developers can int
- Supports multiple protocols - Supports multiple protocols
## Community ## Community
- Join Slack Community : https://join.slack.com/t/openimcommunity/shared_invite/zt-1jo5m1wm9-ym2qj0LUU5UbO68L6Z1zQg
- 中文官网访问这里:[Open-IM中文开发文档](https://doc.rentsoft.cn/) - 中文官网访问这里:[Open-IM中文开发文档](https://doc.rentsoft.cn/)
## Quick start ## Quick start
@ -36,75 +34,69 @@ By deployment of the Open-IM-Server on the customer's server, developers can int
> Open-IM relies on five open source high-performance components: ETCD, MySQL, MongoDB, Redis, and Kafka. Privatization deployment Before Open-IM-Server, please make sure that the above five components have been installed. If your server does not have the above components, you must first install Missing components. If you have the above components, it is recommended to use them directly. If not, it is recommended to use Docker-compose, no To install dependencies, one-click deployment, faster and more convenient. > Open-IM relies on five open source high-performance components: ETCD, MySQL, MongoDB, Redis, and Kafka. Privatization deployment Before Open-IM-Server, please make sure that the above five components have been installed. If your server does not have the above components, you must first install Missing components. If you have the above components, it is recommended to use them directly. If not, it is recommended to use Docker-compose, no To install dependencies, one-click deployment, faster and more convenient.
#### Source code deployment #### Deploy using Docker
1. Install [Go environment](https://golang.org/doc/install). Make sure Go version is at least 1.15. 1. Install [Go environment](https://golang.org/doc/install). Make sure Go version is at least 1.17
2. Clone the Open-IM project to your server. 2. Clone the Open-IM project to your server
``` ```
git clone https://github.com/OpenIMSDK/Open-IM-Server.git --recursive git clone https://github.com/OpenIMSDK/Open-IM-Server.git --recursive
``` ```
3. Build and start Service. 3. Deploy
1. Shell authorization
```
#cd Open-IM-server/script
chmod +x *.sh
```
2. Execute the build shell 1. Modify env
``` ```
./build_all_service.sh #cd Open-IM-server
USER=root
PASSWORD=openIM123 #Password with more than 8 digits, excluding special characters
ENDPOINT=http://127.0.0.1:10005 #Replace 127.0.0.1 with Internet IP
DATA_DIR=./
``` ```
3. Start service 2. Deploy && Start
``` ```
./start_all.sh chmod +x install_im_server.sh;
./install_im_server.sh;
``` ```
4. Check service 4. Check service
``` ```
./check_all.sh cd script;
./docker_check_service.sh./check_all.sh
``` ```
![OpenIMServersonSystempng](https://github.com/OpenIMSDK/Open-IM-Server/blob/main/docs/Open-IM-Servers-on-System.png) ![OpenIMServersonSystempng](https://github.com/OpenIMSDK/Open-IM-Server/blob/main/docs/Open-IM-Servers-on-System.png)
#### Docker deployment #### Deploy using source code
All images are available at https://hub.docker.com/r/lyt1123/open_im_server 1. Go 1.17 or above。
2. Clone
1. [Install Docker](https://docs.docker.com/install/) 1.13 or above. ```shell
2. [Install Docker Compose](https://docs.docker.com/compose/install/) 1.22 or above.
3. Clone the Open-IM project to your server.
```
git clone https://github.com/OpenIMSDK/Open-IM-Server.git --recursive git clone https://github.com/OpenIMSDK/Open-IM-Server.git --recursive
cd cmd/Open-IM-SDK-Core
git checkout main
``` ```
4. Start docker-compose with one click(Docker automatically pulls all images) 1. Set executable permissions
``` ```shell
cd Open-IM-Server cd ../../script/
docker-compose up -d chmod +x *.sh
``` ```
5. Check service 1. build
``` ```shell
./docker_check_service.sh ./batch_build_all_service.sh
./check_all.sh
``` ```
![OpenIMServersondockerpng](https://github.com/OpenIMSDK/Open-IM-Server/blob/main/docs/Open-IM-Servers-on-docker.png) all services build success
### CONFIGURATION INSTRUCTIONS ### CONFIGURATION INSTRUCTIONS

@ -1 +1 @@
Subproject commit 1667b0f4e205fc4ed7c690ab55b662087d61c277 Subproject commit e731cb86ec9314a0b30b4f8331d53854c2c9d858

@ -1,13 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_api NAME=open_im_api
BIN_DIR=../../bin/ BIN_DIR=../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -16,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -15,6 +15,7 @@ import (
"Open_IM/internal/api/user" "Open_IM/internal/api/user"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"flag" "flag"
"fmt" "fmt"
@ -110,6 +111,8 @@ func main() {
groupRouterGroup.POST("/get_group_all_member_list", group.GetGroupAllMemberList) //1 groupRouterGroup.POST("/get_group_all_member_list", group.GetGroupAllMemberList) //1
groupRouterGroup.POST("/get_group_members_info", group.GetGroupMembersInfo) //1 groupRouterGroup.POST("/get_group_members_info", group.GetGroupMembersInfo) //1
groupRouterGroup.POST("/invite_user_to_group", group.InviteUserToGroup) //1 groupRouterGroup.POST("/invite_user_to_group", group.InviteUserToGroup) //1
//only for supergroup
groupRouterGroup.POST("/invite_user_to_groups", group.InviteUserToGroups)
groupRouterGroup.POST("/get_joined_group_list", group.GetJoinedGroupList) groupRouterGroup.POST("/get_joined_group_list", group.GetJoinedGroupList)
groupRouterGroup.POST("/dismiss_group", group.DismissGroup) // groupRouterGroup.POST("/dismiss_group", group.DismissGroup) //
groupRouterGroup.POST("/mute_group_member", group.MuteGroupMember) groupRouterGroup.POST("/mute_group_member", group.MuteGroupMember)
@ -162,6 +165,11 @@ func main() {
chatGroup.POST("/batch_send_msg", manage.ManagementBatchSendMsg) chatGroup.POST("/batch_send_msg", manage.ManagementBatchSendMsg)
chatGroup.POST("/check_msg_is_send_success", manage.CheckMsgIsSendSuccess) chatGroup.POST("/check_msg_is_send_success", manage.CheckMsgIsSendSuccess)
chatGroup.POST("/set_msg_min_seq", apiChat.SetMsgMinSeq) chatGroup.POST("/set_msg_min_seq", apiChat.SetMsgMinSeq)
chatGroup.POST("/set_message_reaction_extensions", apiChat.SetMessageReactionExtensions)
chatGroup.POST("/get_message_list_reaction_extensions", apiChat.GetMessageListReactionExtensions)
chatGroup.POST("/add_message_reaction_extensions", apiChat.AddMessageReactionExtensions)
chatGroup.POST("/delete_message_reaction_extensions", apiChat.DeleteMessageReactionExtensions)
} }
//Conversation //Conversation
conversationGroup := r.Group("/conversation") conversationGroup := r.Group("/conversation")
@ -169,8 +177,10 @@ func main() {
conversationGroup.POST("/get_all_conversations", conversation.GetAllConversations) conversationGroup.POST("/get_all_conversations", conversation.GetAllConversations)
conversationGroup.POST("/get_conversation", conversation.GetConversation) conversationGroup.POST("/get_conversation", conversation.GetConversation)
conversationGroup.POST("/get_conversations", conversation.GetConversations) conversationGroup.POST("/get_conversations", conversation.GetConversations)
//deprecated
conversationGroup.POST("/set_conversation", conversation.SetConversation) conversationGroup.POST("/set_conversation", conversation.SetConversation)
conversationGroup.POST("/batch_set_conversation", conversation.BatchSetConversations) conversationGroup.POST("/batch_set_conversation", conversation.BatchSetConversations)
//deprecated
conversationGroup.POST("/set_recv_msg_opt", conversation.SetRecvMsgOpt) conversationGroup.POST("/set_recv_msg_opt", conversation.SetRecvMsgOpt)
conversationGroup.POST("/modify_conversation_field", conversation.ModifyConversationField) conversationGroup.POST("/modify_conversation_field", conversation.ModifyConversationField)
} }
@ -222,7 +232,7 @@ func main() {
initGroup.POST("/set_client_config", clientInit.SetClientInitConfig) initGroup.POST("/set_client_config", clientInit.SetClientInitConfig)
initGroup.POST("/get_client_config", clientInit.GetClientInitConfig) initGroup.POST("/get_client_config", clientInit.GetClientInitConfig)
} }
go getcdv3.RegisterConf()
go apiThird.MinioInit() go apiThird.MinioInit()
defaultPorts := config.Config.Api.GinPort defaultPorts := config.Config.Api.GinPort
ginPort := flag.Int("port", defaultPorts[0], "get ginServerPort from cmd,default 10002 as port") ginPort := flag.Int("port", defaultPorts[0], "get ginServerPort from cmd,default 10002 as port")
@ -231,7 +241,7 @@ func main() {
if config.Config.Api.ListenIP != "" { if config.Config.Api.ListenIP != "" {
address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort) address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort)
} }
fmt.Println("start api server, address: ", address, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start api server, address: ", address, ", OpenIM version: ", constant.CurrentVersion)
err := r.Run(address) err := r.Run(address)
if err != nil { if err != nil {
log.Error("", "api run failed ", address, err.Error()) log.Error("", "api run failed ", address, err.Error())

@ -1,13 +1,25 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_cms_api NAME=open_im_cms_api
BIN_DIR=../../bin/ BIN_DIR=../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -16,8 +28,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -25,6 +25,6 @@ func main() {
address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort) address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort)
} }
address = config.Config.CmsApi.ListenIP + ":" + strconv.Itoa(*ginPort) address = config.Config.CmsApi.ListenIP + ":" + strconv.Itoa(*ginPort)
fmt.Println("start cms api server, address: ", address, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start cms api server, address: ", address, ", OpenIM version: ", constant.CurrentVersion, "\n")
router.Run(address) router.Run(address)
} }

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_cron_task NAME=open_im_cron_task
BIN_DIR=../../bin/ BIN_DIR=../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -2,10 +2,15 @@ package main
import ( import (
"Open_IM/internal/cron_task" "Open_IM/internal/cron_task"
"flag"
"fmt" "fmt"
"time"
) )
func main() { func main() {
fmt.Println("start cronTask") var userID = flag.String("userID", "", "userID to clear msg and reset seq")
cronTask.StartCronTask() var workingGroupID = flag.String("workingGroupID", "", "workingGroupID to clear msg and reset seq")
flag.Parse()
fmt.Println(time.Now(), "start cronTask", *userID, *workingGroupID)
cronTask.StartCronTask(*userID, *workingGroupID)
} }

@ -1,13 +1,25 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_demo NAME=open_im_demo
BIN_DIR=../../bin/ BIN_DIR=../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -16,8 +28,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -70,7 +70,7 @@ func main() {
address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort) address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort)
} }
address = config.Config.CmsApi.ListenIP + ":" + strconv.Itoa(*ginPort) address = config.Config.CmsApi.ListenIP + ":" + strconv.Itoa(*ginPort)
fmt.Println("start demo api server address: ", address, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start demo api server address: ", address, ", OpenIM version: ", constant.CurrentVersion, "\n")
go register.OnboardingProcessRoutine() go register.OnboardingProcessRoutine()
go register.ImportFriendRoutine() go register.ImportFriendRoutine()
err := r.Run(address) err := r.Run(address)

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_msg_gateway NAME=open_im_msg_gateway
BIN_DIR=../../bin/ BIN_DIR=../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -21,7 +21,7 @@ func main() {
flag.Parse() flag.Parse()
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(1) wg.Add(1)
fmt.Println("start rpc/msg_gateway server, port: ", *rpcPort, *wsPort, *prometheusPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start rpc/msg_gateway server, port: ", *rpcPort, *wsPort, *prometheusPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
gate.Init(*rpcPort, *wsPort) gate.Init(*rpcPort, *wsPort)
gate.Run(*prometheusPort) gate.Run(*prometheusPort)
wg.Wait() wg.Wait()

@ -1,13 +1,25 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_msg_transfer NAME=open_im_msg_transfer
BIN_DIR=../../bin/ BIN_DIR=../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -16,8 +28,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -17,7 +17,7 @@ func main() {
flag.Parse() flag.Parse()
log.NewPrivateLog(constant.LogFileName) log.NewPrivateLog(constant.LogFileName)
logic.Init() logic.Init()
fmt.Println("start msg_transfer server ", "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start msg_transfer server ", ", OpenIM version: ", constant.CurrentVersion, "\n")
logic.Run(*prometheusPort) logic.Run(*prometheusPort)
wg.Wait() wg.Wait()
} }

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_push NAME=open_im_push
BIN_DIR=../../bin/ BIN_DIR=../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -18,7 +18,7 @@ func main() {
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(1) wg.Add(1)
log.NewPrivateLog(constant.LogFileName) log.NewPrivateLog(constant.LogFileName)
fmt.Println("start push rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start push rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
logic.Init(*rpcPort) logic.Init(*rpcPort)
logic.Run(*prometheusPort) logic.Run(*prometheusPort)
wg.Wait() wg.Wait()

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_admin_cms NAME=open_im_admin_cms
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port") rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.AdminCmsPrometheusPort[0], "adminCMSPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.AdminCmsPrometheusPort[0], "adminCMSPrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start cms rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start cms rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcMessageCMS.NewAdminCMSServer(*rpcPort) rpcServer := rpcMessageCMS.NewAdminCMSServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_auth NAME=open_im_auth
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "RpcToken default listen port 10800") rpcPort := flag.Int("port", defaultPorts[0], "RpcToken default listen port 10800")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.AuthPrometheusPort[0], "authPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.AuthPrometheusPort[0], "authPrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start auth rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start auth rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcAuth.NewRpcAuthServer(*rpcPort) rpcServer := rpcAuth.NewRpcAuthServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_cache NAME=open_im_cache
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -15,7 +15,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "RpcToken default listen port 10800") rpcPort := flag.Int("port", defaultPorts[0], "RpcToken default listen port 10800")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.CachePrometheusPort[0], "cachePrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.CachePrometheusPort[0], "cachePrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start cache rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start cache rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcCache.NewCacheServer(*rpcPort) rpcServer := rpcCache.NewCacheServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_conversation NAME=open_im_conversation
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "RpcConversation default listen port 11300") rpcPort := flag.Int("port", defaultPorts[0], "RpcConversation default listen port 11300")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.ConversationPrometheusPort[0], "conversationPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.ConversationPrometheusPort[0], "conversationPrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start conversation rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start conversation rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion)
rpcServer := rpcConversation.NewRpcConversationServer(*rpcPort) rpcServer := rpcConversation.NewRpcConversationServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,25 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_friend NAME=open_im_friend
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +28,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "get RpcFriendPort from cmd,default 12000 as port") rpcPort := flag.Int("port", defaultPorts[0], "get RpcFriendPort from cmd,default 12000 as port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.FriendPrometheusPort[0], "friendPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.FriendPrometheusPort[0], "friendPrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start friend rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start friend rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := friend.NewFriendServer(*rpcPort) rpcServer := friend.NewFriendServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,25 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_group NAME=open_im_group
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +28,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "get RpcGroupPort from cmd,default 16000 as port") rpcPort := flag.Int("port", defaultPorts[0], "get RpcGroupPort from cmd,default 16000 as port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.GroupPrometheusPort[0], "groupPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.GroupPrometheusPort[0], "groupPrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start group rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start group rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := group.NewGroupServer(*rpcPort) rpcServer := group.NewGroupServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_msg NAME=open_im_msg
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port") rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.MessagePrometheusPort[0], "msgPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.MessagePrometheusPort[0], "msgPrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start msg rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start msg rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := msg.NewRpcChatServer(*rpcPort) rpcServer := msg.NewRpcChatServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_office NAME=open_im_office
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port") rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.OfficePrometheusPort[0], "officePrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.OfficePrometheusPort[0], "officePrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start office rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start office rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpc.NewOfficeServer(*rpcPort) rpcServer := rpc.NewOfficeServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_organization NAME=open_im_organization
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "get RpcOrganizationPort from cmd,default 11200 as port") rpcPort := flag.Int("port", defaultPorts[0], "get RpcOrganizationPort from cmd,default 11200 as port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.OrganizationPrometheusPort[0], "organizationPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.OrganizationPrometheusPort[0], "organizationPrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start organization rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start organization rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := organization.NewServer(*rpcPort) rpcServer := organization.NewServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -1,12 +1,24 @@
.PHONY: all build run gotool install clean help .PHONY: all build run gotool install clean help
BINARY_NAME=open_im_user NAME=open_im_user
BIN_DIR=../../../bin/ BIN_DIR=../../../bin/
OS:= $(or $(os),linux)
ARCH:=$(or $(arch),amd64)
all: gotool build all: gotool build
ifeq ($(OS),windows)
BINARY_NAME=${NAME}.exe
else
BINARY_NAME=${NAME}
endif
build: build:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" CGO_ENABLED=0 GOOS=${OS} GOARCH=${ARCH} go build -ldflags="-w -s"
run: run:
@go run ./ @go run ./
@ -15,8 +27,7 @@ gotool:
go fmt ./ go fmt ./
go vet ./ go vet ./
install: install:build
make build
mv ${BINARY_NAME} ${BIN_DIR} mv ${BINARY_NAME} ${BIN_DIR}
clean: clean:

@ -14,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port") rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.UserPrometheusPort[0], "userPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.UserPrometheusPort[0], "userPrometheusPort default listen port")
flag.Parse() flag.Parse()
fmt.Println("start user rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start user rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := user.NewUserServer(*rpcPort) rpcServer := user.NewUserServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := promePkg.StartPromeSrv(*prometheusPort)

@ -9,6 +9,7 @@ etcd:
etcdAddr: [ 127.0.0.1:2379 ] #单机部署时,默认即可 etcdAddr: [ 127.0.0.1:2379 ] #单机部署时,默认即可
userName: userName:
password: password:
secret:
mysql: mysql:
dbMysqlAddress: [ 127.0.0.1:13306 ] #mysql地址 目前仅支持单机,默认即可 dbMysqlAddress: [ 127.0.0.1:13306 ] #mysql地址 目前仅支持单机,默认即可
@ -35,7 +36,7 @@ mongo:
dbPassword: #mongo密码建议先不设置 dbPassword: #mongo密码建议先不设置
dbMaxPoolSize: 100 dbMaxPoolSize: 100
dbRetainChatRecords: 3650 #mongo保存离线消息时间根据需求修改 dbRetainChatRecords: 3650 #mongo保存离线消息时间根据需求修改
chatRecordsClearTime: "* * * * *" # 每天凌晨3点清除消息该配置和linux定时任务一样 清理操作建议设置在用户活跃少的时候 # 0 3 * * * chatRecordsClearTime: "0 3 * * *" # 每天凌晨3点清除消息该配置和linux定时任务一样 清理操作建议设置在用户活跃少的时候 # 0 3 * * *
redis: redis:
dbAddress: [ 127.0.0.1:16379 ] #redis地址 单机时填写一个地址即可使用redis集群时候填写集群中多个节点地址主从地址都可以填写增加容灾能力默认即可 dbAddress: [ 127.0.0.1:16379 ] #redis地址 单机时填写一个地址即可使用redis集群时候填写集群中多个节点地址主从地址都可以填写增加容灾能力默认即可
@ -61,12 +62,15 @@ kafka:
ms2pschat: ms2pschat:
addr: [ 127.0.0.1:9092 ] #kafka配置默认即可 addr: [ 127.0.0.1:9092 ] #kafka配置默认即可
topic: "ms2ps_chat" #消息push topic: "ms2ps_chat" #消息push
msgtomodify:
addr: [ 127.0.0.1:9092 ] #kafka配置默认即可
topic: "msg_to_modify"
consumergroupid: consumergroupid:
msgToTransfer: mongo msgToTransfer: mongo
msgToMongo: mongo_ex msgToMongo: mongo_ex
msgToMySql: mysql msgToMySql: mysql
msgToPush: push msgToPush: push
msgToModify: modify
#---------------Internal service configuration---------------------# #---------------Internal service configuration---------------------#
@ -111,16 +115,16 @@ sdk:
#对象存储服务以下配置二选一目前支持两种腾讯云和minio二者配置好其中一种即可如果使用minio参考https://doc.rentsoft.cn/#/qa/minio搭建minio服务器 #对象存储服务以下配置二选一目前支持两种腾讯云和minio二者配置好其中一种即可如果使用minio参考https://doc.rentsoft.cn/#/qa/minio搭建minio服务器
credential: #腾讯cos发送图片、视频、文件时需要请自行申请后替换必须修改 credential: #腾讯cos发送图片、视频、文件时需要请自行申请后替换必须修改
tencent: tencent:
appID: 1302656840 appID:
region: ap-chengdu region:
bucket: echat-1302656840 bucket:
secretID: AKIDGNYVChzIQinu7QEgtNp0hnNgqcV8vZTC1 secretID:
secretKey: kz15vW83qM6dBUWIq681eBZA0c0vlIbe1 secretKey:
minio: #MinIO 发送图片、视频、文件时需要,请自行申请后替换,必须修改。 客户端初始化InitSDK中 object_storage参数为minio minio: #MinIO 发送图片、视频、文件时需要,请自行申请后替换,必须修改。 客户端初始化InitSDK中 object_storage参数为minio
bucket: openim # 存储内容桶 bucket: openim # 存储内容桶
appBucket: app # 存储app的桶 appBucket: app # 存储app的桶
location: us-east-1 location: us-east-1
endpoint: http://127.0.0.1:10005 #minio外网ip 这个ip是给客户端访问的 endpoint: #minio外网ip 这个ip是给客户端访问的
endpointInner: http://127.0.0.1:10005 #minio内网地址 如果im server 可以通过内网访问到 minio就可以 endpointInner: http://127.0.0.1:10005 #minio内网地址 如果im server 可以通过内网访问到 minio就可以
endpointInnerEnable: true #是否启用minio内网地址 启用可以让桶初始化IM server连接minio走内网地址访问 endpointInnerEnable: true #是否启用minio内网地址 启用可以让桶初始化IM server连接minio走内网地址访问
accessKeyID: accessKeyID:
@ -128,24 +132,24 @@ credential: #腾讯cos发送图片、视频、文件时需要请自行申
storageTime: 50 #文件在minio中保存的时间 storageTime: 50 #文件在minio中保存的时间
isDistributedMod: false # 是否分布式多硬盘部署 默认docker-compose中为false isDistributedMod: false # 是否分布式多硬盘部署 默认docker-compose中为false
ali: # ali oss ali: # ali oss
regionID: "oss-cn-beijing" regionID:
accessKeyID: "" accessKeyID:
accessKeySecret: "" accessKeySecret:
stsEndpoint: "sts.cn-beijing.aliyun.com" stsEndpoint:
ossEndpoint: "oss-cn-beijing.aliyuncs.com" ossEndpoint:
bucket: "bucket1" bucket:
finalHost: "http://bucket1.oss-cn-beijing.aliyuncs.com" finalHost:
stsDurationSeconds: 3600 stsDurationSeconds:
OssRoleArn: "acs:ram::xxx:role/xxx" OssRoleArn:
aws: aws:
accessKeyID: 1 #AssumeRole用户关联的accessKeyID accessKeyID: #AssumeRole用户关联的accessKeyID
accessKeySecret: 2 #AssumeRole用户关联的accessKeySecrect accessKeySecret: #AssumeRole用户关联的accessKeySecrect
region: ap-southeast-1 #分区 region: #分区
bucket: ouyang #桶 bucket: #桶
finalHost: ouyang.s3.ap-southeast-1.amazonaws.com #对外Host finalHost: #对外Host
roleArn: arn:aws:iam::192209831083:role/AWS_S3_FOR_OUYANG #RoleArn roleArn: #RoleArn
externalId: AssumeRoleExtend #角色扩展Id externalId: #角色扩展Id
roleSessionName: Required-AWS-ID-OPENIM #角色SESSION名称 roleSessionName: #角色SESSION名称
rpcport: #rpc服务端口 默认即可 rpcport: #rpc服务端口 默认即可
@ -212,20 +216,20 @@ longconnsvr:
push: push:
tpns: #腾讯推送,暂未测试 暂不要使用 tpns: #腾讯推送,暂未测试 暂不要使用
ios: ios:
accessID: 1600018281 accessID:
secretKey: 3cd68a77a95b89e5089a1aca523f318f secretKey:
android: android:
accessID: 111 accessID:
secretKey: 111 secretKey:
enable: false # true or false (bool) enable: false # true or false (bool)
jpns: #极光推送 在极光后台申请后,修改以下四项,必须修改 jpns: #极光推送 在极光后台申请后,修改以下四项,必须修改
appKey: cf47465a368f24c659608e7e appKey:
masterSecret: 02204efe3f3832947a236ee5 masterSecret:
pushUrl: "https://api.jpush.cn/v3/push" pushUrl:
pushIntent: "intent:#Intent;component=io.openim.app.enterprisechat/io.openim.app.enterprisechat.MainActivity;end" pushIntent:
enable: false # true or false (bool) enable: false # true or false (bool)
getui: #个推推送 getui: #个推推送
pushUrl: "https://restapi.getui.com/v2/$appId" pushUrl:
masterSecret: "" masterSecret: ""
appKey: "" appKey: ""
intent: "" intent: ""
@ -233,13 +237,13 @@ push:
channelID: "" channelID: ""
channelName: "" channelName: ""
fcm: #firebase cloud message 消息推送 fcm: #firebase cloud message 消息推送
serviceAccount: "openim-5c6c0-firebase-adminsdk-ppwol-8765884a78.json" #帐号文件,此处需要改修配置,并且这个文件放在 config目录下 serviceAccount: #帐号文件,此处需要改修配置,并且这个文件放在 config目录下
enable: false enable: false
mob: #袤博推送 mob: #袤博推送
appKey: "3377f689a25" #帐号文件,此处需要改修配置,并且这个文件放在 config目录下 appKey: #帐号文件,此处需要改修配置,并且这个文件放在 config目录下
pushUrl: "https://api.push.mob.com/v3/push/createPush" pushUrl:
scheme: "dianzhijiaunilinks://dianzhijia.com?page=rent" scheme:
appSecret: "77b4e20e94db3a776b87d8693be23e" appSecret:
enable: false enable: false
@ -307,6 +311,9 @@ callback:
callbackAfterSendGroupMsg: callbackAfterSendGroupMsg:
enable: false enable: false
callbackTimeOut: 2 callbackTimeOut: 2
callbackAfterConsumeGroupMsg:
enable: false
callbackTimeOut: 2
callbackMsgModify: callbackMsgModify:
enable: false enable: false
callbackTimeOut: 2 callbackTimeOut: 2
@ -340,6 +347,19 @@ callback:
enable: false enable: false
callbackTimeOut: 2 callbackTimeOut: 2
callbackFailedContinue: true # 回调超时是否继续 callbackFailedContinue: true # 回调超时是否继续
callbackBeforeMemberJoinGroup:
enable: false
callbackTimeOut: 2
callbackFailedContinue: true # 回调超时是否继续
callbackBeforeSetGroupMemberInfo:
enable: false
callbackTimeOut: 2
callbackFailedContinue: true # 回调超时是否继续
callbackSetMessageReactionExtensions:
enable: false
callbackTimeOut: 2
callbackFailedContinue: true # 回调超时是否继续
notification: notification:
groupCreated: groupCreated:
@ -660,6 +680,19 @@ notification:
defaultTips: defaultTips:
tips: "Remove a blocked user" tips: "Remove a blocked user"
friendInfoUpdated:
conversation:
reliabilityLevel: 2
unreadCount: false
offlinePush:
switch: true
title: "friend info updated"
desc: "friend info updated"
ext: "friend info updated"
defaultTips:
tips: "friend info updated"
#####################user######################### #####################user#########################
userInfoUpdated: userInfoUpdated:
conversation: conversation:
@ -735,18 +768,18 @@ demo:
#demo对外服务端口默认即可需要开放此端口或做nginx转发 #demo对外服务端口默认即可需要开放此端口或做nginx转发
openImDemoPort: [ 10004 ] openImDemoPort: [ 10004 ]
alismsverify: #阿里云短信配置在阿里云申请成功后修改以下四项enable为true则必须修改阿里云为默认短信验证方式 alismsverify: #阿里云短信配置在阿里云申请成功后修改以下四项enable为true则必须修改阿里云为默认短信验证方式
accessKeyId: LTAI5tJPkn4HuuePdiLdGqe7 accessKeyId:
accessKeySecret: 4n9OJ7ZCVN1U6KeHDAtOyNeVZcjOuV accessKeySecret:
signName: 托云信息技术 signName:
verificationCodeTemplateCode: SMS_226810164 verificationCodeTemplateCode:
enable: false enable: false
tencentsms: #腾讯云短信配置在腾讯云申请成功后修改以下选项enable为true则必须修改 tencentsms: #腾讯云短信配置在腾讯云申请成功后修改以下选项enable为true则必须修改
appID: 2400000648 appID:
region: "ap-singapore" region:
secretID: IKIDra4JPGsFMDwQedMq42lESQBgwwgBQQAe secretID:
secretKey: HI6fz4uUotjJdiX6QUIrAE2buxlKdgU2 secretKey:
signName: "" signName:
verificationCodeTemplateCode: 2902877 verificationCodeTemplateCode:
enable: true enable: true
superCode: 666666 #超级验证码,建议修改掉,收不到短信验证码时可以用此替代 superCode: 666666 #超级验证码,建议修改掉,收不到短信验证码时可以用此替代
needInvitationCode: false needInvitationCode: false
@ -754,11 +787,11 @@ demo:
codeTTL: 60 codeTTL: 60
useSuperCode: true useSuperCode: true
mail: #仅支持qq邮箱具体操作参考 https://service.mail.qq.com/cgi-bin/help?subtype=1&id=28&no=1001256 必须修改 mail: #仅支持qq邮箱具体操作参考 https://service.mail.qq.com/cgi-bin/help?subtype=1&id=28&no=1001256 必须修改
title: "openIM" title:
senderMail: "765567899@qq.com" senderMail:
senderAuthorizationCode: "gxyausfoevlzbfag" senderAuthorizationCode:
smtpAddr: "smtp.qq.com" smtpAddr:
smtpPort: 25 #需开放此端口 出口方向 smtpPort: #需开放此端口 出口方向
testDepartMentID: 001 testDepartMentID: 001
imAPIURL: http://127.0.0.1:10002 imAPIURL: http://127.0.0.1:10002
onboardProcess: false # 是否开启注册流程 onboardProcess: false # 是否开启注册流程

@ -1,18 +1,19 @@
etcd: etcd:
userName: userName:
password: password:
secret: openIM123 # etcd 配置密钥
mysql: mysql:
dbMysqlUserName: root #mysql用户名,建议修改 dbMysqlUserName: root #mysql用户名
dbMysqlPassword: openIM123 # mysql密码,建议修改 dbMysqlPassword: openIM123 # mysql密码
mongo: mongo:
dbUserName: root #mongo用户名,建议先不设置 dbUserName: root #mongo用户名
dbPassword: openIM123 #mongo密码,建议先不设置 dbPassword: openIM123 #mongo密码
redis: redis:
dbUserName: dbUserName:
dbPassWord: openIM123 #redis密码 建议修改 dbPassWord: openIM123 #redis密码
kafka: kafka:
SASLUserName: SASLUserName:
@ -20,22 +21,23 @@ kafka:
credential: credential:
minio: minio:
endpoint: 127.0.0.1:10005 #发图片视频文件需要填写 endpoint: http://127.0.0.1:10005 #发图片视频文件需要填写
accessKeyID: root accessKeyID: root
secretAccessKey: openIM123 secretAccessKey: openIM123
secret: tuoyun secret: tuoyun #建议修改
tokenpolicy: tokenpolicy:
accessSecret: "open_im_server" #token生成相关,默认即可 accessSecret: "open_im_server" #token生成相关 建议修改
accessExpire: 90 #token过期时间 默认即可 accessExpire: 90 #token过期时间 默认即可
messageverify: messageverify:
friendVerify: false friendVerify: false #发送消息是否验证好友关系 false表示不验证好友关系
push: push:
getui: #个推推送 getui:
masterSecret: "" pushUrl: "https://restapi.getui.com/v2/$appId"
appKey: "" masterSecret: "" #需添加
enable: false appKey: "" #需添加
enable: false #true启动个推推送

@ -79,11 +79,11 @@ cmsapi:
credential: credential:
tencent: tencent:
appID: 1302656840 appID:
region: ap-chengdu region:
bucket: echat-1302656840 bucket:
secretID: AKIDGNYVChzIQinu7QEgtNp0hnNgqcV8vZTC secretID:
secretKey: kz15vW83qM6dBUWIq681eBZA0c0vlIbe secretKey:
rpcport: rpcport:
@ -131,16 +131,16 @@ longconnsvr:
push: push:
tpns: tpns:
ios: ios:
accessID: 1600018281 accessID:
secretKey: 3cd68a77a95b89e5089a1aca523f318f secretKey:
android: android:
accessID: 111 accessID:
secretKey: 111 secretKey:
jpns: jpns:
appKey: cf47465a368f24c659608e7e appKey:
masterSecret: 02204efe3f3832947a236ee5 masterSecret:
pushUrl: "https://api.jpush.cn/v3/push" pushUrl:
pushIntent: "intent:#Intent;component=io.openim.app.enterprisechat/io.openim.app.enterprisechat.MainActivity;end" pushIntent:
manager: manager:
appManagerUid: ["openIM123456","openIM654321"] appManagerUid: ["openIM123456","openIM654321"]
secrets: ["openIM1","openIM2"] secrets: ["openIM1","openIM2"]
@ -168,16 +168,16 @@ demoswitch: true
demo: demo:
openImDemoPort: [ 42233 ] openImDemoPort: [ 42233 ]
alismsverify: alismsverify:
accessKeyId: LTAI5tJPkn4HuuePdiLdGqe71 accessKeyId:
accessKeySecret: 4n9OJ7ZCVN1U6KeHDAtOyNeVZcjOuV1 accessKeySecret:
signName: OpenIM Corporation signName:
verificationCodeTemplateCode: SMS_2268101641 verificationCodeTemplateCode:
superCode: 666666 superCode: 666666
mail: mail:
title: "openIM" title:
senderMail: "1765567899@qq.com" senderMail:
senderAuthorizationCode: "1gxyausfoevlzbfag" senderAuthorizationCode:
smtpAddr: "smtp.qq.com" smtpAddr:
smtpPort: 25 smtpPort:

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: admin-cms - name: admin-cms
image: openim/admin_cms:v2.3.4 image: openim/admin_cms:v2.3.8
# imagePullPolicy: Always #每次启动都重新拉取镜像 # imagePullPolicy: Always #每次启动都重新拉取镜像
ports: ports:
- containerPort: 10200 - containerPort: 10200
@ -26,10 +26,17 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: api - name: api
image: openim/api:v2.3.4 image: openim/api:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10002 - containerPort: 10002
@ -26,10 +26,16 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新
--- ---

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: auth - name: auth
image: openim/auth:v2.3.4 image: openim/auth:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10160 - containerPort: 10160
@ -26,9 +26,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -2,7 +2,7 @@
source ./path_info.cfg source ./path_info.cfg
# images version # images version
version=v2.3.4 version=v2.3.8
git pull git pull
cd ../script/; ./build_all_service.sh cd ../script/; ./build_all_service.sh
cd ../deploy_k8s/ cd ../deploy_k8s/

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: cache - name: cache
image: openim/cache:v2.3.4 image: openim/cache:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10240 - containerPort: 10240
@ -26,9 +26,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: cms-api - name: cms-api
image: openim/cms_api:v2.3.4 image: openim/cms_api:v2.3.8
imagePullPolicy: Always imagePullPolicy: Always
ports: ports:
- containerPort: 10006 - containerPort: 10006
@ -26,10 +26,16 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新
--- ---

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: conversation - name: conversation
image: openim/conversation:v2.3.4 image: openim/conversation:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10230 - containerPort: 10230
@ -26,11 +26,17 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: demo - name: demo
image: openim/demo:v2.3.4 image: openim/demo:v2.3.8
imagePullPolicy: Always imagePullPolicy: Always
ports: ports:
- containerPort: 10004 - containerPort: 10004
@ -26,10 +26,16 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新
--- ---

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: friend - name: friend
image: openim/friend:v2.3.4 image: openim/friend:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10120 - containerPort: 10120
@ -26,10 +26,16 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: group - name: group
image: openim/group:v2.3.4 image: openim/group:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10150 - containerPort: 10150
@ -26,9 +26,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -9,17 +9,17 @@
6. 将rpcRegisterIP修改为空, 此地址为每个rpc注册到ETCD的地址, 置空每个rpc将会将pod地址注册到ETCD, 才能正确rpc请求(重要) 6. 将rpcRegisterIP修改为空, 此地址为每个rpc注册到ETCD的地址, 置空每个rpc将会将pod地址注册到ETCD, 才能正确rpc请求(重要)
7. 如果使用minio作为对象存储, 还需要修改minio的地址 7. 如果使用minio作为对象存储, 还需要修改minio的地址
8. 其他如果使用离线推送,需要修改push离线推送配置 8. 其他如果使用离线推送,需要修改push离线推送配置
9. 修改demo中的imAPIURL字段为openIM api的ingress或者service地址, 需要让demo的pod能正确请求到(重要)
10. 其他非必须配置修改, 如短信,推送等
### 2. 项目根目录创建im configMap到k8s openim namespace ### 2. 项目根目录创建im configMap到k8s openim namespace
1. 为open-IM项目创建单独命名空间 1. 为open-IM项目创建单独命名空间
``` ```
kubectl create namespace openim kubectl create namespace openim
``` ```
2. 在项目根目录通过config/config.yaml 2. 修改config.yaml后在项目根目录创建configmap, config/usualConfig.yaml只需要挂载不需要修改配置
``` ```
kubectl -n openim create configmap config --from-file=config/config.yaml kubectl -n openim create configmap openim-config --from-file=config/config.yaml
kubectl -n openim create configmap openim-usualconfig --from-file=config/usualConfig.yaml
``` ```
查看configmap 查看configmap
``` ```

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: msg - name: msg
image: openim/msg:v2.3.4 image: openim/msg:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10130 - containerPort: 10130
@ -26,9 +26,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: msg-gateway - name: msg-gateway
image: openim/msg_gateway:v2.3.4 image: openim/msg_gateway:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- name: rpc-port - name: rpc-port
@ -29,10 +29,16 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新
--- ---

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: msg-transfer - name: msg-transfer
image: openim/msg_transfer:v2.3.4 image: openim/msg_transfer:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
volumeMounts: volumeMounts:
- name: config - name: config
@ -24,9 +24,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: office - name: office
image: openim/office:v2.3.4 image: openim/office:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10210 - containerPort: 10210
@ -26,9 +26,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: organization - name: organization
image: openim/organization:v2.3.4 image: openim/organization:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10220 - containerPort: 10220
@ -26,9 +26,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: push - name: push
image: openim/push:v2.3.4 image: openim/push:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10170 - containerPort: 10170
@ -26,9 +26,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: sdk-server - name: sdk-server
image: openim/sdk_server:v2.3.4 image: openim/sdk_server:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
ports: ports:
- containerPort: 10003 - containerPort: 10003

@ -15,7 +15,7 @@ spec:
spec: spec:
containers: containers:
- name: user - name: user
image: openim/user:v2.3.4 image: openim/user:v2.3.8
# imagePullPolicy: Always # imagePullPolicy: Always
volumeMounts: volumeMounts:
- name: config - name: config
@ -24,9 +24,15 @@ spec:
env: env:
- name: CONFIG_NAME - name: CONFIG_NAME
value: "/Open-IM-Server" value: "/Open-IM-Server"
- name: USUAL_CONFIG_NAME
value: "/Open-IM-Server"
volumes: volumes:
- name: config - name: config
configMap: projected:
sources:
- configMap:
name: openim-config name: openim-config
- configMap:
name: openim-usualconfig
strategy: #更新策略 strategy: #更新策略
type: RollingUpdate # 滚动更新 type: RollingUpdate # 滚动更新

@ -115,7 +115,7 @@ services:
open_im_server: open_im_server:
image: openim/open_im_server:v2.3.4 image: openim/open_im_server:v2.3.8
container_name: open_im_server container_name: open_im_server
volumes: volumes:
- ./logs:/Open-IM-Server/logs - ./logs:/Open-IM-Server/logs
@ -138,11 +138,33 @@ services:
max-size: "1g" max-size: "1g"
max-file: "2" max-file: "2"
open_im_enterprise:
image: openim/open_im_enterprise:v1.0.3
container_name: open_im_enterprise
volumes:
- ./logs:/Open-IM-Enterprise/logs
- ./docker-compose_cfg/config.yaml:/Open-IM-Enterprise/config/config.yaml
restart: always
depends_on:
- mysql
- mongodb
- redis
- etcd
- minio
- open_im_server
network_mode: "host"
logging:
driver: json-file
options:
max-size: "1g"
max-file: "2"
environment:
CONFIG_NAME: "/Open-IM-Enterprise"
prometheus: prometheus:
image: prom/prometheus image: prom/prometheus
volumes: volumes:
- ${DATA_DIR}/docker-compose_cfg/prometheus-compose.yml:/etc/prometheus/prometheus.yml - ./docker-compose_cfg/prometheus-compose.yml:/etc/prometheus/prometheus.yml
# - ${DATA_DIR}/components/prometheus_data:/prometheus
container_name: prometheus container_name: prometheus
# ports: # ports:
# - 9091:9091 # - 9091:9091
@ -156,9 +178,9 @@ services:
volumes: volumes:
# - ./grafana/dashboards/dashboard.json:/var/lib/grafana/dashboards/dashboard.json # - ./grafana/dashboards/dashboard.json:/var/lib/grafana/dashboards/dashboard.json
# - ./grafana/provisioning/dashboard.yaml:/etc/grafana/provisioning/dashboards/dashboard.yaml # - ./grafana/provisioning/dashboard.yaml:/etc/grafana/provisioning/dashboards/dashboard.yaml
- ${DATA_DIR}/docker-compose_cfg/datasource-compose.yaml:/etc/grafana/provisioning/datasources/datasource.yaml - ./docker-compose_cfg/datasource-compose.yaml:/etc/grafana/provisioning/datasources/datasource.yaml
- ${DATA_DIR}/docker-compose_cfg/grafana.ini:/etc/grafana/grafana.ini - ./docker-compose_cfg/grafana.ini:/etc/grafana/grafana.ini
- ${DATA_DIR}/docker-compose_cfg/node-exporter-full_rev1.json:/var/lib/grafana/dashboards/node-exporter-full_rev1.json - ./docker-compose_cfg/node-exporter-full_rev1.json:/var/lib/grafana/dashboards/node-exporter-full_rev1.json
container_name: grafana container_name: grafana
# ports: # ports:
# - 10007:10007 # - 10007:10007

@ -0,0 +1,65 @@
#---------------Infrastructure configuration---------------------#
etcd:
etcdSchema: openim #默认即可
etcdAddr: [ 127.0.0.1:2379 ] #单机部署时,默认即可
userName:
password:
secret: openIM123
mysql:
dbMysqlDatabaseName: admin_chat # 数据库名字 默认即可
# 默认管理员账号
admin:
defaultAccount:
account: [ "admin1", "admin2" ]
defaultPassword: [ "password1", "password2" ]
openIMUserID: [ "openIM123456", "openIMAdmin" ]
faceURL: [ "", "" ]
nickname: [ "admin1", "admin2" ]
level: [ 1, 100 ]
adminapi:
openImAdminApiPort: [ 10009 ] #管理后台api服务端口默认即可需要开放此端口或做nginx转发
listenIP: 0.0.0.0
chatapi:
openImChatApiPort: [ 10008 ] #登录注册默认即可需要开放此端口或做nginx转发
listenIP: 0.0.0.0
rpcport: # rpc服务端口 默认即可
openImAdminPort: [ 30200 ]
openImChatPort: [ 30300 ]
rpcregistername: #rpc注册服务名默认即可
openImChatName: Chat
openImAdminCMSName: Admin
chat:
codeTTL: 300 #短信验证码有效时间(秒)
superVerificationCode: 666666 # 超级验证码
alismsverify: #阿里云短信配置,在阿里云申请成功后修改以下四项
accessKeyId:
accessKeySecret:
signName:
verificationCodeTemplateCode:
oss:
tempDir: enterprise-temp # 临时密钥上传的目录
dataDir: enterprise-data # 最终存放目录
aliyun:
endpoint: https://oss-cn-chengdu.aliyuncs.com
accessKeyID: ""
accessKeySecret: ""
bucket: ""
tencent:
BucketURL: ""
serviceURL: https://cos.COS_REGION.myqcloud.com
secretID: ""
secretKey: ""
sessionToken: ""
bucket: ""
use: "minio"

@ -29,7 +29,6 @@ require (
github.com/gorilla/websocket v1.4.2 github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/jinzhu/copier v0.3.4 github.com/jinzhu/copier v0.3.4
github.com/jinzhu/gorm v1.9.16
github.com/jonboulle/clockwork v0.2.2 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible
github.com/lestrrat-go/strftime v1.0.4 // indirect github.com/lestrrat-go/strftime v1.0.4 // indirect

@ -368,8 +368,6 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jinzhu/copier v0.3.4 h1:mfU6jI9PtCeUjkjQ322dlff9ELjGDu975C2p/nrubVI= github.com/jinzhu/copier v0.3.4 h1:mfU6jI9PtCeUjkjQ322dlff9ELjGDu975C2p/nrubVI=
github.com/jinzhu/copier v0.3.4/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jinzhu/copier v0.3.4/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jinzhu/gorm v1.9.16 h1:+IyIjPEABKRpsu/F8OvDPy9fyQlgsg2luMV2ZIH5i5o=
github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=

@ -1,7 +1,17 @@
#!/usr/bin/env bash #!/usr/bin/env bash
internet_ip=`curl ifconfig.me -s`
echo $internet_ip
source .env
echo $MINIO_ENDPOINT
if [ $MINIO_ENDPOINT == "http://127.0.0.1:10005" ]; then
sed -i "s/127.0.0.1/${internet_ip}/" .env
fi
cd script ; cd script ;
chmod +x *.sh ; chmod +x *.sh ;
./init_pwd.sh
./env_check.sh; ./env_check.sh;
cd .. ; cd .. ;
docker-compose up -d; docker-compose up -d;

@ -199,7 +199,6 @@ func ForceLogout(c *gin.Context) {
c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": errMsg}) c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": errMsg})
return return
} }
req := &rpc.ForceLogoutReq{} req := &rpc.ForceLogoutReq{}
utils.CopyStructFields(req, &params) utils.CopyStructFields(req, &params)

@ -9,7 +9,6 @@ import (
"net/http" "net/http"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
) )
func SetClientInitConfig(c *gin.Context) { func SetClientInitConfig(c *gin.Context) {
@ -62,11 +61,10 @@ func GetClientInitConfig(c *gin.Context) {
} }
config, err := imdb.GetClientInitConfig() config, err := imdb.GetClientInitConfig()
if err != nil { if err != nil {
if !gorm.IsRecordNotFoundError(err) {
log.NewError(req.OperationID, utils.GetSelfFuncName(), err.Error()) log.NewError(req.OperationID, utils.GetSelfFuncName(), err.Error())
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": err.Error()}) c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": err.Error()})
return return
}
} }
resp.Data.DiscoverPageURL = config.DiscoverPageURL resp.Data.DiscoverPageURL = config.DiscoverPageURL
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "resp ", resp) log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "resp ", resp)

@ -370,6 +370,47 @@ func InviteUserToGroup(c *gin.Context) {
log.NewInfo(req.OperationID, "InviteUserToGroup api return ", resp) log.NewInfo(req.OperationID, "InviteUserToGroup api return ", resp)
c.JSON(http.StatusOK, resp) c.JSON(http.StatusOK, resp)
} }
func InviteUserToGroups(c *gin.Context) {
params := api.InviteUserToGroupsReq{}
if err := c.BindJSON(&params); err != nil {
log.NewError("0", "BindJSON failed ", err.Error())
c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": err.Error()})
return
}
req := &rpc.InviteUserToGroupsReq{}
utils.CopyStructFields(req, &params)
var ok bool
var errInfo string
ok, req.OpUserID, errInfo = token_verify.GetUserIDFromToken(c.Request.Header.Get("token"), req.OperationID)
if !ok {
errMsg := req.OperationID + " " + "GetUserIDFromToken failed " + errInfo + " token:" + c.Request.Header.Get("token")
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusBadRequest, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
log.NewInfo(req.OperationID, "InviteUserToGroup args ", req.String())
etcdConn := getcdv3.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImGroupName, req.OperationID)
if etcdConn == nil {
errMsg := req.OperationID + "getcdv3.GetDefaultConn == nil"
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
client := rpc.NewGroupClient(etcdConn)
RpcResp, err := client.InviteUserToGroups(context.Background(), req)
if err != nil {
log.NewError(req.OperationID, "InviteUserToGroup failed ", err.Error(), req.String())
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": err.Error()})
return
}
resp := api.InviteUserToGroupResp{CommResp: api.CommResp{ErrCode: RpcResp.ErrCode, ErrMsg: RpcResp.ErrMsg}}
log.NewInfo(req.OperationID, "InviteUserToGroups api return ", resp)
c.JSON(http.StatusOK, resp)
}
// @Summary 创建群组 // @Summary 创建群组
// @Description 创建群组 // @Description 创建群组

@ -38,6 +38,7 @@ func SetOptions(options map[string]bool, value bool) {
func newUserSendMsgReq(params *api.ManagementSendMsgReq) *pbChat.SendMsgReq { func newUserSendMsgReq(params *api.ManagementSendMsgReq) *pbChat.SendMsgReq {
var newContent string var newContent string
options := make(map[string]bool, 5)
var err error var err error
switch params.ContentType { switch params.ContentType {
case constant.Text: case constant.Text:
@ -52,13 +53,20 @@ func newUserSendMsgReq(params *api.ManagementSendMsgReq) *pbChat.SendMsgReq {
fallthrough fallthrough
case constant.File: case constant.File:
fallthrough fallthrough
case constant.CustomNotTriggerConversation:
fallthrough
case constant.CustomOnlineOnly:
fallthrough
case constant.AtText:
fallthrough
case constant.AdvancedRevoke: case constant.AdvancedRevoke:
utils.SetSwitchFromOptions(options, constant.IsUnreadCount, false)
newContent = utils.StructToJsonString(params.Content) newContent = utils.StructToJsonString(params.Content)
case constant.Revoke: case constant.Revoke:
utils.SetSwitchFromOptions(options, constant.IsUnreadCount, false)
newContent = params.Content["revokeMsgClientID"].(string) newContent = params.Content["revokeMsgClientID"].(string)
default: default:
} }
options := make(map[string]bool, 5)
if params.IsOnlineOnly { if params.IsOnlineOnly {
SetOptions(options, false) SetOptions(options, false)
} }
@ -151,6 +159,8 @@ func ManagementSendMsg(c *gin.Context) {
data = CustomElem{} data = CustomElem{}
case constant.CustomOnlineOnly: case constant.CustomOnlineOnly:
data = CustomElem{} data = CustomElem{}
case constant.AtText:
data = AtElem{}
//case constant.HasReadReceipt: //case constant.HasReadReceipt:
//case constant.Typing: //case constant.Typing:
//case constant.Quote: //case constant.Quote:
@ -465,6 +475,10 @@ type FileElem struct {
type AtElem struct { type AtElem struct {
Text string `mapstructure:"text"` Text string `mapstructure:"text"`
AtUserList []string `mapstructure:"atUserList"` AtUserList []string `mapstructure:"atUserList"`
AtUsersInfo []struct {
AtUserID string `json:"atUserID,omitempty"`
GroupNickname string `json:"groupNickname,omitempty"`
} `json:"atUsersInfo,omitempty"`
IsAtSelf bool `mapstructure:"isAtSelf"` IsAtSelf bool `mapstructure:"isAtSelf"`
} }
type LocationElem struct { type LocationElem struct {

@ -0,0 +1,207 @@
package msg
import (
api "Open_IM/pkg/base_info"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/log"
"Open_IM/pkg/common/token_verify"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
rpc "Open_IM/pkg/proto/msg"
"Open_IM/pkg/utils"
"context"
"github.com/gin-gonic/gin"
"net/http"
"strings"
)
func SetMessageReactionExtensions(c *gin.Context) {
var (
req api.SetMessageReactionExtensionsReq
resp api.SetMessageReactionExtensionsResp
reqPb rpc.SetMessageReactionExtensionsReq
)
if err := c.BindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": err.Error()})
return
}
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "req:", req)
if err := utils.CopyStructFields(&reqPb, &req); err != nil {
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "CopyStructFields", err.Error())
}
var ok bool
var errInfo string
ok, reqPb.OpUserID, errInfo, reqPb.OpUserIDPlatformID = token_verify.GetUserIDAndPlatformIDFromToken(c.Request.Header.Get("token"), req.OperationID)
if !ok {
errMsg := req.OperationID + " " + "GetUserIDFromToken failed " + errInfo + " token:" + c.Request.Header.Get("token")
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusBadRequest, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
grpcConn := getcdv3.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImMsgName, req.OperationID)
if grpcConn == nil {
errMsg := req.OperationID + " getcdv3.GetDefaultConn == nil"
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
msgClient := rpc.NewMsgClient(grpcConn)
respPb, err := msgClient.SetMessageReactionExtensions(context.Background(), &reqPb)
if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "DelMsgList failed", err.Error(), reqPb)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": constant.ErrServer.ErrCode, "errMsg": constant.ErrServer.ErrMsg + err.Error()})
return
}
resp.ErrCode = respPb.ErrCode
resp.ErrMsg = respPb.ErrMsg
resp.Data.ResultKeyValue = respPb.Result
resp.Data.MsgFirstModifyTime = reqPb.MsgFirstModifyTime
resp.Data.IsReact = reqPb.IsReact
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), resp)
c.JSON(http.StatusOK, resp)
}
func GetMessageListReactionExtensions(c *gin.Context) {
var (
req api.GetMessageListReactionExtensionsReq
resp api.GetMessageListReactionExtensionsResp
reqPb rpc.GetMessageListReactionExtensionsReq
)
if err := c.BindJSON(&req); err != nil {
c.JSON(http.StatusOK, gin.H{"errCode": 400, "errMsg": err.Error()})
return
}
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "req:", req)
if err := utils.CopyStructFields(&reqPb, &req); err != nil {
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "CopyStructFields", err.Error())
}
var ok bool
var errInfo string
ok, reqPb.OpUserID, errInfo = token_verify.GetUserIDFromToken(c.Request.Header.Get("token"), req.OperationID)
if !ok {
errMsg := req.OperationID + " " + "GetUserIDFromToken failed " + errInfo + " token:" + c.Request.Header.Get("token")
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusOK, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
grpcConn := getcdv3.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImMsgName, req.OperationID)
if grpcConn == nil {
errMsg := req.OperationID + " getcdv3.GetDefaultConn == nil"
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
msgClient := rpc.NewMsgClient(grpcConn)
respPb, err := msgClient.GetMessageListReactionExtensions(context.Background(), &reqPb)
if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "DelMsgList failed", err.Error(), reqPb)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": constant.ErrServer.ErrCode, "errMsg": constant.ErrServer.ErrMsg + err.Error()})
return
}
resp.ErrCode = respPb.ErrCode
resp.ErrMsg = respPb.ErrMsg
resp.Data = respPb.SingleMessageResult
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), resp)
c.JSON(http.StatusOK, resp)
}
func AddMessageReactionExtensions(c *gin.Context) {
var (
req api.AddMessageReactionExtensionsReq
resp api.AddMessageReactionExtensionsResp
reqPb rpc.AddMessageReactionExtensionsReq
)
if err := c.BindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": err.Error()})
return
}
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "req:", req)
if err := utils.CopyStructFields(&reqPb, &req); err != nil {
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "CopyStructFields", err.Error())
}
var ok bool
var errInfo string
ok, reqPb.OpUserID, errInfo, reqPb.OpUserIDPlatformID = token_verify.GetUserIDAndPlatformIDFromToken(c.Request.Header.Get("token"), req.OperationID)
if !ok {
errMsg := req.OperationID + " " + "GetUserIDFromToken failed " + errInfo + " token:" + c.Request.Header.Get("token")
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusBadRequest, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
grpcConn := getcdv3.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImMsgName, req.OperationID)
if grpcConn == nil {
errMsg := req.OperationID + " getcdv3.GetDefaultConn == nil"
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
msgClient := rpc.NewMsgClient(grpcConn)
respPb, err := msgClient.AddMessageReactionExtensions(context.Background(), &reqPb)
if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "DelMsgList failed", err.Error(), reqPb)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": constant.ErrServer.ErrCode, "errMsg": constant.ErrServer.ErrMsg + err.Error()})
return
}
resp.ErrCode = respPb.ErrCode
resp.ErrMsg = respPb.ErrMsg
resp.Data.ResultKeyValue = respPb.Result
resp.Data.MsgFirstModifyTime = respPb.MsgFirstModifyTime
resp.Data.IsReact = respPb.IsReact
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), resp)
c.JSON(http.StatusOK, resp)
}
func DeleteMessageReactionExtensions(c *gin.Context) {
var (
req api.DeleteMessageReactionExtensionsReq
resp api.DeleteMessageReactionExtensionsResp
reqPb rpc.DeleteMessageListReactionExtensionsReq
)
if err := c.BindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": err.Error()})
return
}
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "req:", req)
if err := utils.CopyStructFields(&reqPb, &req); err != nil {
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "CopyStructFields", err.Error())
}
var ok bool
var errInfo string
ok, reqPb.OpUserID, errInfo = token_verify.GetUserIDFromToken(c.Request.Header.Get("token"), req.OperationID)
if !ok {
errMsg := req.OperationID + " " + "GetUserIDFromToken failed " + errInfo + " token:" + c.Request.Header.Get("token")
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusBadRequest, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
grpcConn := getcdv3.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImMsgName, req.OperationID)
if grpcConn == nil {
errMsg := req.OperationID + " getcdv3.GetDefaultConn == nil"
log.NewError(req.OperationID, errMsg)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
msgClient := rpc.NewMsgClient(grpcConn)
respPb, err := msgClient.DeleteMessageReactionExtensions(context.Background(), &reqPb)
if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "DelMsgList failed", err.Error(), reqPb)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": constant.ErrServer.ErrCode, "errMsg": constant.ErrServer.ErrMsg + err.Error()})
return
}
resp.ErrCode = respPb.ErrCode
resp.ErrMsg = respPb.ErrMsg
resp.Data = respPb.Result
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), resp)
c.JSON(http.StatusOK, resp)
}

@ -58,6 +58,40 @@ func init() {
} }
} }
func GetUserToken(c *gin.Context) {
var (
req apiStruct.GetUserTokenRequest
resp apiStruct.GetUserTokenResponse
reqPb pbAdmin.GetUserTokenReq
respPb *pbAdmin.GetUserTokenResp
)
if err := c.BindJSON(&req); err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), err.Error())
c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": err.Error()})
return
}
reqPb.OperationID = req.OperationID
reqPb.UserID = req.UserID
reqPb.PlatformID = req.PlatFormID
etcdConn := getcdv3.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImAdminCMSName, reqPb.OperationID)
if etcdConn == nil {
errMsg := reqPb.OperationID + "getcdv3.GetDefaultConn == nil"
log.NewError(reqPb.OperationID, errMsg)
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": errMsg})
return
}
client := pbAdmin.NewAdminCMSClient(etcdConn)
respPb, err := client.GetUserToken(context.Background(), &reqPb)
if err != nil {
log.NewError(reqPb.OperationID, utils.GetSelfFuncName(), "rpc failed", err.Error())
c.JSON(http.StatusInternalServerError, gin.H{"errCode": 500, "errMsg": err.Error()})
return
}
resp.Token = respPb.Token
resp.ExpTime = respPb.ExpTime
c.JSON(http.StatusOK, gin.H{"errCode": respPb.CommonResp.ErrCode, "errMsg": respPb.CommonResp.ErrMsg, "data": resp})
}
// register // register
func AdminLogin(c *gin.Context) { func AdminLogin(c *gin.Context) {
var ( var (

@ -1,6 +1,7 @@
package middleware package middleware
import ( import (
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/common/token_verify" "Open_IM/pkg/common/token_verify"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
@ -20,6 +21,11 @@ func JWTAuth() gin.HandlerFunc {
c.JSON(http.StatusOK, gin.H{"errCode": 400, "errMsg": errInfo}) c.JSON(http.StatusOK, gin.H{"errCode": 400, "errMsg": errInfo})
return return
} else { } else {
if !utils.IsContain(userID, config.Config.Manager.AppManagerUid) {
c.Abort()
c.JSON(http.StatusOK, gin.H{"errCode": 400, "errMsg": "user is not admin"})
return
}
log.NewInfo("0", utils.GetSelfFuncName(), "failed: ", errInfo) log.NewInfo("0", utils.GetSelfFuncName(), "failed: ", errInfo)
} }
} }

@ -29,6 +29,8 @@ func NewGinRouter() *gin.Engine {
{ {
adminRouterGroup.POST("/login", admin.AdminLogin) adminRouterGroup.POST("/login", admin.AdminLogin)
adminRouterGroup.Use(middleware.JWTAuth()) adminRouterGroup.Use(middleware.JWTAuth())
adminRouterGroup.POST("/get_user_token", admin.GetUserToken)
adminRouterGroup.POST("/add_user_register_add_friend_id", admin.AddUserRegisterAddFriendIDList) adminRouterGroup.POST("/add_user_register_add_friend_id", admin.AddUserRegisterAddFriendIDList)
adminRouterGroup.POST("/reduce_user_register_reduce_friend_id", admin.ReduceUserRegisterAddFriendIDList) adminRouterGroup.POST("/reduce_user_register_reduce_friend_id", admin.ReduceUserRegisterAddFriendIDList)
adminRouterGroup.POST("/get_user_register_reduce_friend_id_list", admin.GetUserRegisterAddFriendIDList) adminRouterGroup.POST("/get_user_register_reduce_friend_id_list", admin.GetUserRegisterAddFriendIDList)

@ -7,11 +7,12 @@ import (
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
server_api_params "Open_IM/pkg/proto/sdk_ws" server_api_params "Open_IM/pkg/proto/sdk_ws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
goRedis "github.com/go-redis/redis/v8"
"github.com/golang/protobuf/proto"
"math" "math"
"strconv" "strconv"
"strings" "strings"
goRedis "github.com/go-redis/redis/v8"
"github.com/golang/protobuf/proto"
) )
const oldestList = 0 const oldestList = 0
@ -33,9 +34,7 @@ func ResetUserGroupMinSeq(operationID, groupID string, userIDList []string) erro
log.NewError(operationID, utils.GetSelfFuncName(), "GetGroupUserMinSeq failed", groupID, userID, err.Error()) log.NewError(operationID, utils.GetSelfFuncName(), "GetGroupUserMinSeq failed", groupID, userID, err.Error())
continue continue
} }
if userMinSeq > uint64(minSeq) { if userMinSeq < uint64(minSeq) {
err = db.DB.SetGroupUserMinSeq(groupID, userID, userMinSeq)
} else {
err = db.DB.SetGroupUserMinSeq(groupID, userID, uint64(minSeq)) err = db.DB.SetGroupUserMinSeq(groupID, userID, uint64(minSeq))
} }
if err != nil { if err != nil {
@ -54,7 +53,20 @@ func DeleteMongoMsgAndResetRedisSeq(operationID, userID string) error {
if minSeq == 0 { if minSeq == 0 {
return nil return nil
} }
log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDMap: ", delStruct, "minSeq", minSeq) log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDStruct: ", delStruct, "minSeq", minSeq)
userCurrentMinSeq, err := db.DB.GetUserMinSeq(userID)
if err != nil && err != goRedis.Nil {
return err
}
userCurrentMaxSeq, err := db.DB.GetUserMaxSeq(userID)
if err != nil && err != goRedis.Nil {
return err
}
if userCurrentMinSeq > userCurrentMaxSeq {
minSeq = uint32(userCurrentMaxSeq)
}
err = db.DB.SetUserMinSeq(userID, minSeq) err = db.DB.SetUserMinSeq(userID, minSeq)
return utils.Wrap(err, "") return utils.Wrap(err, "")
} }
@ -82,7 +94,7 @@ func (d *delMsgRecursionStruct) getSetMinSeq() uint32 {
// index 0....19(del) 20...69 // index 0....19(del) 20...69
// seq 70 // seq 70
// set minSeq 21 // set minSeq 21
// recursion // recursion 删除list并且返回设置的最小seq
func deleteMongoMsg(operationID string, ID string, index int64, delStruct *delMsgRecursionStruct) (uint32, error) { func deleteMongoMsg(operationID string, ID string, index int64, delStruct *delMsgRecursionStruct) (uint32, error) {
// find from oldest list // find from oldest list
msgs, err := db.DB.GetUserMsgListByIndex(ID, index) msgs, err := db.DB.GetUserMsgListByIndex(ID, index)
@ -105,50 +117,53 @@ func deleteMongoMsg(operationID string, ID string, index int64, delStruct *delMs
if len(msgs.Msg) > db.GetSingleGocMsgNum() { if len(msgs.Msg) > db.GetSingleGocMsgNum() {
log.NewWarn(operationID, utils.GetSelfFuncName(), "msgs too large", len(msgs.Msg), msgs.UID) log.NewWarn(operationID, utils.GetSelfFuncName(), "msgs too large", len(msgs.Msg), msgs.UID)
} }
for i, msg := range msgs.Msg { if msgs.Msg[len(msgs.Msg)-1].SendTime+(int64(config.Config.Mongo.DBRetainChatRecords)*24*60*60*1000) < utils.GetCurrentTimestampByMill() && msgListIsFull(msgs) {
// 找到列表中不需要删除的消息了, 表示为递归到最后一个块 delStruct.delUidList = append(delStruct.delUidList, msgs.UID)
if utils.GetCurrentTimestampByMill() < msg.SendTime+(int64(config.Config.Mongo.DBRetainChatRecords)*24*60*60*1000) { lastMsgPb := &server_api_params.MsgData{}
log.NewDebug(operationID, ID, "find uid", msgs.UID) err = proto.Unmarshal(msgs.Msg[len(msgs.Msg)-1].Msg, lastMsgPb)
// 删除块失败 递归结束 返回0 if err != nil {
if err := delMongoMsgsPhysical(delStruct.delUidList); err != nil { log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
return 0, err return 0, utils.Wrap(err, "proto.Unmarshal failed")
} }
// unMarshall失败 块删除成功 设置为最小seq delStruct.minSeq = lastMsgPb.Seq + 1
msgPb := &server_api_params.MsgData{} log.NewDebug(operationID, utils.GetSelfFuncName(), msgs.UID, "add to delUidList", "minSeq", lastMsgPb.Seq+1)
if err = proto.Unmarshal(msg.Msg, msgPb); err != nil { } else {
return delStruct.getSetMinSeq(), utils.Wrap(err, "") var hasMarkDelFlag bool
for index, msg := range msgs.Msg {
if msg.SendTime == 0 {
continue
} }
// 如果不是块中第一个,就把前面比他早插入的全部设置空 seq字段除外。 msgPb := &server_api_params.MsgData{}
if i > 0 { err = proto.Unmarshal(msg.Msg, msgPb)
err = db.DB.ReplaceMsgToBlankByIndex(msgs.UID, i-1)
if err != nil { if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), msgs.UID, i) log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
return 0, utils.Wrap(err, "proto.Unmarshal failed")
}
if utils.GetCurrentTimestampByMill() > msg.SendTime+(int64(config.Config.Mongo.DBRetainChatRecords)*24*60*60*1000) {
msgPb.Status = constant.MsgDeleted
bytes, _ := proto.Marshal(msgPb)
msgs.Msg[index].Msg = bytes
msgs.Msg[index].SendTime = 0
hasMarkDelFlag = true
} else {
if err := delMongoMsgsPhysical(delStruct.delUidList); err != nil {
return 0, err
}
if hasMarkDelFlag {
log.NewInfo(operationID, ID, "hasMarkDelFlag", "index:", index, "msgPb:", msgPb, msgs.UID)
if err := db.DB.UpdateOneMsgList(msgs); err != nil {
return delStruct.getSetMinSeq(), utils.Wrap(err, "") return delStruct.getSetMinSeq(), utils.Wrap(err, "")
} }
} }
// 递归结束
return msgPb.Seq, nil return msgPb.Seq, nil
} }
} }
// 该列表中消息全部为老消息并且列表满了, 加入删除列表继续递归
lastMsgPb := &server_api_params.MsgData{}
err = proto.Unmarshal(msgs.Msg[len(msgs.Msg)-1].Msg, lastMsgPb)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
return 0, utils.Wrap(err, "proto.Unmarshal failed")
}
delStruct.minSeq = lastMsgPb.Seq
if msgListIsFull(msgs) {
delStruct.delUidList = append(delStruct.delUidList, msgs.UID)
} }
log.NewDebug(operationID, ID, "continue", delStruct) log.NewDebug(operationID, ID, "continue to", delStruct)
// 继续递归 index+1 // 继续递归 index+1
seq, err := deleteMongoMsg(operationID, ID, index+1, delStruct) seq, err := deleteMongoMsg(operationID, ID, index+1, delStruct)
if err != nil {
return seq, utils.Wrap(err, "deleteMongoMsg failed") return seq, utils.Wrap(err, "deleteMongoMsg failed")
} }
return seq, nil
}
func msgListIsFull(chat *db.UserChat) bool { func msgListIsFull(chat *db.UserChat) bool {
index, _ := strconv.Atoi(strings.Split(chat.UID, ":")[1]) index, _ := strconv.Atoi(strings.Split(chat.UID, ":")[1])
@ -173,10 +188,11 @@ func checkMaxSeqWithMongo(operationID, ID string, diffusionType int) error {
} }
if err != nil { if err != nil {
if err == goRedis.Nil { if err == goRedis.Nil {
return nil
} } else {
return utils.Wrap(err, "GetUserMaxSeq failed") return utils.Wrap(err, "GetUserMaxSeq failed")
} }
}
msg, err := db.DB.GetNewestMsg(ID) msg, err := db.DB.GetNewestMsg(ID)
if err != nil { if err != nil {
return utils.Wrap(err, "GetNewestMsg failed") return utils.Wrap(err, "GetNewestMsg failed")
@ -184,17 +200,10 @@ func checkMaxSeqWithMongo(operationID, ID string, diffusionType int) error {
if msg == nil { if msg == nil {
return nil return nil
} }
var seqMongo uint32 if math.Abs(float64(msg.Seq-uint32(seqRedis))) > 10 {
msgPb := &server_api_params.MsgData{} log.NewWarn(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, ID, "redis maxSeq is different with msg.Seq > 10", "status: ", msg.Status, msg.SendTime)
err = proto.Unmarshal(msg.Msg, msgPb)
if err != nil {
return utils.Wrap(err, "")
}
seqMongo = msgPb.Seq
if math.Abs(float64(seqMongo-uint32(seqRedis))) > 10 {
log.NewWarn(operationID, utils.GetSelfFuncName(), seqMongo, seqRedis, "redis maxSeq is different with msg.Seq > 10", ID, diffusionType)
} else { } else {
log.NewInfo(operationID, utils.GetSelfFuncName(), diffusionType, ID, "seq and msg OK", seqMongo, seqRedis) log.NewInfo(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, ID, "seq and msg OK", "status:", msg.Status, msg.SendTime)
} }
return nil return nil
} }

@ -2,33 +2,225 @@ package cronTask
import ( import (
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/db"
server_api_params "Open_IM/pkg/proto/sdk_ws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context"
"fmt"
"strconv"
"github.com/go-redis/redis/v8"
"github.com/golang/protobuf/proto"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"gopkg.in/mgo.v2/bson"
"testing" "testing"
"time"
) )
var (
redisClient *redis.Client
mongoClient *mongo.Collection
)
func GenUserChat(startSeq, stopSeq, delSeq, index uint32, userID string) *db.UserChat {
chat := &db.UserChat{UID: userID + ":" + strconv.Itoa(int(index))}
for i := startSeq; i <= stopSeq; i++ {
msg := server_api_params.MsgData{
SendID: "sendID1",
RecvID: "recvID1",
GroupID: "",
ClientMsgID: "xxx",
ServerMsgID: "xxx",
SenderPlatformID: 1,
SenderNickname: "testNickName",
SenderFaceURL: "testFaceURL",
SessionType: 1,
MsgFrom: 100,
ContentType: 101,
Content: []byte("testFaceURL"),
Seq: uint32(i),
SendTime: time.Now().Unix(),
CreateTime: time.Now().Unix(),
Status: 1,
}
bytes, _ := proto.Marshal(&msg)
var sendTime int64
if i <= delSeq {
sendTime = 10000
} else {
sendTime = utils.GetCurrentTimestampByMill()
}
chat.Msg = append(chat.Msg, db.MsgInfo{SendTime: int64(sendTime), Msg: bytes})
}
return chat
}
func SetUserMaxSeq(userID string, seq int) error {
return redisClient.Set(context.Background(), "REDIS_USER_INCR_SEQ"+userID, seq, 0).Err()
}
func GetUserMinSeq(userID string) (uint64, error) {
key := "REDIS_USER_MIN_SEQ:" + userID
seq, err := redisClient.Get(context.Background(), key).Result()
return uint64(utils.StringToInt(seq)), err
}
func CreateChat(userChat *db.UserChat) error {
_, err := mongoClient.InsertOne(context.Background(), userChat)
return err
}
func DelChat(uid string, index int) error {
_, err := mongoClient.DeleteOne(context.Background(), bson.M{"uid": uid + ":" + strconv.Itoa(index)})
return err
}
func TestDeleteMongoMsgAndResetRedisSeq(t *testing.T) { func TestDeleteMongoMsgAndResetRedisSeq(t *testing.T) {
operationID := getCronTaskOperationID() operationID := getCronTaskOperationID()
testUserIDList := []string{"test_del_id1", "test_del_id2", "test_del_id3", "test_del_id4", "test_del_id5"} redisClient = redis.NewClient(&redis.Options{
for _, userID := range testUserIDList { Addr: "127.0.0.1:16379",
operationID = userID + "-" + operationID Password: "openIM123", // no password set
if err := DeleteMongoMsgAndResetRedisSeq(operationID, userID); err != nil { DB: 0, // use default DB
t.Error("checkMaxSeqWithMongo failed", userID) })
mongoUri := fmt.Sprintf("mongodb://%s:%s@%s/%s?maxPoolSize=%d&authSource=admin",
"root", "openIM123", "127.0.0.1:37017",
"openIM", 100)
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(mongoUri))
mongoClient = client.Database("openIM").Collection("msg")
testUID1 := "test_del_id1"
err = DelChat(testUID1, 0)
err = SetUserMaxSeq(testUID1, 600)
userChat := GenUserChat(1, 600, 200, 0, testUID1)
err = CreateChat(userChat)
if err := DeleteMongoMsgAndResetRedisSeq(operationID, testUID1); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID1)
}
if err := checkMaxSeqWithMongo(operationID, testUID1, constant.WriteDiffusion); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID1)
}
minSeq, err := GetUserMinSeq(testUID1)
if err != nil {
t.Error("err is not nil", testUID1, err.Error())
}
if minSeq != 201 {
t.Error("test1 is not the same", "minSeq:", minSeq, "targetSeq", 201)
} }
if err := checkMaxSeqWithMongo(operationID, userID, constant.WriteDiffusion); err != nil {
t.Error("checkMaxSeqWithMongo failed", userID) testUID2 := "test_del_id2"
err = DelChat(testUID2, 0)
err = DelChat(testUID2, 1)
err = SetUserMaxSeq(testUID2, 7000)
userChat = GenUserChat(1, 4999, 5000, 0, testUID2)
userChat2 := GenUserChat(5000, 7000, 6000, 1, testUID2)
err = CreateChat(userChat)
err = CreateChat(userChat2)
if err := DeleteMongoMsgAndResetRedisSeq(operationID, testUID2); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID2)
}
if err := checkMaxSeqWithMongo(operationID, testUID2, constant.WriteDiffusion); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID2)
}
minSeq, err = GetUserMinSeq(testUID2)
if err != nil {
t.Error("err is not nil", testUID2, err.Error())
}
if minSeq != 6001 {
t.Error("test2 is not the same", "minSeq:", minSeq, "targetSeq", 6001)
}
testUID3 := "test_del_id3"
err = DelChat(testUID3, 0)
err = SetUserMaxSeq(testUID3, 4999)
userChat = GenUserChat(1, 4999, 5000, 0, testUID3)
err = CreateChat(userChat)
if err := DeleteMongoMsgAndResetRedisSeq(operationID, testUID3); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID3)
}
if err := checkMaxSeqWithMongo(operationID, testUID3, constant.WriteDiffusion); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID3)
}
minSeq, err = GetUserMinSeq(testUID3)
if err != nil {
t.Error("err is not nil", testUID3, err.Error())
}
if minSeq != 5000 {
t.Error("test3 is not the same", "minSeq:", minSeq, "targetSeq", 5000)
} }
testUID4 := "test_del_id4"
err = DelChat(testUID4, 0)
err = DelChat(testUID4, 1)
err = DelChat(testUID4, 2)
err = SetUserMaxSeq(testUID4, 12000)
userChat = GenUserChat(1, 4999, 5000, 0, testUID4)
userChat2 = GenUserChat(5000, 9999, 10000, 1, testUID4)
userChat3 := GenUserChat(10000, 12000, 11000, 2, testUID4)
err = CreateChat(userChat)
err = CreateChat(userChat2)
err = CreateChat(userChat3)
if err := DeleteMongoMsgAndResetRedisSeq(operationID, testUID4); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID4)
}
if err := checkMaxSeqWithMongo(operationID, testUID4, constant.WriteDiffusion); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID4)
}
minSeq, err = GetUserMinSeq(testUID4)
if err != nil {
t.Error("err is not nil", testUID4, err.Error())
}
if minSeq != 11001 {
t.Error("test4 is not the same", "minSeq:", minSeq, "targetSeq", 11001)
}
testUID5 := "test_del_id5"
err = DelChat(testUID5, 0)
err = DelChat(testUID5, 1)
err = SetUserMaxSeq(testUID5, 9999)
userChat = GenUserChat(1, 4999, 5000, 0, testUID5)
userChat2 = GenUserChat(5000, 9999, 10000, 1, testUID5)
err = CreateChat(userChat)
err = CreateChat(userChat2)
if err := DeleteMongoMsgAndResetRedisSeq(operationID, testUID5); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID4)
}
if err := checkMaxSeqWithMongo(operationID, testUID5, constant.WriteDiffusion); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID5)
}
minSeq, err = GetUserMinSeq(testUID5)
if err != nil {
t.Error("err is not nil", testUID5, err.Error())
}
if minSeq != 10000 {
t.Error("test5 is not the same", "minSeq:", minSeq, "targetSeq", 10000)
} }
testWorkingGroupIDList := []string{"test_del_id1", "test_del_id2", "test_del_id3", "test_del_id4", "test_del_id5"} testUID6 := "test_del_id6"
for _, groupID := range testWorkingGroupIDList { err = DelChat(testUID5, 0)
operationID = groupID + "-" + operationID err = DelChat(testUID5, 1)
log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "userIDList:", testUserIDList) err = DelChat(testUID5, 2)
if err := ResetUserGroupMinSeq(operationID, groupID, testUserIDList); err != nil { err = DelChat(testUID5, 3)
t.Error("checkMaxSeqWithMongo failed", groupID) userChat = GenUserChat(1, 4999, 5000, 0, testUID6)
userChat2 = GenUserChat(5000, 9999, 10000, 1, testUID6)
userChat3 = GenUserChat(10000, 14999, 13000, 2, testUID6)
userChat4 := GenUserChat(15000, 19999, 0, 3, testUID6)
err = CreateChat(userChat)
err = CreateChat(userChat2)
err = CreateChat(userChat3)
err = CreateChat(userChat4)
if err := DeleteMongoMsgAndResetRedisSeq(operationID, testUID6); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID6)
}
if err := checkMaxSeqWithMongo(operationID, testUID6, constant.WriteDiffusion); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID6)
} }
if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil { minSeq, err = GetUserMinSeq(testUID6)
t.Error("checkMaxSeqWithMongo failed", groupID) if err != nil {
t.Error("err is not nil", testUID6, err.Error())
} }
if minSeq != 13001 {
t.Error("test3 is not the same", "minSeq:", minSeq, "targetSeq", 13001)
} }
} }

@ -8,73 +8,94 @@ import (
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"fmt" "fmt"
"github.com/robfig/cron/v3"
"time" "time"
"github.com/robfig/cron/v3"
) )
const cronTaskOperationID = "cronTaskOperationID-" const cronTaskOperationID = "cronTaskOperationID-"
func StartCronTask() { func StartCronTask(userID, workingGroupID string) {
log.NewPrivateLog("cron") log.NewPrivateLog("cron")
log.NewInfo(utils.OperationIDGenerator(), "start cron task", "cron config", config.Config.Mongo.ChatRecordsClearTime) log.NewInfo(utils.OperationIDGenerator(), "start cron task", "cron config", config.Config.Mongo.ChatRecordsClearTime)
c := cron.New()
fmt.Println("cron config", config.Config.Mongo.ChatRecordsClearTime) fmt.Println("cron config", config.Config.Mongo.ChatRecordsClearTime)
_, err := c.AddFunc(config.Config.Mongo.ChatRecordsClearTime, func() { if userID != "" {
// user msg clear operationID := getCronTaskOperationID()
StartClearMsg(operationID, []string{userID})
}
if workingGroupID != "" {
operationID := getCronTaskOperationID()
StartClearWorkingGroupMsg(operationID, []string{workingGroupID})
}
if userID != "" || workingGroupID != "" {
fmt.Println("clear msg finished")
return
}
c := cron.New()
_, err := c.AddFunc(config.Config.Mongo.ChatRecordsClearTime, ClearAll)
if err != nil {
fmt.Println("start cron failed", err.Error(), config.Config.Mongo.ChatRecordsClearTime)
panic(err)
}
c.Start()
fmt.Println("start cron task success")
for {
time.Sleep(10 * time.Second)
}
}
func getCronTaskOperationID() string {
return cronTaskOperationID + utils.OperationIDGenerator()
}
func ClearAll() {
operationID := getCronTaskOperationID() operationID := getCronTaskOperationID()
log.NewInfo(operationID, "====================== start del cron task ======================") log.NewInfo(operationID, "====================== start del cron task ======================")
var err error
userIDList, err := im_mysql_model.SelectAllUserID() userIDList, err := im_mysql_model.SelectAllUserID()
if err == nil { if err == nil {
StartClearMsg(operationID, userIDList)
} else {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
}
// working group msg clear
workingGroupIDList, err := im_mysql_model.GetGroupIDListByGroupType(constant.WorkingGroup)
if err == nil {
StartClearWorkingGroupMsg(operationID, workingGroupIDList)
} else {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
}
log.NewInfo(operationID, "====================== start del cron finished ======================")
}
func StartClearMsg(operationID string, userIDList []string) {
log.NewDebug(operationID, utils.GetSelfFuncName(), "userIDList: ", userIDList) log.NewDebug(operationID, utils.GetSelfFuncName(), "userIDList: ", userIDList)
userIDList = []string{"4158779020"}
for _, userID := range userIDList { for _, userID := range userIDList {
if err := DeleteMongoMsgAndResetRedisSeq(operationID, userID); err != nil { if err := DeleteMongoMsgAndResetRedisSeq(operationID, userID); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), userID) log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), userID)
} }
//if err := checkMaxSeqWithMongo(operationID, userID, constant.WriteDiffusion); err != nil { if err := checkMaxSeqWithMongo(operationID, userID, constant.WriteDiffusion); err != nil {
// log.NewError(operationID, utils.GetSelfFuncName(), userID, err) log.NewError(operationID, utils.GetSelfFuncName(), userID, err)
//}
} }
} else {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
} }
return }
// working group msg clear
workingGroupIDList, err := im_mysql_model.GetGroupIDListByGroupType(constant.WorkingGroup) func StartClearWorkingGroupMsg(operationID string, workingGroupIDList []string) {
if err == nil {
log.NewDebug(operationID, utils.GetSelfFuncName(), "workingGroupIDList: ", workingGroupIDList) log.NewDebug(operationID, utils.GetSelfFuncName(), "workingGroupIDList: ", workingGroupIDList)
for _, groupID := range workingGroupIDList { for _, groupID := range workingGroupIDList {
userIDList, err = rocksCache.GetGroupMemberIDListFromCache(groupID) userIDList, err := rocksCache.GetGroupMemberIDListFromCache(groupID)
if err != nil { if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID) log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID)
continue continue
} }
log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "userIDList:", userIDList) log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "workingGroupIDList:", userIDList)
if err := ResetUserGroupMinSeq(operationID, groupID, userIDList); err != nil { if err := ResetUserGroupMinSeq(operationID, groupID, userIDList); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userIDList) log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userIDList)
} }
//if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil { if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil {
// log.NewError(operationID, utils.GetSelfFuncName(), groupID, err) log.NewError(operationID, utils.GetSelfFuncName(), groupID, err)
//}
} }
} else {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
} }
log.NewInfo(operationID, "====================== start del cron finished ======================")
})
if err != nil {
fmt.Println("start cron failed", err.Error(), config.Config.Mongo.ChatRecordsClearTime)
panic(err)
}
c.Start()
fmt.Println("start cron task success")
for {
time.Sleep(10 * time.Second)
}
}
func getCronTaskOperationID() string {
return cronTaskOperationID + utils.OperationIDGenerator()
} }

@ -1,6 +0,0 @@
package main
//
//func main() {
// db.DB.BatchInsertChat()
//}

@ -10,7 +10,6 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
//"github.com/jinzhu/gorm"
"net/http" "net/http"
"time" "time"
) )

@ -9,7 +9,7 @@ import (
"time" "time"
) )
func callbackUserOnline(operationID, userID string, platformID int, token string) cbApi.CommonCallbackResp { func callbackUserOnline(operationID, userID string, platformID int, token string, isAppBackground bool, connID string) cbApi.CommonCallbackResp {
callbackResp := cbApi.CommonCallbackResp{OperationID: operationID} callbackResp := cbApi.CommonCallbackResp{OperationID: operationID}
if !config.Config.Callback.CallbackUserOnline.Enable { if !config.Config.Callback.CallbackUserOnline.Enable {
return callbackResp return callbackResp
@ -26,6 +26,8 @@ func callbackUserOnline(operationID, userID string, platformID int, token string
UserID: userID, UserID: userID,
}, },
Seq: int(time.Now().UnixNano() / 1e6), Seq: int(time.Now().UnixNano() / 1e6),
IsAppBackground: isAppBackground,
ConnID: connID,
} }
callbackUserOnlineResp := &cbApi.CallbackUserOnlineResp{CommonCallbackResp: &callbackResp} callbackUserOnlineResp := &cbApi.CallbackUserOnlineResp{CommonCallbackResp: &callbackResp}
if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackUserOnlineCommand, callbackUserOnlineReq, callbackUserOnlineResp, config.Config.Callback.CallbackUserOnline.CallbackTimeOut); err != nil { if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackUserOnlineCommand, callbackUserOnlineReq, callbackUserOnlineResp, config.Config.Callback.CallbackUserOnline.CallbackTimeOut); err != nil {
@ -35,7 +37,7 @@ func callbackUserOnline(operationID, userID string, platformID int, token string
return callbackResp return callbackResp
} }
func callbackUserOffline(operationID, userID string, platformID int) cbApi.CommonCallbackResp { func callbackUserOffline(operationID, userID string, platformID int, connID string) cbApi.CommonCallbackResp {
callbackResp := cbApi.CommonCallbackResp{OperationID: operationID} callbackResp := cbApi.CommonCallbackResp{OperationID: operationID}
if !config.Config.Callback.CallbackUserOffline.Enable { if !config.Config.Callback.CallbackUserOffline.Enable {
return callbackResp return callbackResp
@ -51,6 +53,7 @@ func callbackUserOffline(operationID, userID string, platformID int) cbApi.Commo
UserID: userID, UserID: userID,
}, },
Seq: int(time.Now().UnixNano() / 1e6), Seq: int(time.Now().UnixNano() / 1e6),
ConnID: connID,
} }
callbackUserOfflineResp := &cbApi.CallbackUserOfflineResp{CommonCallbackResp: &callbackResp} callbackUserOfflineResp := &cbApi.CallbackUserOfflineResp{CommonCallbackResp: &callbackResp}
if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackUserOfflineCommand, callbackOfflineReq, callbackUserOfflineResp, config.Config.Callback.CallbackUserOffline.CallbackTimeOut); err != nil { if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackUserOfflineCommand, callbackOfflineReq, callbackUserOfflineResp, config.Config.Callback.CallbackUserOffline.CallbackTimeOut); err != nil {

@ -15,10 +15,12 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/gob" "encoding/gob"
"github.com/golang/protobuf/proto"
"github.com/gorilla/websocket"
"runtime" "runtime"
"strings" "strings"
"github.com/golang/protobuf/proto"
"github.com/gorilla/websocket"
"google.golang.org/grpc"
) )
func (ws *WServer) msgParse(conn *UserConn, binaryMsg []byte) { func (ws *WServer) msgParse(conn *UserConn, binaryMsg []byte) {
@ -40,7 +42,12 @@ func (ws *WServer) msgParse(conn *UserConn, binaryMsg []byte) {
return return
} }
log.NewInfo(m.OperationID, "Basic Info Authentication Success", m.SendID, m.MsgIncr, m.ReqIdentifier) log.NewInfo(m.OperationID, "Basic Info Authentication Success", m.SendID, m.MsgIncr, m.ReqIdentifier)
if m.SendID != conn.userID {
if err = conn.Close(); err != nil {
log.NewError(m.OperationID, "close ws conn failed", conn.userID, "send id", m.SendID, err.Error())
return
}
}
switch m.ReqIdentifier { switch m.ReqIdentifier {
case constant.WSGetNewestSeq: case constant.WSGetNewestSeq:
log.NewInfo(m.OperationID, "getSeqReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) log.NewInfo(m.OperationID, "getSeqReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
@ -60,6 +67,9 @@ func (ws *WServer) msgParse(conn *UserConn, binaryMsg []byte) {
case constant.WsLogoutMsg: case constant.WsLogoutMsg:
log.NewInfo(m.OperationID, "conn.Close()", m.SendID, m.MsgIncr, m.ReqIdentifier) log.NewInfo(m.OperationID, "conn.Close()", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.userLogoutReq(conn, &m) ws.userLogoutReq(conn, &m)
case constant.WsSetBackgroundStatus:
log.NewInfo(m.OperationID, "WsSetBackgroundStatus", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.setUserDeviceBackground(conn, &m)
default: default:
log.Error(m.OperationID, "ReqIdentifier failed ", m.SendID, m.MsgIncr, m.ReqIdentifier) log.Error(m.OperationID, "ReqIdentifier failed ", m.SendID, m.MsgIncr, m.ReqIdentifier)
} }
@ -142,7 +152,8 @@ func (ws *WServer) pullMsgBySeqListReq(conn *UserConn, m *Req) {
return return
} }
msgClient := pbChat.NewMsgClient(grpcConn) msgClient := pbChat.NewMsgClient(grpcConn)
reply, err := msgClient.PullMessageBySeqList(context.Background(), &rpcReq) maxSizeOption := grpc.MaxCallRecvMsgSize(1024 * 1024 * 20)
reply, err := msgClient.PullMessageBySeqList(context.Background(), &rpcReq, maxSizeOption)
if err != nil { if err != nil {
log.NewError(rpcReq.OperationID, "pullMsgBySeqListReq err", err.Error()) log.NewError(rpcReq.OperationID, "pullMsgBySeqListReq err", err.Error())
nReply.ErrCode = 200 nReply.ErrCode = 200
@ -175,8 +186,10 @@ func (ws *WServer) pullMsgBySeqListResp(conn *UserConn, m *Req, pb *sdk_ws.PullM
} }
func (ws *WServer) userLogoutReq(conn *UserConn, m *Req) { func (ws *WServer) userLogoutReq(conn *UserConn, m *Req) {
log.NewInfo(m.OperationID, "Ws call success to userLogoutReq start", m.SendID, m.ReqIdentifier, m.MsgIncr, string(m.Data)) log.NewInfo(m.OperationID, "Ws call success to userLogoutReq start", m.SendID, m.ReqIdentifier, m.MsgIncr, string(m.Data))
rpcReq := push.DelUserPushTokenReq{} rpcReq := push.DelUserPushTokenReq{}
rpcReq.UserID = m.SendID rpcReq.UserID = m.SendID
rpcReq.PlatformID = conn.PlatformID
rpcReq.OperationID = m.OperationID rpcReq.OperationID = m.OperationID
grpcConn := getcdv3.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImPushName, m.OperationID) grpcConn := getcdv3.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImPushName, m.OperationID)
if grpcConn == nil { if grpcConn == nil {
@ -389,3 +402,28 @@ func SetTokenKicked(userID string, platformID int, operationID string) {
return return
} }
} }
// setUserDeviceBackground handles a WsSetBackgroundStatus request: after the
// payload passes argsValidate, it records the app's foreground/background
// state on the connection and fires the user-online callback so the business
// server is informed of the switch. A response (success or the validation
// errCode/errMsg) is always written back on the same connection.
func (ws *WServer) setUserDeviceBackground(conn *UserConn, m *Req) {
	isPass, errCode, errMsg, pData := ws.argsValidate(m, constant.WsSetBackgroundStatus, m.OperationID)
	if isPass {
		req := pData.(*sdk_ws.SetAppBackgroundStatusReq)
		conn.IsBackground = req.IsBackground
		// Callback failure is logged only; the ws response below is still sent.
		callbackResp := callbackUserOnline(m.OperationID, conn.userID, int(conn.PlatformID), conn.token, conn.IsBackground, conn.connID)
		if callbackResp.ErrCode != 0 {
			// Fixed log text: the call above is callbackUserOnline, but the
			// original message incorrectly said "callbackUserOffline failed".
			log.NewError(m.OperationID, utils.GetSelfFuncName(), "callbackUserOnline failed", callbackResp)
		}
		log.NewInfo(m.OperationID, "SetUserDeviceBackground", "success", *conn, req.IsBackground)
	}
	ws.setUserDeviceBackgroundResp(conn, m, errCode, errMsg)
}
// setUserDeviceBackgroundResp echoes the outcome of a set-background request
// back to the client over its websocket connection, copying the request
// identifiers so the client can correlate the reply with its request.
func (ws *WServer) setUserDeviceBackgroundResp(conn *UserConn, m *Req, errCode int32, errMsg string) {
	ws.sendMsg(conn, Resp{
		ReqIdentifier: m.ReqIdentifier,
		MsgIncr:       m.MsgIncr,
		OperationID:   m.OperationID,
		ErrCode:       errCode,
		ErrMsg:        errMsg,
	})
}

@ -101,47 +101,48 @@ func (r *RPCServer) run() {
} }
} }
func (r *RPCServer) OnlinePushMsg(_ context.Context, in *pbRelay.OnlinePushMsgReq) (*pbRelay.OnlinePushMsgResp, error) { func (r *RPCServer) OnlinePushMsg(_ context.Context, in *pbRelay.OnlinePushMsgReq) (*pbRelay.OnlinePushMsgResp, error) {
log.NewInfo(in.OperationID, "PushMsgToUser is arriving", in.String()) //log.NewInfo(in.OperationID, "PushMsgToUser is arriving", in.String())
var resp []*pbRelay.SingleMsgToUserPlatform //var resp []*pbRelay.SingleMsgToUserPlatform
msgBytes, _ := proto.Marshal(in.MsgData) //msgBytes, _ := proto.Marshal(in.MsgData)
mReply := Resp{ //mReply := Resp{
ReqIdentifier: constant.WSPushMsg, // ReqIdentifier: constant.WSPushMsg,
OperationID: in.OperationID, // OperationID: in.OperationID,
Data: msgBytes, // Data: msgBytes,
} //}
var replyBytes bytes.Buffer //var replyBytes bytes.Buffer
enc := gob.NewEncoder(&replyBytes) //enc := gob.NewEncoder(&replyBytes)
err := enc.Encode(mReply) //err := enc.Encode(mReply)
if err != nil { //if err != nil {
log.NewError(in.OperationID, "data encode err", err.Error()) // log.NewError(in.OperationID, "data encode err", err.Error())
} //}
var tag bool //var tag bool
recvID := in.PushToUserID //recvID := in.PushToUserID
for _, v := range r.platformList { //for _, v := range r.platformList {
if conn := ws.getUserConn(recvID, v); conn != nil { // if conn := ws.getUserConn(recvID, v); conn != nil {
tag = true // tag = true
resultCode := sendMsgToUser(conn, replyBytes.Bytes(), in, v, recvID) // resultCode := sendMsgToUser(conn, replyBytes.Bytes(), in, v, recvID)
temp := &pbRelay.SingleMsgToUserPlatform{ // temp := &pbRelay.SingleMsgToUserPlatform{
ResultCode: resultCode, // ResultCode: resultCode,
RecvID: recvID, // RecvID: recvID,
RecvPlatFormID: int32(v), // RecvPlatFormID: int32(v),
} // }
resp = append(resp, temp) // resp = append(resp, temp)
} else { // } else {
temp := &pbRelay.SingleMsgToUserPlatform{ // temp := &pbRelay.SingleMsgToUserPlatform{
ResultCode: -1, // ResultCode: -1,
RecvID: recvID, // RecvID: recvID,
RecvPlatFormID: int32(v), // RecvPlatFormID: int32(v),
} // }
resp = append(resp, temp) // resp = append(resp, temp)
} // }
} //}
if !tag { //if !tag {
log.NewDebug(in.OperationID, "push err ,no matched ws conn not in map", in.String()) // log.NewDebug(in.OperationID, "push err ,no matched ws conn not in map", in.String())
} //}
return &pbRelay.OnlinePushMsgResp{ //return &pbRelay.OnlinePushMsgResp{
Resp: resp, // Resp: resp,
}, nil //}, nil
return nil, nil
} }
func (r *RPCServer) GetUsersOnlineStatus(_ context.Context, req *pbRelay.GetUsersOnlineStatusReq) (*pbRelay.GetUsersOnlineStatusResp, error) { func (r *RPCServer) GetUsersOnlineStatus(_ context.Context, req *pbRelay.GetUsersOnlineStatusReq) (*pbRelay.GetUsersOnlineStatusResp, error) {
log.NewInfo(req.OperationID, "rpc GetUsersOnlineStatus arrived server", req.String()) log.NewInfo(req.OperationID, "rpc GetUsersOnlineStatus arrived server", req.String())
@ -154,11 +155,13 @@ func (r *RPCServer) GetUsersOnlineStatus(_ context.Context, req *pbRelay.GetUser
temp := new(pbRelay.GetUsersOnlineStatusResp_SuccessResult) temp := new(pbRelay.GetUsersOnlineStatusResp_SuccessResult)
temp.UserID = userID temp.UserID = userID
userConnMap := ws.getUserAllCons(userID) userConnMap := ws.getUserAllCons(userID)
for platform, userConn := range userConnMap { for platform, userConns := range userConnMap {
if userConn != nil { if len(userConns) != 0 {
ps := new(pbRelay.GetUsersOnlineStatusResp_SuccessDetail) ps := new(pbRelay.GetUsersOnlineStatusResp_SuccessDetail)
ps.Platform = constant.PlatformIDToName(platform) ps.Platform = constant.PlatformIDToName(platform)
ps.Status = constant.OnlineStatus ps.Status = constant.OnlineStatus
ps.ConnID = userConns[0].connID
ps.IsBackground = userConns[0].IsBackground
temp.Status = constant.OnlineStatus temp.Status = constant.OnlineStatus
temp.DetailPlatformStatus = append(temp.DetailPlatformStatus, ps) temp.DetailPlatformStatus = append(temp.DetailPlatformStatus, ps)
} }
@ -194,104 +197,166 @@ func (r *RPCServer) SuperGroupOnlineBatchPushOneMsg(_ context.Context, req *pbRe
UserID: v, UserID: v,
} }
userConnMap := ws.getUserAllCons(v) userConnMap := ws.getUserAllCons(v)
for platform, userConn := range userConnMap { for platform, userConns := range userConnMap {
if userConn != nil { if len(userConns) != 0 {
log.NewWarn(req.OperationID, "conns is ", len(userConns), platform, userConns)
for _, userConn := range userConns {
temp := &pbRelay.SingleMsgToUserPlatform{
RecvID: v,
RecvPlatFormID: int32(platform),
}
if !userConn.IsBackground || req.MsgData.ContentType == constant.SuperGroupUpdateNotification || req.MsgData.ContentType == constant.SignalingNotification {
resultCode := sendMsgBatchToUser(userConn, replyBytes.Bytes(), req, platform, v) resultCode := sendMsgBatchToUser(userConn, replyBytes.Bytes(), req, platform, v)
if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) { if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) {
tempT.OnlinePush = true tempT.OnlinePush = true
promePkg.PromeInc(promePkg.MsgOnlinePushSuccessCounter) promePkg.PromeInc(promePkg.MsgOnlinePushSuccessCounter)
log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recvPlatForm", constant.PlatformIDToName(platform), "recvID", v) log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recvPlatForm", constant.PlatformIDToName(platform), "recvID", v, "background status", userConn.IsBackground, userConn.userID)
temp := &pbRelay.SingleMsgToUserPlatform{ temp.ResultCode = resultCode
ResultCode: resultCode, if req.MsgData.ContentType == constant.SignalingNotification && userConn.IsBackground {
RecvID: v, log.Info(req.OperationID, "recv signalingNotification backgroud", req.MsgData.String())
RecvPlatFormID: int32(platform), temp.ResultCode = -2
tempT.OnlinePush = false
} }
resp = append(resp, temp) resp = append(resp, temp)
} }
} else {
temp.ResultCode = -2
resp = append(resp, temp)
}
}
} }
} }
tempT.Resp = resp tempT.Resp = resp
singleUserResult = append(singleUserResult, tempT) singleUserResult = append(singleUserResult, tempT)
} }
return &pbRelay.OnlineBatchPushOneMsgResp{ return &pbRelay.OnlineBatchPushOneMsgResp{
SinglePushResult: singleUserResult, SinglePushResult: singleUserResult,
}, nil }, nil
} }
func (r *RPCServer) OnlineBatchPushOneMsg(_ context.Context, req *pbRelay.OnlineBatchPushOneMsgReq) (*pbRelay.OnlineBatchPushOneMsgResp, error) { func (r *RPCServer) SuperGroupBackgroundOnlinePush(_ context.Context, req *pbRelay.OnlineBatchPushOneMsgReq) (*pbRelay.OnlineBatchPushOneMsgResp, error) {
log.NewInfo(req.OperationID, "BatchPushMsgToUser is arriving", req.String()) log.NewInfo(req.OperationID, "BatchPushMsgToUser is arriving", req.String())
var singleUserResult []*pbRelay.SingelMsgToUserResultList var singleUserResult []*pbRelay.SingelMsgToUserResultList
//r.GetBatchMsgForPush(req.OperationID,req.MsgData,req.PushToUserIDList,)
msgBytes, _ := proto.Marshal(req.MsgData)
mReply := Resp{
ReqIdentifier: constant.WSPushMsg,
OperationID: req.OperationID,
Data: msgBytes,
}
var replyBytes bytes.Buffer
enc := gob.NewEncoder(&replyBytes)
err := enc.Encode(mReply)
if err != nil {
log.NewError(req.OperationID, "data encode err", err.Error())
}
for _, v := range req.PushToUserIDList { for _, v := range req.PushToUserIDList {
var resp []*pbRelay.SingleMsgToUserPlatform var resp []*pbRelay.SingleMsgToUserPlatform
tempT := &pbRelay.SingelMsgToUserResultList{ tempT := &pbRelay.SingelMsgToUserResultList{
UserID: v, UserID: v,
} }
userConnMap := ws.getUserAllCons(v) userConnMap := ws.getUserAllCons(v)
var platformList []int for platform, userConns := range userConnMap {
for k, _ := range userConnMap { if len(userConns) != 0 {
platformList = append(platformList, k) for _, userConn := range userConns {
}
log.Debug(req.OperationID, "GetSingleUserMsgForPushPlatforms begin", req.MsgData.Seq, v, platformList, req.MsgData.String())
needPushMapList := r.GetSingleUserMsgForPushPlatforms(req.OperationID, req.MsgData, v, platformList)
log.Debug(req.OperationID, "GetSingleUserMsgForPushPlatforms end", req.MsgData.Seq, v, platformList, len(needPushMapList))
for platform, list := range needPushMapList {
if list != nil {
log.Debug(req.OperationID, "needPushMapList ", "userID: ", v, "platform: ", platform, "push msg num:")
//for _, v := range list {
// log.Debug(req.OperationID, "req.MsgData.MsgDataList begin", "len: ", len(req.MsgData.MsgDataList), v.String())
// req.MsgData.MsgDataList = append(req.MsgData.MsgDataList, v)
// log.Debug(req.OperationID, "req.MsgData.MsgDataList end", "len: ", len(req.MsgData.MsgDataList))
//}
msgBytes, err := proto.Marshal(list)
if err != nil {
log.Error(req.OperationID, "proto marshal err", err.Error())
continue
}
req.MsgData.MsgDataList = msgBytes
//req.MsgData.MsgDataList = append(req.MsgData.MsgDataList, v)
log.Debug(req.OperationID, "r.encodeWsData no string")
//log.Debug(req.OperationID, "r.encodeWsData data0 list ", req.MsgData.MsgDataList[0].String())
log.Debug(req.OperationID, "r.encodeWsData ", req.MsgData.String())
replyBytes, err := r.encodeWsData(req.MsgData, req.OperationID)
if err != nil {
log.Error(req.OperationID, "encodeWsData failed ", req.MsgData.String())
continue
}
log.Debug(req.OperationID, "encodeWsData", "len: ", replyBytes.Len())
resultCode := sendMsgBatchToUser(userConnMap[platform], replyBytes.Bytes(), req, platform, v)
if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) {
tempT.OnlinePush = true
log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recv PlatForm", constant.PlatformIDToName(platform), "recvID", v)
temp := &pbRelay.SingleMsgToUserPlatform{ temp := &pbRelay.SingleMsgToUserPlatform{
ResultCode: resultCode,
RecvID: v, RecvID: v,
RecvPlatFormID: int32(platform), RecvPlatFormID: int32(platform),
} }
resp = append(resp, temp) resultCode := sendMsgBatchToUser(userConn, replyBytes.Bytes(), req, platform, v)
} if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) {
} else {
if utils.IsContainInt(platform, r.pushTerminal) {
tempT.OnlinePush = true tempT.OnlinePush = true
temp := &pbRelay.SingleMsgToUserPlatform{ promePkg.PromeInc(promePkg.MsgOnlinePushSuccessCounter)
ResultCode: 0, log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recvPlatForm", constant.PlatformIDToName(platform), "recvID", v)
RecvID: v, temp.ResultCode = resultCode
RecvPlatFormID: int32(platform),
}
resp = append(resp, temp) resp = append(resp, temp)
} }
} }
}
} }
tempT.Resp = resp tempT.Resp = resp
singleUserResult = append(singleUserResult, tempT) singleUserResult = append(singleUserResult, tempT)
} }
return &pbRelay.OnlineBatchPushOneMsgResp{ return &pbRelay.OnlineBatchPushOneMsgResp{
SinglePushResult: singleUserResult, SinglePushResult: singleUserResult,
}, nil }, nil
} }
// OnlineBatchPushOneMsg is the relay RPC for pushing one message to a list of
// users over their websocket connections. The implementation is currently
// disabled: the former body is retained below, commented out, and the method
// is a no-op.
//
// NOTE(review): this returns a nil *OnlineBatchPushOneMsgResp with a nil
// error — callers must tolerate a nil response; confirm, or prefer returning
// an empty &pbRelay.OnlineBatchPushOneMsgResp{} instead.
func (r *RPCServer) OnlineBatchPushOneMsg(_ context.Context, req *pbRelay.OnlineBatchPushOneMsgReq) (*pbRelay.OnlineBatchPushOneMsgResp, error) {
	//log.NewInfo(req.OperationID, "BatchPushMsgToUser is arriving", req.String())
	//var singleUserResult []*pbRelay.SingelMsgToUserResultList
	//
	//for _, v := range req.PushToUserIDList {
	//	var resp []*pbRelay.SingleMsgToUserPlatform
	//	tempT := &pbRelay.SingelMsgToUserResultList{
	//		UserID: v,
	//	}
	//	userConnMap := ws.getUserAllCons(v)
	//	var platformList []int
	//	for k, _ := range userConnMap {
	//		platformList = append(platformList, k)
	//	}
	//	log.Debug(req.OperationID, "GetSingleUserMsgForPushPlatforms begin", req.MsgData.Seq, v, platformList, req.MsgData.String())
	//	needPushMapList := r.GetSingleUserMsgForPushPlatforms(req.OperationID, req.MsgData, v, platformList)
	//	log.Debug(req.OperationID, "GetSingleUserMsgForPushPlatforms end", req.MsgData.Seq, v, platformList, len(needPushMapList))
	//	for platform, list := range needPushMapList {
	//		if list != nil {
	//			log.Debug(req.OperationID, "needPushMapList ", "userID: ", v, "platform: ", platform, "push msg num:")
	//			//for _, v := range list {
	//			//	log.Debug(req.OperationID, "req.MsgData.MsgDataList begin", "len: ", len(req.MsgData.MsgDataList), v.String())
	//			//	req.MsgData.MsgDataList = append(req.MsgData.MsgDataList, v)
	//			//	log.Debug(req.OperationID, "req.MsgData.MsgDataList end", "len: ", len(req.MsgData.MsgDataList))
	//			//}
	//			msgBytes, err := proto.Marshal(list)
	//			if err != nil {
	//				log.Error(req.OperationID, "proto marshal err", err.Error())
	//				continue
	//			}
	//			req.MsgData.MsgDataList = msgBytes
	//			//req.MsgData.MsgDataList = append(req.MsgData.MsgDataList, v)
	//			log.Debug(req.OperationID, "r.encodeWsData no string")
	//			//log.Debug(req.OperationID, "r.encodeWsData data0 list ", req.MsgData.MsgDataList[0].String())
	//
	//			log.Debug(req.OperationID, "r.encodeWsData ", req.MsgData.String())
	//			replyBytes, err := r.encodeWsData(req.MsgData, req.OperationID)
	//			if err != nil {
	//				log.Error(req.OperationID, "encodeWsData failed ", req.MsgData.String())
	//				continue
	//			}
	//			log.Debug(req.OperationID, "encodeWsData", "len: ", replyBytes.Len())
	//			resultCode := sendMsgBatchToUser(userConnMap[platform], replyBytes.Bytes(), req, platform, v)
	//			if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) {
	//				tempT.OnlinePush = true
	//				log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recv PlatForm", constant.PlatformIDToName(platform), "recvID", v)
	//				temp := &pbRelay.SingleMsgToUserPlatform{
	//					ResultCode:     resultCode,
	//					RecvID:         v,
	//					RecvPlatFormID: int32(platform),
	//				}
	//				resp = append(resp, temp)
	//			}
	//		} else {
	//			if utils.IsContainInt(platform, r.pushTerminal) {
	//				tempT.OnlinePush = true
	//				temp := &pbRelay.SingleMsgToUserPlatform{
	//					ResultCode:     0,
	//					RecvID:         v,
	//					RecvPlatFormID: int32(platform),
	//				}
	//				resp = append(resp, temp)
	//			}
	//		}
	//	}
	//	tempT.Resp = resp
	//	singleUserResult = append(singleUserResult, tempT)
	//}
	//return &pbRelay.OnlineBatchPushOneMsgResp{
	//	SinglePushResult: singleUserResult,
	//}, nil
	return nil, nil
}
func (r *RPCServer) encodeWsData(wsData *sdk_ws.MsgData, operationID string) (bytes.Buffer, error) { func (r *RPCServer) encodeWsData(wsData *sdk_ws.MsgData, operationID string) (bytes.Buffer, error) {
log.Debug(operationID, "encodeWsData begin", wsData.String()) log.Debug(operationID, "encodeWsData begin", wsData.String())
msgBytes, err := proto.Marshal(wsData) msgBytes, err := proto.Marshal(wsData)
@ -321,10 +386,11 @@ func (r *RPCServer) KickUserOffline(_ context.Context, req *pbRelay.KickUserOffl
log.NewWarn(req.OperationID, "SetTokenKicked ", v, req.PlatformID, req.OperationID) log.NewWarn(req.OperationID, "SetTokenKicked ", v, req.PlatformID, req.OperationID)
SetTokenKicked(v, int(req.PlatformID), req.OperationID) SetTokenKicked(v, int(req.PlatformID), req.OperationID)
oldConnMap := ws.getUserAllCons(v) oldConnMap := ws.getUserAllCons(v)
if conn, ok := oldConnMap[int(req.PlatformID)]; ok { // user->map[platform->conn] if conns, ok := oldConnMap[int(req.PlatformID)]; ok { // user->map[platform->conn]
log.NewWarn(req.OperationID, "send kick msg, close connection ", req.PlatformID, v) log.NewWarn(req.OperationID, "send kick msg, close connection ", req.PlatformID, v)
ws.sendKickMsg(conn) for _, conn := range conns {
conn.Close() ws.sendKickMsg(conn, req.OperationID)
}
} }
} }
return &pbRelay.KickUserOfflineResp{}, nil return &pbRelay.KickUserOfflineResp{}, nil

@ -107,6 +107,18 @@ func (ws *WServer) argsValidate(m *Req, r int32, operationID string) (isPass boo
} }
return true, 0, "", data return true, 0, "", data
case constant.WsSetBackgroundStatus:
data := open_im_sdk.SetAppBackgroundStatusReq{}
if err := proto.Unmarshal(m.Data, &data); err != nil {
log.Error(operationID, "Decode Data struct err", err.Error(), r)
return false, 203, err.Error(), nil
}
if err := validate.Struct(data); err != nil {
log.Error(operationID, "data args validate err", err.Error(), r)
return false, 204, err.Error(), nil
}
return true, 0, "", &data
default: default:
} }
return false, 204, "args err", nil return false, 204, "args err", nil

@ -11,8 +11,11 @@ import (
pbRelay "Open_IM/pkg/proto/relay" pbRelay "Open_IM/pkg/proto/relay"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"bytes" "bytes"
"compress/gzip"
"context" "context"
"encoding/gob" "encoding/gob"
"io/ioutil"
"strconv"
"strings" "strings"
go_redis "github.com/go-redis/redis/v8" go_redis "github.com/go-redis/redis/v8"
@ -29,22 +32,26 @@ import (
type UserConn struct { type UserConn struct {
*websocket.Conn *websocket.Conn
w *sync.Mutex w *sync.Mutex
platformID int32 PlatformID int32
PushedMaxSeq uint32 PushedMaxSeq uint32
IsCompress bool
userID string
IsBackground bool
token string
connID string
} }
type WServer struct { type WServer struct {
wsAddr string wsAddr string
wsMaxConnNum int wsMaxConnNum int
wsUpGrader *websocket.Upgrader wsUpGrader *websocket.Upgrader
wsConnToUser map[*UserConn]map[int]string wsUserToConn map[string]map[int][]*UserConn
wsUserToConn map[string]map[int]*UserConn
} }
func (ws *WServer) onInit(wsPort int) { func (ws *WServer) onInit(wsPort int) {
ws.wsAddr = ":" + utils.IntToString(wsPort) ws.wsAddr = ":" + utils.IntToString(wsPort)
ws.wsMaxConnNum = config.Config.LongConnSvr.WebsocketMaxConnNum ws.wsMaxConnNum = config.Config.LongConnSvr.WebsocketMaxConnNum
ws.wsConnToUser = make(map[*UserConn]map[int]string) ws.wsUserToConn = make(map[string]map[int][]*UserConn)
ws.wsUserToConn = make(map[string]map[int]*UserConn)
ws.wsUpGrader = &websocket.Upgrader{ ws.wsUpGrader = &websocket.Upgrader{
HandshakeTimeout: time.Duration(config.Config.LongConnSvr.WebsocketTimeOut) * time.Second, HandshakeTimeout: time.Duration(config.Config.LongConnSvr.WebsocketTimeOut) * time.Second,
ReadBufferSize: config.Config.LongConnSvr.WebsocketMaxMsgLen, ReadBufferSize: config.Config.LongConnSvr.WebsocketMaxMsgLen,
@ -69,15 +76,15 @@ func (ws *WServer) wsHandler(w http.ResponseWriter, r *http.Request) {
operationID = utils.OperationIDGenerator() operationID = utils.OperationIDGenerator()
} }
log.Debug(operationID, utils.GetSelfFuncName(), " args: ", query) log.Debug(operationID, utils.GetSelfFuncName(), " args: ", query)
if ws.headerCheck(w, r, operationID) { if isPass, compression := ws.headerCheck(w, r, operationID); isPass {
conn, err := ws.wsUpGrader.Upgrade(w, r, nil) //Conn is obtained through the upgraded escalator conn, err := ws.wsUpGrader.Upgrade(w, r, nil) //Conn is obtained through the upgraded escalator
if err != nil { if err != nil {
log.Error(operationID, "upgrade http conn err", err.Error(), query) log.Error(operationID, "upgrade http conn err", err.Error(), query)
return return
} else { } else {
newConn := &UserConn{conn, new(sync.Mutex), utils.StringToInt32(query["platformID"][0]), 0} newConn := &UserConn{conn, new(sync.Mutex), utils.StringToInt32(query["platformID"][0]), 0, compression, query["sendID"][0], false, query["token"][0], utils.Md5(conn.RemoteAddr().String() + "_" + strconv.Itoa(int(utils.GetCurrentTimestampByMill())))}
userCount++ userCount++
ws.addUserConn(query["sendID"][0], utils.StringToInt(query["platformID"][0]), newConn, query["token"][0], operationID) ws.addUserConn(query["sendID"][0], utils.StringToInt(query["platformID"][0]), newConn, query["token"][0], newConn.connID, operationID)
go ws.readMsg(newConn) go ws.readMsg(newConn)
} }
} else { } else {
@ -97,6 +104,30 @@ func (ws *WServer) readMsg(conn *UserConn) {
ws.delUserConn(conn) ws.delUserConn(conn)
return return
} }
if messageType == websocket.CloseMessage {
log.NewWarn("", "WS receive error ", " userIP", conn.RemoteAddr().String(), "userUid", "platform", "error", string(msg))
userCount--
ws.delUserConn(conn)
return
}
log.NewDebug("", "size", utils.ByteSize(uint64(len(msg))))
if conn.IsCompress {
buff := bytes.NewBuffer(msg)
reader, err := gzip.NewReader(buff)
if err != nil {
log.NewWarn("", "un gzip read failed")
continue
}
msg, err = ioutil.ReadAll(reader)
if err != nil {
log.NewWarn("", "ReadAll failed")
continue
}
err = reader.Close()
if err != nil {
log.NewWarn("", "reader close failed")
}
}
ws.msgParse(conn, msg) ws.msgParse(conn, msg)
} }
} }
@ -110,6 +141,17 @@ func (ws *WServer) SetWriteTimeout(conn *UserConn, timeout int) {
func (ws *WServer) writeMsg(conn *UserConn, a int, msg []byte) error { func (ws *WServer) writeMsg(conn *UserConn, a int, msg []byte) error {
conn.w.Lock() conn.w.Lock()
defer conn.w.Unlock() defer conn.w.Unlock()
if conn.IsCompress {
var buffer bytes.Buffer
gz := gzip.NewWriter(&buffer)
if _, err := gz.Write(msg); err != nil {
return utils.Wrap(err, "")
}
if err := gz.Close(); err != nil {
return utils.Wrap(err, "")
}
msg = buffer.Bytes()
}
conn.SetWriteDeadline(time.Now().Add(time.Duration(60) * time.Second)) conn.SetWriteDeadline(time.Now().Add(time.Duration(60) * time.Second))
return conn.WriteMessage(a, msg) return conn.WriteMessage(a, msg)
} }
@ -151,6 +193,7 @@ func (ws *WServer) MultiTerminalLoginCheckerWithLock(uid string, platformID int,
defer rwLock.Unlock() defer rwLock.Unlock()
log.NewInfo(operationID, utils.GetSelfFuncName(), " rpc args: ", uid, platformID, token) log.NewInfo(operationID, utils.GetSelfFuncName(), " rpc args: ", uid, platformID, token)
switch config.Config.MultiLoginPolicy { switch config.Config.MultiLoginPolicy {
case constant.DefalutNotKick:
case constant.PCAndOther: case constant.PCAndOther:
if constant.PlatformNameToClass(constant.PlatformIDToName(platformID)) == constant.TerminalPC { if constant.PlatformNameToClass(constant.PlatformIDToName(platformID)) == constant.TerminalPC {
return return
@ -158,8 +201,11 @@ func (ws *WServer) MultiTerminalLoginCheckerWithLock(uid string, platformID int,
fallthrough fallthrough
case constant.AllLoginButSameTermKick: case constant.AllLoginButSameTermKick:
if oldConnMap, ok := ws.wsUserToConn[uid]; ok { // user->map[platform->conn] if oldConnMap, ok := ws.wsUserToConn[uid]; ok { // user->map[platform->conn]
if oldConn, ok := oldConnMap[platformID]; ok { if oldConns, ok := oldConnMap[platformID]; ok {
log.NewDebug(operationID, uid, platformID, "kick old conn") log.NewDebug(operationID, uid, platformID, "kick old conn")
for _, conn := range oldConns {
ws.sendKickMsg(conn, operationID)
}
m, err := db.DB.GetTokenMapByUidPid(uid, constant.PlatformIDToName(platformID)) m, err := db.DB.GetTokenMapByUidPid(uid, constant.PlatformIDToName(platformID))
if err != nil && err != go_redis.Nil { if err != nil && err != go_redis.Nil {
log.NewError(operationID, "get token from redis err", err.Error(), uid, constant.PlatformIDToName(platformID)) log.NewError(operationID, "get token from redis err", err.Error(), uid, constant.PlatformIDToName(platformID))
@ -182,16 +228,12 @@ func (ws *WServer) MultiTerminalLoginCheckerWithLock(uid string, platformID int,
log.NewError(operationID, "SetTokenMapByUidPid err", err.Error(), uid, platformID, m) log.NewError(operationID, "SetTokenMapByUidPid err", err.Error(), uid, platformID, m)
return return
} }
err = oldConn.Close()
delete(oldConnMap, platformID) delete(oldConnMap, platformID)
ws.wsUserToConn[uid] = oldConnMap ws.wsUserToConn[uid] = oldConnMap
if len(oldConnMap) == 0 { if len(oldConnMap) == 0 {
delete(ws.wsUserToConn, uid) delete(ws.wsUserToConn, uid)
} }
delete(ws.wsConnToUser, oldConn)
if err != nil {
log.NewError(operationID, "conn close err", err.Error(), uid, platformID)
}
} else { } else {
log.NewWarn(operationID, "abnormal uid-conn ", uid, platformID, oldConnMap[platformID]) log.NewWarn(operationID, "abnormal uid-conn ", uid, platformID, oldConnMap[platformID])
} }
@ -206,6 +248,7 @@ func (ws *WServer) MultiTerminalLoginCheckerWithLock(uid string, platformID int,
func (ws *WServer) MultiTerminalLoginChecker(uid string, platformID int, newConn *UserConn, token string, operationID string) { func (ws *WServer) MultiTerminalLoginChecker(uid string, platformID int, newConn *UserConn, token string, operationID string) {
switch config.Config.MultiLoginPolicy { switch config.Config.MultiLoginPolicy {
case constant.DefalutNotKick:
case constant.PCAndOther: case constant.PCAndOther:
if constant.PlatformNameToClass(constant.PlatformIDToName(platformID)) == constant.TerminalPC { if constant.PlatformNameToClass(constant.PlatformIDToName(platformID)) == constant.TerminalPC {
return return
@ -213,9 +256,11 @@ func (ws *WServer) MultiTerminalLoginChecker(uid string, platformID int, newConn
fallthrough fallthrough
case constant.AllLoginButSameTermKick: case constant.AllLoginButSameTermKick:
if oldConnMap, ok := ws.wsUserToConn[uid]; ok { // user->map[platform->conn] if oldConnMap, ok := ws.wsUserToConn[uid]; ok { // user->map[platform->conn]
if oldConn, ok := oldConnMap[platformID]; ok { if oldConns, ok := oldConnMap[platformID]; ok {
log.NewDebug(operationID, uid, platformID, "kick old conn") log.NewDebug(operationID, uid, platformID, "kick old conn")
ws.sendKickMsg(oldConn) for _, conn := range oldConns {
ws.sendKickMsg(conn, operationID)
}
m, err := db.DB.GetTokenMapByUidPid(uid, constant.PlatformIDToName(platformID)) m, err := db.DB.GetTokenMapByUidPid(uid, constant.PlatformIDToName(platformID))
if err != nil && err != go_redis.Nil { if err != nil && err != go_redis.Nil {
log.NewError(operationID, "get token from redis err", err.Error(), uid, constant.PlatformIDToName(platformID)) log.NewError(operationID, "get token from redis err", err.Error(), uid, constant.PlatformIDToName(platformID))
@ -238,16 +283,11 @@ func (ws *WServer) MultiTerminalLoginChecker(uid string, platformID int, newConn
log.NewError(operationID, "SetTokenMapByUidPid err", err.Error(), uid, platformID, m) log.NewError(operationID, "SetTokenMapByUidPid err", err.Error(), uid, platformID, m)
return return
} }
err = oldConn.Close()
delete(oldConnMap, platformID) delete(oldConnMap, platformID)
ws.wsUserToConn[uid] = oldConnMap ws.wsUserToConn[uid] = oldConnMap
if len(oldConnMap) == 0 { if len(oldConnMap) == 0 {
delete(ws.wsUserToConn, uid) delete(ws.wsUserToConn, uid)
} }
delete(ws.wsConnToUser, oldConn)
if err != nil {
log.NewError(operationID, "conn close err", err.Error(), uid, platformID)
}
callbackResp := callbackUserKickOff(operationID, uid, platformID) callbackResp := callbackUserKickOff(operationID, uid, platformID)
if callbackResp.ErrCode != 0 { if callbackResp.ErrCode != 0 {
log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOffline failed", callbackResp) log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOffline failed", callbackResp)
@ -264,11 +304,12 @@ func (ws *WServer) MultiTerminalLoginChecker(uid string, platformID int, newConn
case constant.WebAndOther: case constant.WebAndOther:
} }
} }
func (ws *WServer) sendKickMsg(oldConn *UserConn) { func (ws *WServer) sendKickMsg(oldConn *UserConn, operationID string) {
mReply := Resp{ mReply := Resp{
ReqIdentifier: constant.WSKickOnlineMsg, ReqIdentifier: constant.WSKickOnlineMsg,
ErrCode: constant.ErrTokenInvalid.ErrCode, ErrCode: constant.ErrTokenInvalid.ErrCode,
ErrMsg: constant.ErrTokenInvalid.ErrMsg, ErrMsg: constant.ErrTokenInvalid.ErrMsg,
OperationID: operationID,
} }
var b bytes.Buffer var b bytes.Buffer
enc := gob.NewEncoder(&b) enc := gob.NewEncoder(&b)
@ -281,36 +322,42 @@ func (ws *WServer) sendKickMsg(oldConn *UserConn) {
if err != nil { if err != nil {
log.NewError(mReply.OperationID, mReply.ReqIdentifier, mReply.ErrCode, mReply.ErrMsg, "sendKickMsg WS WriteMsg error", oldConn.RemoteAddr().String(), err.Error()) log.NewError(mReply.OperationID, mReply.ReqIdentifier, mReply.ErrCode, mReply.ErrMsg, "sendKickMsg WS WriteMsg error", oldConn.RemoteAddr().String(), err.Error())
} }
errClose := oldConn.Close()
if errClose != nil {
log.NewError(mReply.OperationID, mReply.ReqIdentifier, mReply.ErrCode, mReply.ErrMsg, "close old conn error", oldConn.RemoteAddr().String(), err.Error())
}
} }
func (ws *WServer) addUserConn(uid string, platformID int, conn *UserConn, token string, operationID string) { func (ws *WServer) addUserConn(uid string, platformID int, conn *UserConn, token string, connID, operationID string) {
rwLock.Lock() rwLock.Lock()
defer rwLock.Unlock() defer rwLock.Unlock()
log.Info(operationID, utils.GetSelfFuncName(), " args: ", uid, platformID, conn, token, "ip: ", conn.RemoteAddr().String()) log.Info(operationID, utils.GetSelfFuncName(), " args: ", uid, platformID, conn, token, "ip: ", conn.RemoteAddr().String())
callbackResp := callbackUserOnline(operationID, uid, platformID, token) callbackResp := callbackUserOnline(operationID, uid, platformID, token, false, connID)
if callbackResp.ErrCode != 0 { if callbackResp.ErrCode != 0 {
log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOnline resp:", callbackResp) log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOnline resp:", callbackResp)
} }
go ws.MultiTerminalLoginRemoteChecker(uid, int32(platformID), token, operationID) go ws.MultiTerminalLoginRemoteChecker(uid, int32(platformID), token, operationID)
ws.MultiTerminalLoginChecker(uid, platformID, conn, token, operationID) ws.MultiTerminalLoginChecker(uid, platformID, conn, token, operationID)
if oldConnMap, ok := ws.wsUserToConn[uid]; ok { if oldConnMap, ok := ws.wsUserToConn[uid]; ok {
oldConnMap[platformID] = conn if conns, ok := oldConnMap[platformID]; ok {
conns = append(conns, conn)
oldConnMap[platformID] = conns
} else {
var conns []*UserConn
conns = append(conns, conn)
oldConnMap[platformID] = conns
}
ws.wsUserToConn[uid] = oldConnMap ws.wsUserToConn[uid] = oldConnMap
log.Debug(operationID, "user not first come in, add conn ", uid, platformID, conn, oldConnMap) log.Debug(operationID, "user not first come in, add conn ", uid, platformID, conn, oldConnMap)
} else { } else {
i := make(map[int]*UserConn) i := make(map[int][]*UserConn)
i[platformID] = conn var conns []*UserConn
conns = append(conns, conn)
i[platformID] = conns
ws.wsUserToConn[uid] = i ws.wsUserToConn[uid] = i
log.Debug(operationID, "user first come in, new user, conn", uid, platformID, conn, ws.wsUserToConn[uid]) log.Debug(operationID, "user first come in, new user, conn", uid, platformID, conn, ws.wsUserToConn[uid])
} }
if oldStringMap, ok := ws.wsConnToUser[conn]; ok {
oldStringMap[platformID] = uid
ws.wsConnToUser[conn] = oldStringMap
} else {
i := make(map[int]string)
i[platformID] = uid
ws.wsConnToUser[conn] = i
}
count := 0 count := 0
for _, v := range ws.wsUserToConn { for _, v := range ws.wsUserToConn {
count = count + len(v) count = count + len(v)
@ -323,56 +370,67 @@ func (ws *WServer) delUserConn(conn *UserConn) {
rwLock.Lock() rwLock.Lock()
defer rwLock.Unlock() defer rwLock.Unlock()
operationID := utils.OperationIDGenerator() operationID := utils.OperationIDGenerator()
var uid string platform := int(conn.PlatformID)
var platform int
if oldStringMap, ok := ws.wsConnToUser[conn]; ok { if oldConnMap, ok := ws.wsUserToConn[conn.userID]; ok { // only recycle self conn
for k, v := range oldStringMap { if oldconns, okMap := oldConnMap[platform]; okMap {
platform = k
uid = v var a []*UserConn
for _, client := range oldconns {
if client != conn {
a = append(a, client)
} }
if oldConnMap, ok := ws.wsUserToConn[uid]; ok { }
if len(a) != 0 {
oldConnMap[platform] = a
} else {
delete(oldConnMap, platform) delete(oldConnMap, platform)
ws.wsUserToConn[uid] = oldConnMap }
}
ws.wsUserToConn[conn.userID] = oldConnMap
if len(oldConnMap) == 0 { if len(oldConnMap) == 0 {
delete(ws.wsUserToConn, uid) delete(ws.wsUserToConn, conn.userID)
} }
count := 0 count := 0
for _, v := range ws.wsUserToConn { for _, v := range ws.wsUserToConn {
count = count + len(v) count = count + len(v)
} }
log.Debug(operationID, "WS delete operation", "", "wsUser deleted", ws.wsUserToConn, "disconnection_uid", uid, "disconnection_platform", platform, "online_user_num", len(ws.wsUserToConn), "online_conn_num", count) log.Debug(operationID, "WS delete operation", "", "wsUser deleted", ws.wsUserToConn, "disconnection_uid", conn.userID, "disconnection_platform", platform, "online_user_num", len(ws.wsUserToConn), "online_conn_num", count)
} else {
log.Debug(operationID, "WS delete operation", "", "wsUser deleted", ws.wsUserToConn, "disconnection_uid", uid, "disconnection_platform", platform, "online_user_num", len(ws.wsUserToConn))
} }
delete(ws.wsConnToUser, conn)
}
err := conn.Close() err := conn.Close()
if err != nil { if err != nil {
log.Error(operationID, " close err", "", "uid", uid, "platform", platform) log.Error(operationID, " close err", "", "uid", conn.userID, "platform", platform)
}
if conn.PlatformID == 0 || conn.connID == "" {
log.NewWarn(operationID, utils.GetSelfFuncName(), "PlatformID or connID is null", conn.PlatformID, conn.connID)
} }
callbackResp := callbackUserOffline(operationID, uid, platform) callbackResp := callbackUserOffline(operationID, conn.userID, int(conn.PlatformID), conn.connID)
if callbackResp.ErrCode != 0 { if callbackResp.ErrCode != 0 {
log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOffline failed", callbackResp) log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOffline failed", callbackResp)
} }
promePkg.PromeGaugeDec(promePkg.OnlineUserGauge) promePkg.PromeGaugeDec(promePkg.OnlineUserGauge)
}
func (ws *WServer) getUserConn(uid string, platform int) *UserConn {
rwLock.RLock()
defer rwLock.RUnlock()
if connMap, ok := ws.wsUserToConn[uid]; ok {
if conn, flag := connMap[platform]; flag {
return conn
} }
}
return nil // func (ws *WServer) getUserConn(uid string, platform int) *UserConn {
} // rwLock.RLock()
func (ws *WServer) getUserAllCons(uid string) map[int]*UserConn { // defer rwLock.RUnlock()
// if connMap, ok := ws.wsUserToConn[uid]; ok {
// if conn, flag := connMap[platform]; flag {
// return conn
// }
// }
// return nil
// }
func (ws *WServer) getUserAllCons(uid string) map[int][]*UserConn {
rwLock.RLock() rwLock.RLock()
defer rwLock.RUnlock() defer rwLock.RUnlock()
if connMap, ok := ws.wsUserToConn[uid]; ok { if connMap, ok := ws.wsUserToConn[uid]; ok {
newConnMap := make(map[int]*UserConn) newConnMap := make(map[int][]*UserConn)
for k, v := range connMap { for k, v := range connMap {
newConnMap[k] = v newConnMap[k] = v
} }
@ -394,7 +452,7 @@ func (ws *WServer) getUserAllCons(uid string) map[int]*UserConn {
// } // }
// return "", 0 // return "", 0
// } // }
func (ws *WServer) headerCheck(w http.ResponseWriter, r *http.Request, operationID string) bool { func (ws *WServer) headerCheck(w http.ResponseWriter, r *http.Request, operationID string) (isPass, compression bool) {
status := http.StatusUnauthorized status := http.StatusUnauthorized
query := r.URL.Query() query := r.URL.Query()
if len(query["token"]) != 0 && len(query["sendID"]) != 0 && len(query["platformID"]) != 0 { if len(query["token"]) != 0 && len(query["sendID"]) != 0 && len(query["platformID"]) != 0 {
@ -446,10 +504,16 @@ func (ws *WServer) headerCheck(w http.ResponseWriter, r *http.Request, operation
w.Header().Set("Sec-Websocket-Version", "13") w.Header().Set("Sec-Websocket-Version", "13")
w.Header().Set("ws_err_msg", err.Error()) w.Header().Set("ws_err_msg", err.Error())
http.Error(w, err.Error(), status) http.Error(w, err.Error(), status)
return false return false, false
} else { } else {
log.Info(operationID, "Connection Authentication Success", "", "token ", query["token"][0], "userID ", query["sendID"][0], "platformID ", query["platformID"][0]) if r.Header.Get("compression") == "gzip" {
return true compression = true
}
if len(query["compression"]) != 0 && query["compression"][0] == "gzip" {
compression = true
}
log.Info(operationID, "Connection Authentication Success", "", "token ", query["token"][0], "userID ", query["sendID"][0], "platformID ", query["platformID"][0], "compression", compression)
return true, compression
} }
} else { } else {
status = int(constant.ErrArgs.ErrCode) status = int(constant.ErrArgs.ErrCode)
@ -458,6 +522,6 @@ func (ws *WServer) headerCheck(w http.ResponseWriter, r *http.Request, operation
errMsg := "args err, need token, sendID, platformID" errMsg := "args err, need token, sendID, platformID"
w.Header().Set("ws_err_msg", errMsg) w.Header().Set("ws_err_msg", errMsg)
http.Error(w, errMsg, status) http.Error(w, errMsg, status)
return false return false, false
} }
} }

@ -0,0 +1,62 @@
package logic
import (
cbApi "Open_IM/pkg/call_back_struct"
"Open_IM/pkg/common/callback"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/http"
"Open_IM/pkg/common/log"
pbChat "Open_IM/pkg/proto/msg"
"Open_IM/pkg/utils"
http2 "net/http"
)
func callbackAfterConsumeGroupMsg(msg []*pbChat.MsgDataToMQ, triggerID string) cbApi.CommonCallbackResp {
callbackResp := cbApi.CommonCallbackResp{OperationID: triggerID}
if !config.Config.Callback.CallbackAfterConsumeGroupMsg.Enable {
return callbackResp
}
for _, v := range msg {
if v.MsgData.SessionType == constant.SuperGroupChatType || v.MsgData.SessionType == constant.GroupChatType {
commonCallbackReq := copyCallbackCommonReqStruct(v)
commonCallbackReq.CallbackCommand = constant.CallbackAfterConsumeGroupMsgCommand
req := cbApi.CallbackAfterConsumeGroupMsgReq{
CommonCallbackReq: commonCallbackReq,
GroupID: v.MsgData.GroupID,
}
resp := &cbApi.CallbackAfterConsumeGroupMsgResp{CommonCallbackResp: &callbackResp}
defer log.NewDebug(triggerID, utils.GetSelfFuncName(), req, *resp)
if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackAfterConsumeGroupMsgCommand, req, resp, config.Config.Callback.CallbackAfterConsumeGroupMsg.CallbackTimeOut); err != nil {
callbackResp.ErrCode = http2.StatusInternalServerError
callbackResp.ErrMsg = err.Error()
return callbackResp
}
}
}
log.NewDebug(triggerID, utils.GetSelfFuncName(), msg)
return callbackResp
}
func copyCallbackCommonReqStruct(msg *pbChat.MsgDataToMQ) cbApi.CommonCallbackReq {
req := cbApi.CommonCallbackReq{
SendID: msg.MsgData.SendID,
ServerMsgID: msg.MsgData.ServerMsgID,
ClientMsgID: msg.MsgData.ClientMsgID,
OperationID: msg.OperationID,
SenderPlatformID: msg.MsgData.SenderPlatformID,
SenderNickname: msg.MsgData.SenderNickname,
SessionType: msg.MsgData.SessionType,
MsgFrom: msg.MsgData.MsgFrom,
ContentType: msg.MsgData.ContentType,
Status: msg.MsgData.Status,
CreateTime: msg.MsgData.CreateTime,
AtUserIDList: msg.MsgData.AtUserIDList,
SenderFaceURL: msg.MsgData.SenderFaceURL,
Content: callback.GetContent(msg.MsgData),
Seq: msg.MsgData.Seq,
Ex: msg.MsgData.Ex,
}
return req
}

@ -22,7 +22,9 @@ var (
persistentCH PersistentConsumerHandler persistentCH PersistentConsumerHandler
historyCH OnlineHistoryRedisConsumerHandler historyCH OnlineHistoryRedisConsumerHandler
historyMongoCH OnlineHistoryMongoConsumerHandler historyMongoCH OnlineHistoryMongoConsumerHandler
modifyCH ModifyMsgConsumerHandler
producer *kafka.Producer producer *kafka.Producer
producerToModify *kafka.Producer
producerToMongo *kafka.Producer producerToMongo *kafka.Producer
cmdCh chan Cmd2Value cmdCh chan Cmd2Value
onlineTopicStatus int onlineTopicStatus int
@ -43,11 +45,13 @@ func Init() {
persistentCH.Init() // ws2mschat save mysql persistentCH.Init() // ws2mschat save mysql
historyCH.Init(cmdCh) // historyCH.Init(cmdCh) //
historyMongoCH.Init() historyMongoCH.Init()
modifyCH.Init()
onlineTopicStatus = OnlineTopicVacancy onlineTopicStatus = OnlineTopicVacancy
//offlineHistoryCH.Init(cmdCh) //offlineHistoryCH.Init(cmdCh)
statistics.NewStatistics(&singleMsgSuccessCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second singleMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval) statistics.NewStatistics(&singleMsgSuccessCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second singleMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
statistics.NewStatistics(&groupMsgCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second groupMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval) statistics.NewStatistics(&groupMsgCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second groupMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
producer = kafka.NewKafkaProducer(config.Config.Kafka.Ms2pschat.Addr, config.Config.Kafka.Ms2pschat.Topic) producer = kafka.NewKafkaProducer(config.Config.Kafka.Ms2pschat.Addr, config.Config.Kafka.Ms2pschat.Topic)
producerToModify = kafka.NewKafkaProducer(config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.MsgToModify.Topic)
producerToMongo = kafka.NewKafkaProducer(config.Config.Kafka.MsgToMongo.Addr, config.Config.Kafka.MsgToMongo.Topic) producerToMongo = kafka.NewKafkaProducer(config.Config.Kafka.MsgToMongo.Addr, config.Config.Kafka.MsgToMongo.Topic)
} }
func Run(promethuesPort int) { func Run(promethuesPort int) {
@ -59,6 +63,7 @@ func Run(promethuesPort int) {
} }
go historyCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyCH) go historyCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyCH)
go historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyMongoCH) go historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyMongoCH)
go modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(&modifyCH)
//go offlineHistoryCH.historyConsumerGroup.RegisterHandleAndConsumer(&offlineHistoryCH) //go offlineHistoryCH.historyConsumerGroup.RegisterHandleAndConsumer(&offlineHistoryCH)
go func() { go func() {
err := promePkg.StartPromeSrv(promethuesPort) err := promePkg.StartPromeSrv(promethuesPort)

@ -0,0 +1,159 @@
package logic
import (
"Open_IM/pkg/base_info"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db"
kfk "Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log"
pbMsg "Open_IM/pkg/proto/msg"
server_api_params "Open_IM/pkg/proto/sdk_ws"
"Open_IM/pkg/utils"
"encoding/json"
"github.com/Shopify/sarama"
"github.com/golang/protobuf/proto"
)
type ModifyMsgConsumerHandler struct {
msgHandle map[string]fcb
modifyMsgConsumerGroup *kfk.MConsumerGroup
}
func (mmc *ModifyMsgConsumerHandler) Init() {
mmc.msgHandle = make(map[string]fcb)
mmc.msgHandle[config.Config.Kafka.MsgToModify.Topic] = mmc.ModifyMsg
mmc.modifyMsgConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.MsgToModify.Topic},
config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.ConsumerGroupID.MsgToModify)
}
func (ModifyMsgConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
func (ModifyMsgConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }
func (mmc *ModifyMsgConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error {
for msg := range claim.Messages() {
log.NewDebug("", "kafka get info to mysql", "ModifyMsgConsumerHandler", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key))
if len(msg.Value) != 0 {
mmc.msgHandle[msg.Topic](msg, string(msg.Key), sess)
} else {
log.Error("", "msg get from kafka but is nil", msg.Key)
}
sess.MarkMessage(msg, "")
}
return nil
}
func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) {
log.NewInfo("msg come here ModifyMsg!!!", "", "msg", string(cMsg.Value), msgKey)
msgFromMQ := pbMsg.MsgDataToModifyByMQ{}
err := proto.Unmarshal(cMsg.Value, &msgFromMQ)
if err != nil {
log.NewError(msgFromMQ.TriggerID, "msg_transfer Unmarshal msg err", "msg", string(cMsg.Value), "err", err.Error())
return
}
log.Debug(msgFromMQ.TriggerID, "proto.Unmarshal MsgDataToMQ", msgFromMQ.String())
for _, msgDataToMQ := range msgFromMQ.MessageList {
isReactionFromCache := utils.GetSwitchFromOptions(msgDataToMQ.MsgData.Options, constant.IsReactionFromCache)
if !isReactionFromCache {
continue
}
if msgDataToMQ.MsgData.ContentType == constant.ReactionMessageModifier {
notification := &base_info.ReactionMessageModifierNotification{}
if err := json.Unmarshal(msgDataToMQ.MsgData.Content, notification); err != nil {
continue
}
if notification.IsExternalExtensions {
log.NewInfo(msgDataToMQ.OperationID, "msg:", notification, "this is external extensions")
continue
}
if msgDataToMQ.MsgData.SessionType == constant.SuperGroupChatType && utils.GetSwitchFromOptions(msgDataToMQ.MsgData.Options, constant.IsHistory) {
if msgDataToMQ.MsgData.Seq == 0 {
log.NewError(msgDataToMQ.OperationID, "seq==0, error msg", msgDataToMQ.MsgData)
continue
}
msg, err := db.DB.GetMsgBySeqIndex(notification.SourceID, notification.Seq, msgDataToMQ.OperationID)
if (msg != nil && msg.Seq != notification.Seq) || err != nil {
if err != nil {
log.NewError(msgDataToMQ.OperationID, "GetMsgBySeqIndex failed", notification, err.Error())
}
msgs, indexes, err := db.DB.GetSuperGroupMsgBySeqListMongo(notification.SourceID, []uint32{notification.Seq}, msgDataToMQ.OperationID)
if err != nil {
log.NewError(msgDataToMQ.OperationID, "GetSuperGroupMsgBySeqListMongo failed", notification, err.Error())
continue
}
var index int
if len(msgs) < 1 || len(indexes) < 1 {
log.NewError(msgDataToMQ.OperationID, "GetSuperGroupMsgBySeqListMongo failed", notification, "len<1", msgs, indexes)
continue
} else {
msg = msgs[0]
index = indexes[msg.Seq]
}
msg.IsReact = true
if err := db.DB.ReplaceMsgByIndex(notification.SourceID, msg, index); err != nil {
log.NewError(msgDataToMQ.OperationID, "ReplaceMsgByIndex failed", notification.SourceID, *msg)
}
} else {
msg.IsReact = true
if err = db.DB.ReplaceMsgBySeq(notification.SourceID, msg, msgDataToMQ.OperationID); err != nil {
log.NewError(msgDataToMQ.OperationID, "ReplaceMsgBySeq failed", notification.SourceID, *msg)
}
}
}
if !notification.IsReact {
// first time to modify
var reactionExtensionList = make(map[string]db.KeyValue)
extendMsg := db.ExtendMsg{
ReactionExtensionList: reactionExtensionList,
ClientMsgID: notification.ClientMsgID,
MsgFirstModifyTime: notification.MsgFirstModifyTime,
}
for _, v := range notification.SuccessReactionExtensionList {
reactionExtensionList[v.TypeKey] = db.KeyValue{
TypeKey: v.TypeKey,
Value: v.Value,
LatestUpdateTime: v.LatestUpdateTime,
}
}
// modify old msg
if err := db.DB.InsertExtendMsg(notification.SourceID, notification.SessionType, &extendMsg); err != nil {
log.NewError(msgDataToMQ.OperationID, "MsgFirstModify InsertExtendMsg failed", notification.SourceID, notification.SessionType, extendMsg, err.Error())
continue
}
} else {
var reactionExtensionList = make(map[string]*server_api_params.KeyValue)
for _, v := range notification.SuccessReactionExtensionList {
reactionExtensionList[v.TypeKey] = &server_api_params.KeyValue{
TypeKey: v.TypeKey,
Value: v.Value,
LatestUpdateTime: v.LatestUpdateTime,
}
}
// is already modify
if err := db.DB.InsertOrUpdateReactionExtendMsgSet(notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, reactionExtensionList); err != nil {
log.NewError(msgDataToMQ.OperationID, "InsertOrUpdateReactionExtendMsgSet failed")
}
}
} else if msgDataToMQ.MsgData.ContentType == constant.ReactionMessageDeleter {
notification := &base_info.ReactionMessageDeleteNotification{}
if err := json.Unmarshal(msgDataToMQ.MsgData.Content, notification); err != nil {
continue
}
if err := db.DB.DeleteReactionExtendMsgSet(notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, notification.SuccessReactionExtensionList); err != nil {
log.NewError(msgDataToMQ.OperationID, "InsertOrUpdateReactionExtendMsgSet failed")
}
}
}
}
func UnMarshallSetReactionMsgContent(content []byte) (notification *base_info.ReactionMessageModifierNotification, err error) {
return notification, nil
}

@ -72,6 +72,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
storageMsgList := make([]*pbMsg.MsgDataToMQ, 0, 80) storageMsgList := make([]*pbMsg.MsgDataToMQ, 0, 80)
notStoragePushMsgList := make([]*pbMsg.MsgDataToMQ, 0, 80) notStoragePushMsgList := make([]*pbMsg.MsgDataToMQ, 0, 80)
log.Debug(triggerID, "msg arrived channel", "channel id", channelID, msgList, msgChannelValue.aggregationID, len(msgList)) log.Debug(triggerID, "msg arrived channel", "channel id", channelID, msgList, msgChannelValue.aggregationID, len(msgList))
var modifyMsgList []*pbMsg.MsgDataToMQ
for _, v := range msgList { for _, v := range msgList {
log.Debug(triggerID, "msg come to storage center", v.String()) log.Debug(triggerID, "msg come to storage center", v.String())
isHistory := utils.GetSwitchFromOptions(v.MsgData.Options, constant.IsHistory) isHistory := utils.GetSwitchFromOptions(v.MsgData.Options, constant.IsHistory)
@ -83,11 +84,15 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
if !(!isSenderSync && msgChannelValue.aggregationID == v.MsgData.SendID) { if !(!isSenderSync && msgChannelValue.aggregationID == v.MsgData.SendID) {
notStoragePushMsgList = append(notStoragePushMsgList, v) notStoragePushMsgList = append(notStoragePushMsgList, v)
} }
} }
if v.MsgData.ContentType == constant.ReactionMessageModifier || v.MsgData.ContentType == constant.ReactionMessageDeleter {
modifyMsgList = append(modifyMsgList, v)
}
}
if len(modifyMsgList) > 0 {
sendMessageToModifyMQ(msgChannelValue.aggregationID, triggerID, modifyMsgList)
} }
//switch msgChannelValue.msg.MsgData.SessionType { //switch msgChannelValue.msg.MsgData.SessionType {
//case constant.SingleChatType: //case constant.SingleChatType:
//case constant.GroupChatType: //case constant.GroupChatType:
@ -106,7 +111,12 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
singleMsgSuccessCountMutex.Lock() singleMsgSuccessCountMutex.Lock()
singleMsgSuccessCount += uint64(len(storageMsgList)) singleMsgSuccessCount += uint64(len(storageMsgList))
singleMsgSuccessCountMutex.Unlock() singleMsgSuccessCountMutex.Unlock()
callbackResp := callbackAfterConsumeGroupMsg(storageMsgList, triggerID)
if callbackResp.ErrCode != 0 {
log.NewError(triggerID, utils.GetSelfFuncName(), "callbackAfterConsumeGroupMsg resp: ", callbackResp)
}
och.SendMessageToMongoCH(msgChannelValue.aggregationID, triggerID, storageMsgList, lastSeq) och.SendMessageToMongoCH(msgChannelValue.aggregationID, triggerID, storageMsgList, lastSeq)
for _, v := range storageMsgList { for _, v := range storageMsgList {
sendMessageToPushMQ(v, msgChannelValue.aggregationID) sendMessageToPushMQ(v, msgChannelValue.aggregationID)
} }
@ -552,6 +562,17 @@ func sendMessageToPushMQ(message *pbMsg.MsgDataToMQ, pushToUserID string) {
return return
} }
// sendMessageToModifyMQ forwards a batch of reaction-modify/delete messages to
// the "modify" Kafka topic, keyed by aggregation ID. A nil/empty batch is a no-op.
func sendMessageToModifyMQ(aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ) {
	if len(messages) == 0 {
		return
	}
	payload := &pbMsg.MsgDataToModifyByMQ{AggregationID: aggregationID, MessageList: messages, TriggerID: triggerID}
	pid, offset, err := producerToModify.SendMessage(payload, aggregationID, triggerID)
	if err != nil {
		// Delivery failure is logged but not retried here; upstream consumers re-trigger.
		log.Error(triggerID, "kafka send failed", "send data", len(messages), "pid", pid, "offset", offset, "err", err.Error(), "key", aggregationID)
	}
}
// String hashes a string to a unique hashcode. // String hashes a string to a unique hashcode.
// //
// crc32 returns a uint32, but for our use we need // crc32 returns a uint32, but for our use we need

@ -61,7 +61,6 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.Con
if unexistSeqList, err := db.DB.DelMsgBySeqList(DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID); err != nil { if unexistSeqList, err := db.DB.DelMsgBySeqList(DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID); err != nil {
log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqList args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID, err.Error(), unexistSeqList) log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqList args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID, err.Error(), unexistSeqList)
} }
} }
} }
} }

@ -28,6 +28,8 @@ var (
const ( const (
PushURL = "/push/single/alias" PushURL = "/push/single/alias"
AuthURL = "/auth" AuthURL = "/auth"
TaskURL = "/push/list/message"
BatchPushURL = "/push/list/alias"
) )
func init() { func init() {
@ -53,22 +55,41 @@ type AuthResp struct {
Token string `json:"token"` Token string `json:"token"`
} }
type PushReq struct { type TaskResp struct {
RequestID string `json:"request_id"` TaskID string `json:"taskID"`
Audience struct { }
type Settings struct {
TTL *int64 `json:"ttl"`
}
type Audience struct {
Alias []string `json:"alias"` Alias []string `json:"alias"`
} `json:"audience"` }
PushMessage struct {
Notification Notification `json:"notification,omitempty"` type PushMessage struct {
Transmission string `json:"transmission,omitempty"` Notification *Notification `json:"notification,omitempty"`
} `json:"push_message"` Transmission *string `json:"transmission,omitempty"`
PushChannel struct { }
Ios Ios `json:"ios"`
Android Android `json:"android"` type PushChannel struct {
} `json:"push_channel"` Ios *Ios `json:"ios"`
Android *Android `json:"android"`
}
type PushReq struct {
RequestID *string `json:"request_id"`
Settings *Settings `json:"settings"`
Audience *Audience `json:"audience"`
PushMessage *PushMessage `json:"push_message"`
PushChannel *PushChannel `json:"push_channel"`
IsAsync *bool `json:"is_async"`
Taskid *string `json:"taskid"`
} }
type Ios struct { type Ios struct {
NotiType *string `json:"type"`
AutoBadge *string `json:"auto_badge"`
Aps struct { Aps struct {
Sound string `json:"sound"` Sound string `json:"sound"`
Alert Alert `json:"alert"` Alert Alert `json:"alert"`
@ -119,9 +140,9 @@ func newGetuiClient() *Getui {
func (g *Getui) Push(userIDList []string, title, detailContent, operationID string, opts push.PushOpts) (resp string, err error) { func (g *Getui) Push(userIDList []string, title, detailContent, operationID string, opts push.PushOpts) (resp string, err error) {
token, err := db.DB.GetGetuiToken() token, err := db.DB.GetGetuiToken()
log.NewDebug(operationID, utils.GetSelfFuncName(), "token", token) log.NewDebug(operationID, utils.GetSelfFuncName(), "token", token, userIDList)
if err != nil { if err != nil {
log.NewError(operationID, utils.OperationIDGenerator(), "GetGetuiToken failed", err.Error()) log.NewError(operationID, utils.GetSelfFuncName(), "GetGetuiToken failed", err.Error())
} }
if token == "" || err != nil { if token == "" || err != nil {
token, err = g.getTokenAndSave2Redis(operationID) token, err = g.getTokenAndSave2Redis(operationID)
@ -130,47 +151,32 @@ func (g *Getui) Push(userIDList []string, title, detailContent, operationID stri
return "", utils.Wrap(err, "") return "", utils.Wrap(err, "")
} }
} }
pushReq := PushReq{
RequestID: utils.OperationIDGenerator(), pushReq := PushReq{PushMessage: &PushMessage{Notification: &Notification{
Audience: struct {
Alias []string `json:"alias"`
}{Alias: []string{userIDList[0]}},
}
pushReq.PushMessage.Notification = Notification{
Title: title, Title: title,
Body: detailContent, Body: detailContent,
ClickType: "startapp", ClickType: "startapp",
ChannelID: config.Config.Push.Getui.ChannelID, ChannelID: config.Config.Push.Getui.ChannelID,
ChannelName: config.Config.Push.Getui.ChannelName, ChannelName: config.Config.Push.Getui.ChannelName,
} }}}
pushReq.PushChannel.Ios.Aps.Sound = "default" pushReq.setPushChannel(title, detailContent)
pushReq.PushChannel.Ios.Aps.Alert = Alert{
Title: title,
Body: title,
}
pushReq.PushChannel.Android.Ups.Notification = Notification{
Title: title,
Body: title,
ClickType: "startapp",
}
pushReq.PushChannel.Android.Ups.Options = Options{
HW: struct {
DefaultSound bool `json:"/message/android/notification/default_sound"`
ChannelID string `json:"/message/android/notification/channel_id"`
Sound string `json:"/message/android/notification/sound"`
Importance string `json:"/message/android/notification/importance"`
}{ChannelID: "RingRing4", Sound: "/raw/ring001", Importance: "NORMAL"},
XM: struct {
ChannelID string `json:"/extra.channel_id"`
}{ChannelID: "high_system"},
VV: struct {
Classification int "json:\"/classification\""
}{
Classification: 1,
},
}
pushResp := PushResp{} pushResp := PushResp{}
if len(userIDList) > 1 {
taskID, err := g.GetTaskID(operationID, token, pushReq)
if err != nil {
return "", utils.Wrap(err, "GetTaskIDAndSave2Redis failed")
}
pushReq = PushReq{Audience: &Audience{Alias: userIDList}}
var IsAsync = true
pushReq.IsAsync = &IsAsync
pushReq.Taskid = &taskID
err = g.request(BatchPushURL, pushReq, token, &pushResp, operationID)
} else {
reqID := utils.OperationIDGenerator()
pushReq.RequestID = &reqID
pushReq.Audience = &Audience{Alias: []string{userIDList[0]}}
err = g.request(PushURL, pushReq, token, &pushResp, operationID) err = g.request(PushURL, pushReq, token, &pushResp, operationID)
}
switch err { switch err {
case TokenExpireError: case TokenExpireError:
token, err = g.getTokenAndSave2Redis(operationID) token, err = g.getTokenAndSave2Redis(operationID)
@ -209,6 +215,17 @@ func (g *Getui) Auth(operationID string, timeStamp int64) (token string, expireT
return respAuth.Token, int64(expire), err return respAuth.Token, int64(expire), err
} }
// GetTaskID registers pushReq as a Getui batch-push task and returns the task
// identifier used for subsequent list (alias) pushes.
func (g *Getui) GetTaskID(operationID, token string, pushReq PushReq) (string, error) {
	// Task TTL in milliseconds: 5 minutes is enough to fan the batch out.
	ttl := int64(5 * 60 * 1000)
	pushReq.Settings = &Settings{TTL: &ttl}
	var respTask TaskResp
	if err := g.request(TaskURL, pushReq, token, &respTask, operationID); err != nil {
		return "", utils.Wrap(err, "")
	}
	return respTask.TaskID, nil
}
func (g *Getui) request(url string, content interface{}, token string, returnStruct interface{}, operationID string) error { func (g *Getui) request(url string, content interface{}, token string, returnStruct interface{}, operationID string) error {
con, err := json.Marshal(content) con, err := json.Marshal(content)
if err != nil { if err != nil {
@ -245,6 +262,41 @@ func (g *Getui) request(url string, content interface{}, token string, returnStr
return nil return nil
} }
// setPushChannel fills in the per-platform push-channel settings (iOS APNs and
// Android vendor channels) for this request from the given title and body.
func (pushReq *PushReq) setPushChannel(title string, body string) {
	notiType := "notify"
	autoBadge := "+1"
	ios := &Ios{NotiType: &notiType, AutoBadge: &autoBadge}
	ios.Aps.Sound = "default"
	ios.Aps.Alert = Alert{Title: title, Body: body}

	android := &Android{}
	android.Ups.Notification = Notification{Title: title, Body: body, ClickType: "startapp"}
	// Vendor-specific option keys (Huawei / Xiaomi / Vivo) use Getui's
	// slash-path JSON tags; values mirror the app's configured channels.
	android.Ups.Options = Options{
		HW: struct {
			DefaultSound bool   `json:"/message/android/notification/default_sound"`
			ChannelID    string `json:"/message/android/notification/channel_id"`
			Sound        string `json:"/message/android/notification/sound"`
			Importance   string `json:"/message/android/notification/importance"`
		}{ChannelID: "RingRing4", Sound: "/raw/ring001", Importance: "NORMAL"},
		XM: struct {
			ChannelID string `json:"/extra.channel_id"`
		}{ChannelID: "high_system"},
		VV: struct {
			Classification int `json:"/classification"`
		}{Classification: 1},
	}

	pushReq.PushChannel = &PushChannel{Ios: ios, Android: android}
}
func (g *Getui) getTokenAndSave2Redis(operationID string) (token string, err error) { func (g *Getui) getTokenAndSave2Redis(operationID string) (token string, err error) {
token, expireTime, err := g.Auth(operationID, time.Now().UnixNano()/1e6) token, expireTime, err := g.Auth(operationID, time.Now().UnixNano()/1e6)
if err != nil { if err != nil {
@ -257,3 +309,17 @@ func (g *Getui) getTokenAndSave2Redis(operationID string) (token string, err err
} }
return token, nil return token, nil
} }
// GetTaskIDAndSave2Redis creates a long-lived Getui batch-push task for pushReq
// and caches its task ID in Redis, returning the task ID.
//
// Bug fix: the original returned `token` (the auth token parameter) instead of
// the freshly created `taskID`, and wrapped the Redis error as "Auth failed".
func (g *Getui) GetTaskIDAndSave2Redis(operationID, token string, pushReq PushReq) (taskID string, err error) {
	// Task TTL in milliseconds: 24 hours, matching the cache window below.
	ttl := int64(1000 * 60 * 60 * 24)
	pushReq.Settings = &Settings{TTL: &ttl}
	taskID, err = g.GetTaskID(operationID, token, pushReq)
	if err != nil {
		return "", utils.Wrap(err, "GetTaskIDAndSave2Redis failed")
	}
	// Cache for 23h — slightly shorter than the task TTL so a cached ID is
	// never handed out after the task itself has expired.
	if err = db.DB.SetGetuiTaskID(taskID, 60*60*23); err != nil {
		return "", utils.Wrap(err, "SetGetuiTaskID failed")
	}
	return taskID, nil
}

@ -75,7 +75,7 @@ func callbackOnlinePush(operationID string, userIDList []string, msg *commonPb.M
}, },
UserIDList: userIDList, UserIDList: userIDList,
}, },
OfflinePushInfo: msg.OfflinePushInfo, //OfflinePushInfo: msg.OfflinePushInfo,
ClientMsgID: msg.ClientMsgID, ClientMsgID: msg.ClientMsgID,
SendID: msg.SendID, SendID: msg.SendID,
GroupID: msg.GroupID, GroupID: msg.GroupID,
@ -97,9 +97,9 @@ func callbackOnlinePush(operationID string, userIDList []string, msg *commonPb.M
} }
} }
if resp.ErrCode == constant.CallbackHandleSuccess && resp.ActionCode == constant.ActionAllow { if resp.ErrCode == constant.CallbackHandleSuccess && resp.ActionCode == constant.ActionAllow {
if resp.OfflinePushInfo != nil { //if resp.OfflinePushInfo != nil {
msg.OfflinePushInfo = resp.OfflinePushInfo // msg.OfflinePushInfo = resp.OfflinePushInfo
} //}
} }
return callbackResp return callbackResp
} }
@ -117,7 +117,7 @@ func callbackBeforeSuperGroupOnlinePush(operationID string, groupID string, msg
PlatformID: msg.SenderPlatformID, PlatformID: msg.SenderPlatformID,
Platform: constant.PlatformIDToName(int(msg.SenderPlatformID)), Platform: constant.PlatformIDToName(int(msg.SenderPlatformID)),
}, },
OfflinePushInfo: msg.OfflinePushInfo, //OfflinePushInfo: msg.OfflinePushInfo,
ClientMsgID: msg.ClientMsgID, ClientMsgID: msg.ClientMsgID,
SendID: msg.SendID, SendID: msg.SendID,
GroupID: groupID, GroupID: groupID,
@ -125,6 +125,7 @@ func callbackBeforeSuperGroupOnlinePush(operationID string, groupID string, msg
SessionType: msg.SessionType, SessionType: msg.SessionType,
AtUserIDList: msg.AtUserIDList, AtUserIDList: msg.AtUserIDList,
Content: callback.GetContent(msg), Content: callback.GetContent(msg),
Seq: msg.Seq,
} }
resp := &cbApi.CallbackBeforeSuperGroupOnlinePushResp{CommonCallbackResp: &callbackResp} resp := &cbApi.CallbackBeforeSuperGroupOnlinePushResp{CommonCallbackResp: &callbackResp}
if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackSuperGroupOnlinePushCommand, req, resp, config.Config.Callback.CallbackBeforeSuperGroupOnlinePush.CallbackTimeOut); err != nil { if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackSuperGroupOnlinePushCommand, req, resp, config.Config.Callback.CallbackBeforeSuperGroupOnlinePush.CallbackTimeOut); err != nil {
@ -142,9 +143,9 @@ func callbackBeforeSuperGroupOnlinePush(operationID string, groupID string, msg
if len(resp.UserIDList) != 0 { if len(resp.UserIDList) != 0 {
*pushToUserList = resp.UserIDList *pushToUserList = resp.UserIDList
} }
if resp.OfflinePushInfo != nil { //if resp.OfflinePushInfo != nil {
msg.OfflinePushInfo = resp.OfflinePushInfo // msg.OfflinePushInfo = resp.OfflinePushInfo
} //}
} }
log.NewDebug(operationID, utils.GetSelfFuncName(), pushToUserList, resp.UserIDList) log.NewDebug(operationID, utils.GetSelfFuncName(), pushToUserList, resp.UserIDList)
return callbackResp return callbackResp

@ -93,10 +93,11 @@ func (r *RPCServer) PushMsg(_ context.Context, pbData *pbPush.PushMsgReq) (*pbPu
} }
func (r *RPCServer) DelUserPushToken(c context.Context, req *pbPush.DelUserPushTokenReq) (*pbPush.DelUserPushTokenResp, error) { func (r *RPCServer) DelUserPushToken(c context.Context, req *pbPush.DelUserPushTokenReq) (*pbPush.DelUserPushTokenResp, error) {
log.Debug(req.OperationID, utils.GetSelfFuncName(), "req", req.String())
var resp pbPush.DelUserPushTokenResp var resp pbPush.DelUserPushTokenResp
err := db.DB.DelFcmToken(req.UserID, int(req.PlatformID)) err := db.DB.DelFcmToken(req.UserID, int(req.PlatformID))
if err != nil { if err != nil {
errMsg := req.OperationID + " " + "SetFcmToken failed " + err.Error() errMsg := req.OperationID + " " + "DelFcmToken failed " + err.Error()
log.NewError(req.OperationID, errMsg) log.NewError(req.OperationID, errMsg)
resp.ErrCode = 500 resp.ErrCode = 500
resp.ErrMsg = errMsg resp.ErrMsg = errMsg

@ -70,7 +70,7 @@ func MsgToUser(pushMsg *pbPush.PushMsgReq) {
wsResult = append(wsResult, reply.SinglePushResult...) wsResult = append(wsResult, reply.SinglePushResult...)
} }
} }
log.NewInfo(pushMsg.OperationID, "push_result", wsResult, "sendData", pushMsg.MsgData) log.NewInfo(pushMsg.OperationID, "push_result", wsResult, "sendData", pushMsg.MsgData, "isOfflinePush", isOfflinePush)
successCount++ successCount++
if isOfflinePush && pushMsg.PushToUserID != pushMsg.MsgData.SendID { if isOfflinePush && pushMsg.PushToUserID != pushMsg.MsgData.SendID {
// save invitation info for offline push // save invitation info for offline push
@ -199,11 +199,25 @@ func MsgToSuperGroupUser(pushMsg *pbPush.PushMsgReq) {
successCount++ successCount++
if isOfflinePush { if isOfflinePush {
var onlineSuccessUserIDList []string var onlineSuccessUserIDList []string
var WebAndPcBackgroundUserIDList []string
onlineSuccessUserIDList = append(onlineSuccessUserIDList, pushMsg.MsgData.SendID) onlineSuccessUserIDList = append(onlineSuccessUserIDList, pushMsg.MsgData.SendID)
for _, v := range wsResult { for _, v := range wsResult {
if v.OnlinePush && v.UserID != pushMsg.MsgData.SendID { if v.OnlinePush && v.UserID != pushMsg.MsgData.SendID {
onlineSuccessUserIDList = append(onlineSuccessUserIDList, v.UserID) onlineSuccessUserIDList = append(onlineSuccessUserIDList, v.UserID)
} }
if !v.OnlinePush {
if len(v.Resp) != 0 {
for _, singleResult := range v.Resp {
if singleResult.ResultCode == -2 {
if constant.PlatformIDToClass(int(singleResult.RecvPlatFormID)) == constant.TerminalPC ||
singleResult.RecvPlatFormID == constant.WebPlatformID {
WebAndPcBackgroundUserIDList = append(WebAndPcBackgroundUserIDList, v.UserID)
}
}
}
}
}
} }
onlineFailedUserIDList := utils.DifferenceString(onlineSuccessUserIDList, pushToUserIDList) onlineFailedUserIDList := utils.DifferenceString(onlineSuccessUserIDList, pushToUserIDList)
//Use offline push messaging //Use offline push messaging
@ -229,7 +243,17 @@ func MsgToSuperGroupUser(pushMsg *pbPush.PushMsgReq) {
} else { } else {
needOfflinePushUserIDList = onlineFailedUserIDList needOfflinePushUserIDList = onlineFailedUserIDList
} }
if pushMsg.MsgData.ContentType != constant.SignalingNotification {
notNotificationUserIDList, err := db.DB.GetSuperGroupUserReceiveNotNotifyMessageIDList(pushMsg.MsgData.GroupID)
if err != nil {
log.NewError(pushMsg.OperationID, utils.GetSelfFuncName(), "GetSuperGroupUserReceiveNotNotifyMessageIDList failed", pushMsg.MsgData.GroupID)
} else {
log.NewDebug(pushMsg.OperationID, utils.GetSelfFuncName(), notNotificationUserIDList)
}
needOfflinePushUserIDList = utils.RemoveFromSlice(notNotificationUserIDList, needOfflinePushUserIDList)
log.NewDebug(pushMsg.OperationID, utils.GetSelfFuncName(), needOfflinePushUserIDList)
}
if offlinePusher == nil { if offlinePusher == nil {
return return
} }
@ -237,7 +261,7 @@ func MsgToSuperGroupUser(pushMsg *pbPush.PushMsgReq) {
if err != nil { if err != nil {
log.NewError(pushMsg.OperationID, utils.GetSelfFuncName(), "GetOfflinePushOpts failed", pushMsg, err.Error()) log.NewError(pushMsg.OperationID, utils.GetSelfFuncName(), "GetOfflinePushOpts failed", pushMsg, err.Error())
} }
log.NewInfo(pushMsg.OperationID, utils.GetSelfFuncName(), onlineFailedUserIDList, title, detailContent, "opts:", opts) log.NewInfo(pushMsg.OperationID, utils.GetSelfFuncName(), needOfflinePushUserIDList, title, detailContent, "opts:", opts)
if title == "" { if title == "" {
switch pushMsg.MsgData.ContentType { switch pushMsg.MsgData.ContentType {
case constant.Text: case constant.Text:
@ -274,6 +298,22 @@ func MsgToSuperGroupUser(pushMsg *pbPush.PushMsgReq) {
promePkg.PromeInc(promePkg.MsgOfflinePushSuccessCounter) promePkg.PromeInc(promePkg.MsgOfflinePushSuccessCounter)
log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData) log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData)
} }
needBackgroupPushUserID := utils.IntersectString(needOfflinePushUserIDList, WebAndPcBackgroundUserIDList)
grpcCons := getcdv3.GetDefaultGatewayConn4Unique(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), pushMsg.OperationID)
if len(needBackgroupPushUserID) > 0 {
//Online push message
log.Debug(pushMsg.OperationID, "len grpc", len(grpcCons), "data", pushMsg.String())
for _, v := range grpcCons {
msgClient := pbRelay.NewRelayClient(v)
_, err := msgClient.SuperGroupBackgroundOnlinePush(context.Background(), &pbRelay.OnlineBatchPushOneMsgReq{OperationID: pushMsg.OperationID, MsgData: pushMsg.MsgData,
PushToUserIDList: needBackgroupPushUserID})
if err != nil {
log.NewError("push data to client rpc err", pushMsg.OperationID, "err", err)
continue
}
}
}
} }
} }
} }

@ -2,7 +2,7 @@ package push
import "Open_IM/pkg/common/constant" import "Open_IM/pkg/common/constant"
var PushTerminal = []int{constant.IOSPlatformID, constant.AndroidPlatformID} var PushTerminal = []int{constant.IOSPlatformID, constant.AndroidPlatformID, constant.WebPlatformID}
type OfflinePusher interface { type OfflinePusher interface {
Push(userIDList []string, title, detailContent, operationID string, opts PushOpts) (resp string, err error) Push(userIDList []string, title, detailContent, operationID string, opts PushOpts) (resp string, err error)

@ -133,6 +133,24 @@ func (s *adminCMSServer) AdminLogin(_ context.Context, req *pbAdminCMS.AdminLogi
return resp, nil return resp, nil
} }
// GetUserToken issues an access token for the given user ID and platform on
// behalf of an administrator. Failures are reported through CommonResp with a
// nil gRPC error, matching the other handlers of this server.
//
// Fix: the success log printed resp.String() but was labeled "req: "; it is
// now labeled "resp: " so log searches find the right payload.
func (s *adminCMSServer) GetUserToken(_ context.Context, req *pbAdminCMS.GetUserTokenReq) (*pbAdminCMS.GetUserTokenResp, error) {
	log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "req: ", req.String())
	resp := &pbAdminCMS.GetUserTokenResp{
		CommonResp: &pbAdminCMS.CommonResp{},
	}
	token, expTime, err := token_verify.CreateToken(req.UserID, int(req.PlatformID))
	if err != nil {
		log.NewError(req.OperationID, utils.GetSelfFuncName(), "generate token failed", "userID: ", req.UserID, req.PlatformID, err.Error())
		resp.CommonResp.ErrCode = constant.ErrTokenUnknown.ErrCode
		resp.CommonResp.ErrMsg = err.Error()
		return resp, nil
	}
	resp.Token = token
	resp.ExpTime = expTime
	log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "resp: ", resp.String())
	return resp, nil
}
func (s *adminCMSServer) AddUserRegisterAddFriendIDList(_ context.Context, req *pbAdminCMS.AddUserRegisterAddFriendIDListReq) (*pbAdminCMS.AddUserRegisterAddFriendIDListResp, error) { func (s *adminCMSServer) AddUserRegisterAddFriendIDList(_ context.Context, req *pbAdminCMS.AddUserRegisterAddFriendIDListReq) (*pbAdminCMS.AddUserRegisterAddFriendIDListResp, error) {
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "req: ", req.String()) log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "req: ", req.String())
resp := &pbAdminCMS.AddUserRegisterAddFriendIDListResp{CommonResp: &pbAdminCMS.CommonResp{}} resp := &pbAdminCMS.AddUserRegisterAddFriendIDListResp{CommonResp: &pbAdminCMS.CommonResp{}}
@ -197,37 +215,50 @@ func (s *adminCMSServer) GetUserRegisterAddFriendIDList(_ context.Context, req *
func (s *adminCMSServer) GetChatLogs(_ context.Context, req *pbAdminCMS.GetChatLogsReq) (*pbAdminCMS.GetChatLogsResp, error) { func (s *adminCMSServer) GetChatLogs(_ context.Context, req *pbAdminCMS.GetChatLogsReq) (*pbAdminCMS.GetChatLogsResp, error) {
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "GetChatLogs", req.String()) log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "GetChatLogs", req.String())
resp := &pbAdminCMS.GetChatLogsResp{CommonResp: &pbAdminCMS.CommonResp{}, Pagination: &server_api_params.ResponsePagination{}} resp := &pbAdminCMS.GetChatLogsResp{CommonResp: &pbAdminCMS.CommonResp{}, Pagination: &server_api_params.ResponsePagination{}}
time, err := utils.TimeStringToTime(req.SendTime)
if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "time string parse error", err.Error())
resp.CommonResp.ErrCode = constant.ErrArgs.ErrCode
resp.CommonResp.ErrMsg = err.Error()
return resp, nil
}
chatLog := db.ChatLog{ chatLog := db.ChatLog{
Content: req.Content, Content: req.Content,
SendTime: time,
ContentType: req.ContentType, ContentType: req.ContentType,
SessionType: req.SessionType, SessionType: req.SessionType,
RecvID: req.RecvID, RecvID: req.RecvID,
SendID: req.SendID, SendID: req.SendID,
} }
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "chat_log: ", chatLog) if req.SendTime != "" {
nums, err := imdb.GetChatLogCount(chatLog) sendTime, err := utils.TimeStringToTime(req.SendTime)
if err != nil { if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "GetChatLogCount", err.Error()) log.NewError(req.OperationID, utils.GetSelfFuncName(), "time string parse error", err.Error())
resp.CommonResp.ErrCode = constant.ErrDB.ErrCode resp.CommonResp.ErrCode = constant.ErrArgs.ErrCode
resp.CommonResp.ErrMsg = err.Error() resp.CommonResp.ErrMsg = err.Error()
return resp, nil return resp, nil
} }
resp.ChatLogsNum = int32(nums) chatLog.SendTime = sendTime
chatLogs, err := imdb.GetChatLog(chatLog, req.Pagination.PageNumber, req.Pagination.ShowNumber) }
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "chat_log: ", chatLog)
num, chatLogs, err := imdb.GetChatLog(&chatLog, req.Pagination.PageNumber, req.Pagination.ShowNumber, []int32{
constant.Text,
constant.Picture,
constant.Voice,
constant.Video,
constant.File,
constant.AtText,
constant.Merger,
constant.Card,
constant.Location,
constant.Custom,
constant.Revoke,
constant.Quote,
constant.AdvancedText,
constant.AdvancedRevoke,
constant.CustomNotTriggerConversation,
})
if err != nil { if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "GetChatLog", err.Error()) log.NewError(req.OperationID, utils.GetSelfFuncName(), "GetChatLog", err.Error())
resp.CommonResp.ErrCode = constant.ErrDB.ErrCode resp.CommonResp.ErrCode = constant.ErrDB.ErrCode
resp.CommonResp.ErrMsg = err.Error() resp.CommonResp.ErrMsg = err.Error()
return resp, nil return resp, nil
} }
resp.ChatLogsNum = int32(num)
for _, chatLog := range chatLogs { for _, chatLog := range chatLogs {
pbChatLog := &pbAdminCMS.ChatLog{} pbChatLog := &pbAdminCMS.ChatLog{}
utils.CopyStructFields(pbChatLog, chatLog) utils.CopyStructFields(pbChatLog, chatLog)
@ -265,7 +296,6 @@ func (s *adminCMSServer) GetChatLogs(_ context.Context, req *pbAdminCMS.GetChatL
CurrentPage: req.Pagination.PageNumber, CurrentPage: req.Pagination.PageNumber,
ShowNumber: req.Pagination.ShowNumber, ShowNumber: req.Pagination.ShowNumber,
} }
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "resp output: ", resp.String()) log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "resp output: ", resp.String())
return resp, nil return resp, nil
} }
@ -281,6 +311,7 @@ func (s *adminCMSServer) GetActiveGroup(_ context.Context, req *pbAdminCMS.GetAc
return resp, nil return resp, nil
} }
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "time: ", fromTime, toTime) log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "time: ", fromTime, toTime)
toTime = toTime.AddDate(0, 0, 1)
activeGroups, err := imdb.GetActiveGroups(fromTime, toTime, 12) activeGroups, err := imdb.GetActiveGroups(fromTime, toTime, 12)
if err != nil { if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "GetActiveGroups failed", err.Error()) log.NewError(req.OperationID, utils.GetSelfFuncName(), "GetActiveGroups failed", err.Error())
@ -310,6 +341,7 @@ func (s *adminCMSServer) GetActiveUser(_ context.Context, req *pbAdminCMS.GetAct
resp.CommonResp.ErrMsg = err.Error() resp.CommonResp.ErrMsg = err.Error()
return resp, nil return resp, nil
} }
toTime = toTime.AddDate(0, 0, 1)
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "time: ", fromTime, toTime) log.NewDebug(req.OperationID, utils.GetSelfFuncName(), "time: ", fromTime, toTime)
activeUsers, err := imdb.GetActiveUsers(fromTime, toTime, 12) activeUsers, err := imdb.GetActiveUsers(fromTime, toTime, 12)
if err != nil { if err != nil {

@ -50,6 +50,11 @@ func (rpc *rpcAuth) UserRegister(_ context.Context, req *pbAuth.UserRegisterReq)
func (rpc *rpcAuth) UserToken(_ context.Context, req *pbAuth.UserTokenReq) (*pbAuth.UserTokenResp, error) { func (rpc *rpcAuth) UserToken(_ context.Context, req *pbAuth.UserTokenReq) (*pbAuth.UserTokenResp, error) {
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), " rpc args ", req.String()) log.NewInfo(req.OperationID, utils.GetSelfFuncName(), " rpc args ", req.String())
_, err := imdb.GetUserByUserID(req.FromUserID)
if err != nil {
log.NewError(req.OperationID, "not this user:", req.FromUserID, req.String())
return &pbAuth.UserTokenResp{CommonResp: &pbAuth.CommonResp{ErrCode: constant.ErrDB.ErrCode, ErrMsg: err.Error()}}, nil
}
tokens, expTime, err := token_verify.CreateToken(req.FromUserID, int(req.Platform)) tokens, expTime, err := token_verify.CreateToken(req.FromUserID, int(req.Platform))
if err != nil { if err != nil {
errMsg := req.OperationID + " token_verify.CreateToken failed " + err.Error() + req.FromUserID + utils.Int32ToString(req.Platform) errMsg := req.OperationID + " token_verify.CreateToken failed " + err.Error() + req.FromUserID + utils.Int32ToString(req.Platform)

@ -61,6 +61,21 @@ func (rpc *rpcConversation) ModifyConversationField(c context.Context, req *pbCo
resp.CommonResp = &pbConversation.CommonResp{ErrCode: constant.ErrDB.ErrCode, ErrMsg: constant.ErrDB.ErrMsg} resp.CommonResp = &pbConversation.CommonResp{ErrCode: constant.ErrDB.ErrCode, ErrMsg: constant.ErrDB.ErrMsg}
return resp, nil return resp, nil
} }
if req.Conversation.ConversationType == constant.SuperGroupChatType {
if req.Conversation.RecvMsgOpt == constant.ReceiveNotNotifyMessage {
if err = db.DB.SetSuperGroupUserReceiveNotNotifyMessage(req.Conversation.GroupID, v); err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "cache failed, rpc return", err.Error(), req.Conversation.GroupID, v)
resp.CommonResp = &pbConversation.CommonResp{ErrCode: constant.ErrDB.ErrCode, ErrMsg: constant.ErrDB.ErrMsg}
return resp, nil
}
} else {
if err = db.DB.SetSuperGroupUserReceiveNotifyMessage(req.Conversation.GroupID, v); err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "cache failed, rpc return", err.Error(), req.Conversation.GroupID, v)
resp.CommonResp = &pbConversation.CommonResp{ErrCode: constant.ErrDB.ErrCode, ErrMsg: constant.ErrDB.ErrMsg}
return resp, nil
}
}
}
} }
err = imdb.UpdateColumnsConversations(haveUserID, req.Conversation.ConversationID, map[string]interface{}{"recv_msg_opt": conversation.RecvMsgOpt}) err = imdb.UpdateColumnsConversations(haveUserID, req.Conversation.ConversationID, map[string]interface{}{"recv_msg_opt": conversation.RecvMsgOpt})
case constant.FieldGroupAtType: case constant.FieldGroupAtType:
@ -78,6 +93,8 @@ func (rpc *rpcConversation) ModifyConversationField(c context.Context, req *pbCo
case constant.FieldUnread: case constant.FieldUnread:
isSyncConversation = false isSyncConversation = false
err = imdb.UpdateColumnsConversations(haveUserID, req.Conversation.ConversationID, map[string]interface{}{"update_unread_count_time": conversation.UpdateUnreadCountTime}) err = imdb.UpdateColumnsConversations(haveUserID, req.Conversation.ConversationID, map[string]interface{}{"update_unread_count_time": conversation.UpdateUnreadCountTime})
case constant.FieldBurnDuration:
err = imdb.UpdateColumnsConversations(haveUserID, req.Conversation.ConversationID, map[string]interface{}{"burn_duration": conversation.BurnDuration})
} }
if err != nil { if err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), "UpdateColumnsConversations error", err.Error()) log.NewError(req.OperationID, utils.GetSelfFuncName(), "UpdateColumnsConversations error", err.Error())
@ -149,6 +166,10 @@ func syncPeerUserConversation(conversation *pbConversation.Conversation, operati
log.NewError(operationID, utils.GetSelfFuncName(), "SetConversation error", err.Error()) log.NewError(operationID, utils.GetSelfFuncName(), "SetConversation error", err.Error())
return err return err
} }
err = rocksCache.DelUserConversationIDListFromCache(conversation.UserID)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "DelConversationFromCache failed", err.Error(), conversation.OwnerUserID, conversation.ConversationID)
}
err = rocksCache.DelConversationFromCache(conversation.UserID, utils.GetConversationIDBySessionType(conversation.OwnerUserID, constant.SingleChatType)) err = rocksCache.DelConversationFromCache(conversation.UserID, utils.GetConversationIDBySessionType(conversation.OwnerUserID, constant.SingleChatType))
if err != nil { if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "DelConversationFromCache failed", err.Error(), conversation.OwnerUserID, conversation.ConversationID) log.NewError(operationID, utils.GetSelfFuncName(), "DelConversationFromCache failed", err.Error(), conversation.OwnerUserID, conversation.ConversationID)

@ -4,11 +4,14 @@ import (
cbApi "Open_IM/pkg/call_back_struct" cbApi "Open_IM/pkg/call_back_struct"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db"
"Open_IM/pkg/common/http" "Open_IM/pkg/common/http"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
pbGroup "Open_IM/pkg/proto/group" pbGroup "Open_IM/pkg/proto/group"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
http2 "net/http" http2 "net/http"
"google.golang.org/protobuf/types/known/wrapperspb"
) )
func callbackBeforeCreateGroup(req *pbGroup.CreateGroupReq) cbApi.CommonCallbackResp { func callbackBeforeCreateGroup(req *pbGroup.CreateGroupReq) cbApi.CommonCallbackResp {
@ -19,6 +22,9 @@ func callbackBeforeCreateGroup(req *pbGroup.CreateGroupReq) cbApi.CommonCallback
log.NewDebug(req.OperationID, utils.GetSelfFuncName(), req.String()) log.NewDebug(req.OperationID, utils.GetSelfFuncName(), req.String())
commonCallbackReq := &cbApi.CallbackBeforeCreateGroupReq{ commonCallbackReq := &cbApi.CallbackBeforeCreateGroupReq{
CallbackCommand: constant.CallbackBeforeCreateGroupCommand, CallbackCommand: constant.CallbackBeforeCreateGroupCommand,
OperationID: req.OperationID,
GroupInfo: *req.GroupInfo,
InitMemberList: req.InitMemberList,
} }
resp := &cbApi.CallbackBeforeCreateGroupResp{ resp := &cbApi.CallbackBeforeCreateGroupResp{
CommonCallbackResp: &callbackResp, CommonCallbackResp: &callbackResp,
@ -41,7 +47,7 @@ func callbackBeforeCreateGroup(req *pbGroup.CreateGroupReq) cbApi.CommonCallback
req.GroupInfo.GroupID = *resp.GroupID req.GroupInfo.GroupID = *resp.GroupID
} }
if resp.GroupName != nil { if resp.GroupName != nil {
req.GroupInfo.GroupName = *resp.GroupID req.GroupInfo.GroupName = *resp.GroupName
} }
if resp.Notification != nil { if resp.Notification != nil {
req.GroupInfo.Notification = *resp.Notification req.GroupInfo.Notification = *resp.Notification
@ -68,7 +74,7 @@ func callbackBeforeCreateGroup(req *pbGroup.CreateGroupReq) cbApi.CommonCallback
req.GroupInfo.GroupType = *resp.GroupType req.GroupInfo.GroupType = *resp.GroupType
} }
if resp.NeedVerification != nil { if resp.NeedVerification != nil {
req.GroupInfo.NeedVerification = *resp.GroupType req.GroupInfo.NeedVerification = *resp.NeedVerification
} }
if resp.LookMemberInfo != nil { if resp.LookMemberInfo != nil {
req.GroupInfo.LookMemberInfo = *resp.LookMemberInfo req.GroupInfo.LookMemberInfo = *resp.LookMemberInfo
@ -76,3 +82,102 @@ func callbackBeforeCreateGroup(req *pbGroup.CreateGroupReq) cbApi.CommonCallback
} }
return callbackResp return callbackResp
} }
// CallbackBeforeMemberJoinGroup invokes the configured "before member join group"
// webhook. When the callback is enabled, the member's join data is posted to the
// configured URL; on success, any non-nil fields in the callback response are
// written back into groupMember so the caller persists the adjusted values.
// On transport error, CallbackFailedContinue decides whether the join proceeds
// (ActionAllow) or is rejected (ActionForbidden).
func CallbackBeforeMemberJoinGroup(operationID string, groupMember *db.GroupMember, groupEx string) cbApi.CommonCallbackResp {
	cbResp := cbApi.CommonCallbackResp{OperationID: operationID}
	if !config.Config.Callback.CallbackBeforeMemberJoinGroup.Enable {
		// Callback disabled: allow the join untouched.
		return cbResp
	}
	log.NewDebug(operationID, "args: ", *groupMember)
	cbReq := cbApi.CallbackBeforeMemberJoinGroupReq{
		CallbackCommand: constant.CallbackBeforeMemberJoinGroupCommand,
		OperationID:     operationID,
		GroupID:         groupMember.GroupID,
		UserID:          groupMember.UserID,
		Ex:              groupMember.Ex,
		GroupEx:         groupEx,
	}
	// apiResp embeds cbResp so the remote side can fill the common fields directly.
	apiResp := &cbApi.CallbackBeforeMemberJoinGroupResp{
		CommonCallbackResp: &cbResp,
	}
	if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackBeforeMemberJoinGroupCommand, cbReq, apiResp, config.Config.Callback.CallbackBeforeMemberJoinGroup.CallbackTimeOut); err != nil {
		cbResp.ErrCode = http2.StatusInternalServerError
		cbResp.ErrMsg = err.Error()
		// Transport failure: the CallbackFailedContinue flag decides fail-open vs fail-closed.
		if config.Config.Callback.CallbackBeforeMemberJoinGroup.CallbackFailedContinue {
			cbResp.ActionCode = constant.ActionAllow
		} else {
			cbResp.ActionCode = constant.ActionForbidden
		}
		return cbResp
	}
	// Apply only the fields the callback chose to override (nil means "keep as is").
	if apiResp.MuteEndTime != nil {
		groupMember.MuteEndTime = utils.UnixSecondToTime(*apiResp.MuteEndTime)
	}
	if apiResp.FaceURL != nil {
		groupMember.FaceURL = *apiResp.FaceURL
	}
	if apiResp.Ex != nil {
		groupMember.Ex = *apiResp.Ex
	}
	if apiResp.NickName != nil {
		groupMember.Nickname = *apiResp.NickName
	}
	if apiResp.RoleLevel != nil {
		groupMember.RoleLevel = *apiResp.RoleLevel
	}
	return cbResp
}
// CallbackBeforeSetGroupMemberInfo invokes the configured "before set group
// member info" webhook. The optional wrapper fields present on the request are
// unwrapped into the callback payload; on success, any non-nil fields returned
// by the callback are wrapped back onto req, letting the webhook rewrite the
// pending update. On transport error, CallbackFailedContinue decides whether
// the update proceeds (ActionAllow) or is rejected (ActionForbidden).
func CallbackBeforeSetGroupMemberInfo(req *pbGroup.SetGroupMemberInfoReq) cbApi.CommonCallbackResp {
	cbResp := cbApi.CommonCallbackResp{OperationID: req.OperationID}
	if !config.Config.Callback.CallbackBeforeSetGroupMemberInfo.Enable {
		// Callback disabled: allow the update untouched.
		return cbResp
	}
	cbReq := cbApi.CallbackBeforeSetGroupMemberInfoReq{
		CallbackCommand: constant.CallbackBeforeSetGroupMemberInfoCommand,
		OperationID:     req.OperationID,
		GroupID:         req.GroupID,
		UserID:          req.UserID,
	}
	// Only fields the client actually set (non-nil wrappers) are forwarded.
	if req.Nickname != nil {
		cbReq.Nickname = req.Nickname.Value
	}
	if req.FaceURL != nil {
		cbReq.FaceURL = req.FaceURL.Value
	}
	if req.RoleLevel != nil {
		cbReq.RoleLevel = req.RoleLevel.Value
	}
	if req.Ex != nil {
		cbReq.Ex = req.Ex.Value
	}
	// apiResp embeds cbResp so the remote side can fill the common fields directly.
	apiResp := &cbApi.CallbackBeforeSetGroupMemberInfoResp{
		CommonCallbackResp: &cbResp,
	}
	if err := http.CallBackPostReturn(config.Config.Callback.CallbackUrl, constant.CallbackBeforeSetGroupMemberInfoCommand, cbReq, apiResp, config.Config.Callback.CallbackBeforeSetGroupMemberInfo.CallbackTimeOut); err != nil {
		cbResp.ErrCode = http2.StatusInternalServerError
		cbResp.ErrMsg = err.Error()
		// Transport failure: the CallbackFailedContinue flag decides fail-open vs fail-closed.
		if config.Config.Callback.CallbackBeforeSetGroupMemberInfo.CallbackFailedContinue {
			cbResp.ActionCode = constant.ActionAllow
		} else {
			cbResp.ActionCode = constant.ActionForbidden
		}
		return cbResp
	}
	// Write callback overrides back onto the request (nil means "keep as is").
	if apiResp.FaceURL != nil {
		req.FaceURL = &wrapperspb.StringValue{Value: *apiResp.FaceURL}
	}
	if apiResp.Nickname != nil {
		req.Nickname = &wrapperspb.StringValue{Value: *apiResp.Nickname}
	}
	if apiResp.RoleLevel != nil {
		req.RoleLevel = &wrapperspb.Int32Value{Value: *apiResp.RoleLevel}
	}
	if apiResp.Ex != nil {
		req.Ex = &wrapperspb.StringValue{Value: *apiResp.Ex}
	}
	return cbResp
}

File diff suppressed because it is too large Load Diff

@ -8,6 +8,7 @@ import (
commonPb "Open_IM/pkg/proto/sdk_ws" commonPb "Open_IM/pkg/proto/sdk_ws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
) )

@ -5,8 +5,8 @@ import (
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/common/token_verify" "Open_IM/pkg/common/token_verify"
commonPb "Open_IM/pkg/proto/sdk_ws"
"Open_IM/pkg/proto/msg" "Open_IM/pkg/proto/msg"
commonPb "Open_IM/pkg/proto/sdk_ws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
"time" "time"

@ -0,0 +1,477 @@
package msg
import (
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db"
"Open_IM/pkg/common/log"
"Open_IM/pkg/proto/msg"
"Open_IM/pkg/proto/sdk_ws"
"Open_IM/pkg/utils"
"context"
go_redis "github.com/go-redis/redis/v8"
"time"
)
// SetMessageReactionExtensions creates or updates reaction-extension key/value
// pairs on a message. Flow: (1) a callback may veto or rewrite the request;
// (2) with IsExternalExtensions the callback's result is relayed as-is;
// (3) otherwise the extensions are stored in redis (per-key) when no reaction
// set exists yet and IsReact is false, or in mongo (whole set, under a global
// message lock) when IsReact is true, or merged key-by-key into the existing
// redis entries. An ExtendMessageUpdatedNotification is sent in every
// successful path. Per-key failures are reported in rResp.Result rather than
// aborting the whole request.
func (rpc *rpcChat) SetMessageReactionExtensions(ctx context.Context, req *msg.SetMessageReactionExtensionsReq) (resp *msg.SetMessageReactionExtensionsResp, err error) {
	log.Debug(req.OperationID, utils.GetSelfFuncName(), "rpc args is:", req.String())
	var rResp msg.SetMessageReactionExtensionsResp
	rResp.ClientMsgID = req.ClientMsgID
	rResp.MsgFirstModifyTime = req.MsgFirstModifyTime
	// Give the configured callback a chance to veto or rewrite the request.
	callbackResp := callbackSetMessageReactionExtensions(req)
	if callbackResp.ActionCode != constant.ActionAllow || callbackResp.ErrCode != 0 {
		// Callback refused: report the same error for every requested key.
		rResp.ErrCode = int32(callbackResp.ErrCode)
		rResp.ErrMsg = callbackResp.ErrMsg
		for _, value := range req.ReactionExtensionList {
			temp := new(msg.KeyValueResp)
			temp.KeyValue = value
			temp.ErrMsg = callbackResp.ErrMsg
			temp.ErrCode = 100
			rResp.Result = append(rResp.Result, temp)
		}
		return &rResp, nil
	}
	//if ExternalExtension
	if req.IsExternalExtensions {
		// Extensions are stored by the external callback; relay its result and notify.
		var isHistory bool
		if req.IsReact {
			isHistory = false
		} else {
			isHistory = true
		}
		rResp.MsgFirstModifyTime = callbackResp.MsgFirstModifyTime
		rResp.Result = callbackResp.ResultReactionExtensionList
		ExtendMessageUpdatedNotification(req.OperationID, req.OpUserID, req.SourceID, req.OpUserIDPlatformID, req.SessionType, req, &rResp, isHistory, false)
		return &rResp, nil
	}
	// Merge the callback's per-key verdicts back into the request: accepted keys
	// may have been rewritten; rejected keys are dropped and reported directly.
	for _, v := range callbackResp.ResultReactionExtensionList {
		if v.ErrCode == 0 {
			req.ReactionExtensionList[v.KeyValue.TypeKey] = v.KeyValue
		} else {
			delete(req.ReactionExtensionList, v.KeyValue.TypeKey)
			rResp.Result = append(rResp.Result, v)
		}
	}
	// isExists reports whether a redis reaction entry already exists for this message.
	isExists, err := db.DB.JudgeMessageReactionEXISTS(req.ClientMsgID, req.SessionType)
	if err != nil {
		rResp.ErrCode = 100
		rResp.ErrMsg = err.Error()
		for _, value := range req.ReactionExtensionList {
			temp := new(msg.KeyValueResp)
			temp.KeyValue = value
			temp.ErrMsg = err.Error()
			temp.ErrCode = 100
			rResp.Result = append(rResp.Result, temp)
		}
		return &rResp, nil
	}
	if !isExists {
		if !req.IsReact {
			// First modification of a message with no existing reaction set:
			// store each key in redis under a per-key lock.
			log.Debug(req.OperationID, "redis handle firstly", req.String())
			rResp.MsgFirstModifyTime = utils.GetCurrentTimestampByMill()
			for k, v := range req.ReactionExtensionList {
				err := rpc.dMessageLocker.LockMessageTypeKey(req.ClientMsgID, k)
				if err != nil {
					setKeyResultInfo(&rResp, 100, err.Error(), req.ClientMsgID, k, v)
					continue
				}
				v.LatestUpdateTime = utils.GetCurrentTimestampByMill()
				newerr := db.DB.SetMessageTypeKeyValue(req.ClientMsgID, req.SessionType, k, utils.StructToJsonString(v))
				if newerr != nil {
					setKeyResultInfo(&rResp, 201, newerr.Error(), req.ClientMsgID, k, v)
					continue
				}
				// setKeyResultInfo also releases the per-key lock.
				setKeyResultInfo(&rResp, 0, "", req.ClientMsgID, k, v)
			}
			rResp.IsReact = true
			// Redis reaction entries expire after 72 hours; expiry failure is logged only.
			_, err := db.DB.SetMessageReactionExpire(req.ClientMsgID, req.SessionType, time.Duration(24*3)*time.Hour)
			if err != nil {
				log.Error(req.OperationID, "SetMessageReactionExpire err:", err.Error(), req.String())
			}
		} else {
			// IsReact: the reaction set lives in mongo; serialize access with a
			// global per-message lock.
			err := rpc.dMessageLocker.LockGlobalMessage(req.ClientMsgID)
			if err != nil {
				rResp.ErrCode = 100
				rResp.ErrMsg = err.Error()
				for _, value := range req.ReactionExtensionList {
					temp := new(msg.KeyValueResp)
					temp.KeyValue = value
					temp.ErrMsg = err.Error()
					temp.ErrCode = 100
					rResp.Result = append(rResp.Result, temp)
				}
				return &rResp, nil
			}
			mongoValue, err := db.DB.GetExtendMsg(req.SourceID, req.SessionType, req.ClientMsgID, req.MsgFirstModifyTime)
			if err != nil {
				rResp.ErrCode = 200
				rResp.ErrMsg = err.Error()
				for _, value := range req.ReactionExtensionList {
					temp := new(msg.KeyValueResp)
					temp.KeyValue = value
					temp.ErrMsg = err.Error()
					temp.ErrCode = 100
					rResp.Result = append(rResp.Result, temp)
				}
				return &rResp, nil
			}
			setValue := make(map[string]*server_api_params.KeyValue)
			for k, v := range req.ReactionExtensionList {
				temp := new(server_api_params.KeyValue)
				if vv, ok := mongoValue.ReactionExtensionList[k]; ok {
					utils.CopyStructFields(temp, &vv)
					// Optimistic concurrency: reject keys modified since the
					// client last read them.
					if v.LatestUpdateTime != vv.LatestUpdateTime {
						setKeyResultInfo(&rResp, 300, "message have update", req.ClientMsgID, k, temp)
						continue
					}
				}
				temp.TypeKey = k
				temp.Value = v.Value
				temp.LatestUpdateTime = utils.GetCurrentTimestampByMill()
				setValue[k] = temp
			}
			err = db.DB.InsertOrUpdateReactionExtendMsgSet(req.SourceID, req.SessionType, req.ClientMsgID, req.MsgFirstModifyTime, setValue)
			if err != nil {
				for _, value := range setValue {
					temp := new(msg.KeyValueResp)
					temp.KeyValue = value
					temp.ErrMsg = err.Error()
					temp.ErrCode = 100
					rResp.Result = append(rResp.Result, temp)
				}
			} else {
				for _, value := range setValue {
					temp := new(msg.KeyValueResp)
					temp.KeyValue = value
					rResp.Result = append(rResp.Result, temp)
				}
			}
			lockErr := rpc.dMessageLocker.UnLockGlobalMessage(req.ClientMsgID)
			if lockErr != nil {
				log.Error(req.OperationID, "UnLockGlobalMessage err:", lockErr.Error())
			}
		}
	} else {
		// Reaction entry already in redis: update each key under its per-key lock,
		// with the same optimistic LatestUpdateTime check as the mongo path.
		log.Debug(req.OperationID, "redis handle secondly", req.String())
		for k, v := range req.ReactionExtensionList {
			err := rpc.dMessageLocker.LockMessageTypeKey(req.ClientMsgID, k)
			if err != nil {
				setKeyResultInfo(&rResp, 100, err.Error(), req.ClientMsgID, k, v)
				continue
			}
			redisValue, err := db.DB.GetMessageTypeKeyValue(req.ClientMsgID, req.SessionType, k)
			// go_redis.Nil (key absent) is treated as "no previous value", not an error.
			if err != nil && err != go_redis.Nil {
				setKeyResultInfo(&rResp, 200, err.Error(), req.ClientMsgID, k, v)
				continue
			}
			temp := new(server_api_params.KeyValue)
			utils.JsonStringToStruct(redisValue, temp)
			if v.LatestUpdateTime != temp.LatestUpdateTime {
				setKeyResultInfo(&rResp, 300, "message have update", req.ClientMsgID, k, temp)
				continue
			} else {
				v.LatestUpdateTime = utils.GetCurrentTimestampByMill()
				newerr := db.DB.SetMessageTypeKeyValue(req.ClientMsgID, req.SessionType, k, utils.StructToJsonString(v))
				if newerr != nil {
					setKeyResultInfo(&rResp, 201, newerr.Error(), req.ClientMsgID, k, temp)
					continue
				}
				setKeyResultInfo(&rResp, 0, "", req.ClientMsgID, k, v)
			}
		}
	}
	// Notify with flags derived from the pre-update existence state.
	// NOTE(review): the (isHistory, isReactionFromCache) flag pairs below differ
	// per branch — confirm against ExtendMessageUpdatedNotification's contract.
	if !isExists {
		if !req.IsReact {
			ExtendMessageUpdatedNotification(req.OperationID, req.OpUserID, req.SourceID, req.OpUserIDPlatformID, req.SessionType, req, &rResp, true, true)
		} else {
			ExtendMessageUpdatedNotification(req.OperationID, req.OpUserID, req.SourceID, req.OpUserIDPlatformID, req.SessionType, req, &rResp, false, false)
		}
	} else {
		ExtendMessageUpdatedNotification(req.OperationID, req.OpUserID, req.SourceID, req.OpUserIDPlatformID, req.SessionType, req, &rResp, false, true)
	}
	log.Debug(req.OperationID, utils.GetSelfFuncName(), "rpc return is:", rResp.String())
	return &rResp, nil
}
// setKeyResultInfo appends one per-key outcome (keyValue plus error code and
// message) to the set-extensions response and releases the per-key lock held
// for that typeKey. The unlock is best-effort: its error is intentionally
// discarded.
func setKeyResultInfo(r *msg.SetMessageReactionExtensionsResp, errCode int32, errMsg, clientMsgID, typeKey string, keyValue *server_api_params.KeyValue) {
	r.Result = append(r.Result, &msg.KeyValueResp{
		KeyValue: keyValue,
		ErrCode:  errCode,
		ErrMsg:   errMsg,
	})
	_ = db.DB.UnLockMessageTypeKey(clientMsgID, typeKey)
}
// setDeleteKeyResultInfo appends one per-key outcome (keyValue plus error code
// and message) to the delete-extensions response and releases the per-key lock
// held for that typeKey. The unlock is best-effort: its error is intentionally
// discarded.
func setDeleteKeyResultInfo(r *msg.DeleteMessageListReactionExtensionsResp, errCode int32, errMsg, clientMsgID, typeKey string, keyValue *server_api_params.KeyValue) {
	r.Result = append(r.Result, &msg.KeyValueResp{
		KeyValue: keyValue,
		ErrCode:  errCode,
		ErrMsg:   errMsg,
	})
	_ = db.DB.UnLockMessageTypeKey(clientMsgID, typeKey)
}
// GetMessageListReactionExtensions returns the reaction-extension key/value
// sets for a list of messages. With IsExternalExtensions the configured
// callback supplies the results; otherwise each message is read from redis
// when a reaction entry exists there, falling back to mongo. Per-message read
// failures are reported inside that message's result entry, while a failed
// existence check aborts the whole request.
func (rpc *rpcChat) GetMessageListReactionExtensions(ctx context.Context, req *msg.GetMessageListReactionExtensionsReq) (resp *msg.GetMessageListReactionExtensionsResp, err error) {
	log.Debug(req.OperationID, utils.GetSelfFuncName(), "rpc args is:", req.String())
	var rResp msg.GetMessageListReactionExtensionsResp
	if req.IsExternalExtensions {
		// External mode: the callback is the source of truth.
		callbackResp := callbackGetMessageListReactionExtensions(req)
		if callbackResp.ActionCode != constant.ActionAllow || callbackResp.ErrCode != 0 {
			rResp.ErrCode = int32(callbackResp.ErrCode)
			rResp.ErrMsg = callbackResp.ErrMsg
			return &rResp, nil
		} else {
			rResp.SingleMessageResult = callbackResp.MessageResultList
			return &rResp, nil
		}
	}
	for _, messageValue := range req.MessageReactionKeyList {
		var oneMessage msg.SingleMessageExtensionResult
		oneMessage.ClientMsgID = messageValue.ClientMsgID
		// Existence in redis decides which store to read from.
		isExists, err := db.DB.JudgeMessageReactionEXISTS(messageValue.ClientMsgID, req.SessionType)
		if err != nil {
			// Existence check failed: abort the whole request, not just this message.
			rResp.ErrCode = 100
			rResp.ErrMsg = err.Error()
			return &rResp, nil
		}
		if isExists {
			// Redis path: each key's value is stored as a JSON-encoded KeyValue.
			redisValue, err := db.DB.GetOneMessageAllReactionList(messageValue.ClientMsgID, req.SessionType)
			if err != nil {
				oneMessage.ErrCode = 100
				oneMessage.ErrMsg = err.Error()
				rResp.SingleMessageResult = append(rResp.SingleMessageResult, &oneMessage)
				continue
			}
			keyMap := make(map[string]*server_api_params.KeyValue)
			for k, v := range redisValue {
				temp := new(server_api_params.KeyValue)
				utils.JsonStringToStruct(v, temp)
				keyMap[k] = temp
			}
			oneMessage.ReactionExtensionList = keyMap
		} else {
			// Mongo path: copy stored fields into fresh proto KeyValue entries.
			mongoValue, err := db.DB.GetExtendMsg(req.SourceID, req.SessionType, messageValue.ClientMsgID, messageValue.MsgFirstModifyTime)
			if err != nil {
				oneMessage.ErrCode = 100
				oneMessage.ErrMsg = err.Error()
				rResp.SingleMessageResult = append(rResp.SingleMessageResult, &oneMessage)
				continue
			}
			keyMap := make(map[string]*server_api_params.KeyValue)
			for k, v := range mongoValue.ReactionExtensionList {
				temp := new(server_api_params.KeyValue)
				temp.TypeKey = v.TypeKey
				temp.Value = v.Value
				temp.LatestUpdateTime = v.LatestUpdateTime
				keyMap[k] = temp
			}
			oneMessage.ReactionExtensionList = keyMap
		}
		rResp.SingleMessageResult = append(rResp.SingleMessageResult, &oneMessage)
	}
	log.Debug(req.OperationID, utils.GetSelfFuncName(), "rpc return is:", rResp.String())
	return &rResp, nil
}
// AddMessageReactionExtensions adds reaction extensions to a message by
// delegating entirely to the configured callback: on approval, the callback's
// per-key results, first-modify time and IsReact flag are relayed in the
// response and an ExtendMessageAddedNotification is pushed. On refusal, the
// callback's per-key errors are returned to the client unchanged.
func (rpc *rpcChat) AddMessageReactionExtensions(ctx context.Context, req *msg.AddMessageReactionExtensionsReq) (resp *msg.AddMessageReactionExtensionsResp, err error) {
	log.Debug(req.OperationID, utils.GetSelfFuncName(), "rpc args is:", req.String())
	var rResp msg.AddMessageReactionExtensionsResp
	rResp.ClientMsgID = req.ClientMsgID
	rResp.MsgFirstModifyTime = req.MsgFirstModifyTime
	// The callback both authorizes and performs the add.
	callbackResp := callbackAddMessageReactionExtensions(req)
	if callbackResp.ActionCode != constant.ActionAllow || callbackResp.ErrCode != 0 {
		// Callback refused: relay its per-key error details verbatim.
		rResp.ErrCode = int32(callbackResp.ErrCode)
		rResp.ErrMsg = callbackResp.ErrMsg
		for _, value := range callbackResp.ResultReactionExtensionList {
			temp := new(msg.KeyValueResp)
			temp.KeyValue = value.KeyValue
			temp.ErrMsg = value.ErrMsg
			temp.ErrCode = value.ErrCode
			rResp.Result = append(rResp.Result, temp)
		}
		return &rResp, nil
	}
	// A fresh reaction (IsReact) is not yet part of message history.
	isHistory := !req.IsReact
	rResp.MsgFirstModifyTime = callbackResp.MsgFirstModifyTime
	rResp.Result = callbackResp.ResultReactionExtensionList
	rResp.IsReact = callbackResp.IsReact
	ExtendMessageAddedNotification(req.OperationID, req.OpUserID, req.SourceID, req.OpUserIDPlatformID, req.SessionType, req, &rResp, isHistory, false)
	// BUGFIX: log the populated rResp, not the nil named return `resp`
	// (matches the sibling reaction-extension RPCs).
	log.Debug(req.OperationID, utils.GetSelfFuncName(), "rpc return is:", rResp.String())
	return &rResp, nil
}
// DeleteMessageReactionExtensions removes reaction-extension keys from a
// message. Flow: (1) a callback may veto the delete or reject individual keys;
// (2) with IsExternalExtensions the callback's result is relayed as-is;
// (3) otherwise keys are deleted from redis (per-key locks) when a reaction
// entry exists there, or from mongo (global message lock) when it does not.
// Deletes use optimistic concurrency on LatestUpdateTime, and an
// ExtendMessageDeleteNotification is sent at the end.
func (rpc *rpcChat) DeleteMessageReactionExtensions(ctx context.Context, req *msg.DeleteMessageListReactionExtensionsReq) (resp *msg.DeleteMessageListReactionExtensionsResp, err error) {
	log.Debug(req.OperationID, utils.GetSelfFuncName(), "rpc args is:", req.String())
	var rResp msg.DeleteMessageListReactionExtensionsResp
	// Give the configured callback a chance to veto the delete.
	callbackResp := callbackDeleteMessageReactionExtensions(req)
	if callbackResp.ActionCode != constant.ActionAllow || callbackResp.ErrCode != 0 {
		// Callback refused: report the same error for every requested key.
		rResp.ErrCode = int32(callbackResp.ErrCode)
		rResp.ErrMsg = callbackResp.ErrMsg
		for _, value := range req.ReactionExtensionList {
			temp := new(msg.KeyValueResp)
			temp.KeyValue = value
			temp.ErrMsg = callbackResp.ErrMsg
			temp.ErrCode = 100
			rResp.Result = append(rResp.Result, temp)
		}
		return &rResp, nil
	}
	//if ExternalExtension
	if req.IsExternalExtensions {
		// Extensions are stored by the external callback; relay its result and notify.
		rResp.Result = callbackResp.ResultReactionExtensionList
		ExtendMessageDeleteNotification(req.OperationID, req.OpUserID, req.SourceID, req.OpUserIDPlatformID, req.SessionType, req, &rResp, false, false)
		return &rResp, nil
	}
	// Keys the callback rejected are removed from the request (in place) and
	// their callback verdicts reported directly.
	for _, v := range callbackResp.ResultReactionExtensionList {
		if v.ErrCode != 0 {
			func(req *[]*server_api_params.KeyValue, typeKey string) {
				// Drop every entry with this TypeKey from the slice.
				for i := 0; i < len(*req); i++ {
					if (*req)[i].TypeKey == typeKey {
						*req = append((*req)[:i], (*req)[i+1:]...)
					}
				}
			}(&req.ReactionExtensionList, v.KeyValue.TypeKey)
			rResp.Result = append(rResp.Result, v)
		}
	}
	// isExists reports whether a redis reaction entry exists for this message.
	isExists, err := db.DB.JudgeMessageReactionEXISTS(req.ClientMsgID, req.SessionType)
	if err != nil {
		rResp.ErrCode = 100
		rResp.ErrMsg = err.Error()
		for _, value := range req.ReactionExtensionList {
			temp := new(msg.KeyValueResp)
			temp.KeyValue = value
			temp.ErrMsg = err.Error()
			temp.ErrCode = 100
			rResp.Result = append(rResp.Result, temp)
		}
		return &rResp, nil
	}
	if isExists {
		// Redis path: delete each key under its per-key lock.
		log.Debug(req.OperationID, "redis handle this delete", req.String())
		for _, v := range req.ReactionExtensionList {
			err := rpc.dMessageLocker.LockMessageTypeKey(req.ClientMsgID, v.TypeKey)
			if err != nil {
				setDeleteKeyResultInfo(&rResp, 100, err.Error(), req.ClientMsgID, v.TypeKey, v)
				continue
			}
			redisValue, err := db.DB.GetMessageTypeKeyValue(req.ClientMsgID, req.SessionType, v.TypeKey)
			// go_redis.Nil (key absent) is treated as "no stored value", not an error.
			if err != nil && err != go_redis.Nil {
				setDeleteKeyResultInfo(&rResp, 200, err.Error(), req.ClientMsgID, v.TypeKey, v)
				continue
			}
			temp := new(server_api_params.KeyValue)
			utils.JsonStringToStruct(redisValue, temp)
			// Optimistic concurrency: only delete if the client saw the latest version.
			if v.LatestUpdateTime != temp.LatestUpdateTime {
				setDeleteKeyResultInfo(&rResp, 300, "message have update", req.ClientMsgID, v.TypeKey, temp)
				continue
			} else {
				newErr := db.DB.DeleteOneMessageKey(req.ClientMsgID, req.SessionType, v.TypeKey)
				if newErr != nil {
					setDeleteKeyResultInfo(&rResp, 201, newErr.Error(), req.ClientMsgID, v.TypeKey, temp)
					continue
				}
				// setDeleteKeyResultInfo also releases the per-key lock.
				setDeleteKeyResultInfo(&rResp, 0, "", req.ClientMsgID, v.TypeKey, v)
			}
		}
	} else {
		// Mongo path: serialize with a global per-message lock.
		err := rpc.dMessageLocker.LockGlobalMessage(req.ClientMsgID)
		if err != nil {
			rResp.ErrCode = 100
			rResp.ErrMsg = err.Error()
			for _, value := range req.ReactionExtensionList {
				temp := new(msg.KeyValueResp)
				temp.KeyValue = value
				temp.ErrMsg = err.Error()
				temp.ErrCode = 100
				rResp.Result = append(rResp.Result, temp)
			}
			return &rResp, nil
		}
		mongoValue, err := db.DB.GetExtendMsg(req.SourceID, req.SessionType, req.ClientMsgID, req.MsgFirstModifyTime)
		if err != nil {
			rResp.ErrCode = 200
			rResp.ErrMsg = err.Error()
			for _, value := range req.ReactionExtensionList {
				temp := new(msg.KeyValueResp)
				temp.KeyValue = value
				temp.ErrMsg = err.Error()
				temp.ErrCode = 100
				rResp.Result = append(rResp.Result, temp)
			}
			return &rResp, nil
		}
		// Build the set of keys eligible for deletion: present in mongo and
		// unchanged since the client last read them.
		setValue := make(map[string]*server_api_params.KeyValue)
		for _, v := range req.ReactionExtensionList {
			temp := new(server_api_params.KeyValue)
			if vv, ok := mongoValue.ReactionExtensionList[v.TypeKey]; ok {
				utils.CopyStructFields(temp, &vv)
				if v.LatestUpdateTime != vv.LatestUpdateTime {
					setDeleteKeyResultInfo(&rResp, 300, "message have update", req.ClientMsgID, v.TypeKey, temp)
					continue
				}
			} else {
				setDeleteKeyResultInfo(&rResp, 400, "key not in", req.ClientMsgID, v.TypeKey, v)
				continue
			}
			temp.TypeKey = v.TypeKey
			setValue[v.TypeKey] = temp
		}
		err = db.DB.DeleteReactionExtendMsgSet(req.SourceID, req.SessionType, req.ClientMsgID, req.MsgFirstModifyTime, setValue)
		if err != nil {
			for _, value := range setValue {
				temp := new(msg.KeyValueResp)
				temp.KeyValue = value
				temp.ErrMsg = err.Error()
				temp.ErrCode = 100
				rResp.Result = append(rResp.Result, temp)
			}
		} else {
			for _, value := range setValue {
				temp := new(msg.KeyValueResp)
				temp.KeyValue = value
				rResp.Result = append(rResp.Result, temp)
			}
		}
		lockErr := rpc.dMessageLocker.UnLockGlobalMessage(req.ClientMsgID)
		if lockErr != nil {
			log.Error(req.OperationID, "UnLockGlobalMessage err:", lockErr.Error())
		}
	}
	ExtendMessageDeleteNotification(req.OperationID, req.OpUserID, req.SourceID, req.OpUserIDPlatformID, req.SessionType, req, &rResp, false, isExists)
	log.Debug(req.OperationID, utils.GetSelfFuncName(), "rpc return is:", rResp.String())
	return &rResp, nil
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save