Merge branch 'openimsdk:main' into main

Ref: pull/2622/head
Author: icey-yu, committed by GitHub
Commit: 908e812aa4
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)

@@ -33,8 +33,8 @@ jobs:
           remote-repository-name: cla
           create-file-commit-message: 'Creating file for storing CLA Signatures'
           # signed-commit-message: '$contributorName has signed the CLA in $owner/$repo#$pullRequestNo'
-          custom-notsigned-prcomment: '💕 Thank you for your contribution and please kindly read and sign our [CLA Docs](https://github.com/OpenIM-Robot/cla/blob/main/README.md)'
-          custom-pr-sign-comment: 'The signature to be committed in order to sign the CLA'
-          custom-allsigned-prcomment: '🤖 All Contributors have signed the [CLA](https://github.com/OpenIM-Robot/cla/blob/main/README.md).<br> The signed information is recorded [🤖here](https://github.com/openim-sigs/cla/tree/main/signatures/cla.json)'
+          custom-notsigned-prcomment: '💕 Thank you for your contribution and please kindly read and sign our CLA. [CLA Docs](https://github.com/OpenIM-Robot/cla/blob/main/README.md)'
+          custom-pr-sign-comment: 'I have read the CLA Document and I hereby sign the CLA'
+          custom-allsigned-prcomment: '🤖 All Contributors have signed the [CLA](https://github.com/OpenIM-Robot/cla/blob/main/README.md).<br> The signed information is recorded [**here**](https://github.com/OpenIM-Robot/cla/blob/main/signatures/cla.json)'
           #lock-pullrequest-aftermerge: false - if you don't want this bot to automatically lock the pull request after merging (default - true)
           #use-dco-flag: true - If you are using DCO instead of CLA

@@ -7,6 +7,9 @@ on:
   pull_request:
     branches:
       - main
+    paths-ignore:
+      - '**/*.md'
   workflow_dispatch:
 jobs:

@@ -29,7 +29,7 @@ jobs:
         uses: peter-evans/create-or-update-comment@v4
         with:
           issue-number: ${{ github.event.issue.number }}
-          token: ${{ secrets.BOT_GITHUB_TOKEN }}
+          token: ${{ secrets.BOT_TOKEN }}
           body: |
             This issue is available for anyone to work on. **Make sure to reference this issue in your pull request.** :sparkles: Thank you for your contribution! :sparkles:
             [Join slack 🤖](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) to connect and communicate with our developers.

@@ -0,0 +1,19 @@
+name: 'issue-translator'
+on:
+  issue_comment:
+    types: [created]
+  issues:
+    types: [opened]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: usthe/issues-translate-action@v2.7
+        with:
+          BOT_GITHUB_TOKEN: ${{ secrets.BOT_TOKEN }}
+          IS_MODIFY_TITLE: true
+          # Not required, default false. Decides whether to modify the issue title.
+          # If true, the robot account @Issues-translate-bot must have modification permissions; invite @Issues-translate-bot to your project or use your own bot.
+          CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. 👯👭🏻🧑‍🤝‍🧑👫🧑🏿‍🤝‍🧑🏻👩🏾‍🤝‍👨🏿👬🏿
+          # Not required. Customizes the translation bot's prefix message.

@@ -1,4 +1,4 @@
-enable: "etcd"
+enable: etcd
 etcd:
   rootDirectory: openim
   address: [ localhost:12379 ]
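The unquoted scalar parses identically in YAML; only the style changes. For readers wiring a client against this discovery config, a minimal connectivity sketch with go.etcd.io/etcd/client/v3 (the example key is illustrative; the server's actual registration layout under rootDirectory is not shown in this diff):

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Endpoint matches discovery.yml's address above.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:12379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Keys are namespaced under rootDirectory ("openim"); this key is hypothetical.
	if _, err := cli.Put(context.Background(), "/openim/health", "ok"); err != nil {
		log.Fatal(err)
	}
}
```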

File diff suppressed because it is too large

@@ -3,34 +3,38 @@ username: ''
 # Password for authentication
 password: ''
 # Producer acknowledgment settings
-producerAck: ""
+producerAck:
 # Compression type to use (e.g., none, gzip, snappy)
-compressType: "none"
+compressType: none
 # List of Kafka broker addresses
 address: [ localhost:19094 ]
 # Kafka topic for Redis integration
-toRedisTopic: "toRedis"
+toRedisTopic: toRedis
 # Kafka topic for MongoDB integration
-toMongoTopic: "toMongo"
+toMongoTopic: toMongo
 # Kafka topic for push notifications
-toPushTopic: "toPush"
+toPushTopic: toPush
+# Kafka topic for offline push notifications
+toOfflinePushTopic: toOfflinePush
 # Consumer group ID for Redis topic
 toRedisGroupID: redis
 # Consumer group ID for MongoDB topic
 toMongoGroupID: mongo
 # Consumer group ID for push notifications topic
 toPushGroupID: push
+# Consumer group ID for offline push notifications topic
+toOfflinePushGroupID: offlinePush
 # TLS (Transport Layer Security) configuration
 tls:
   # Enable or disable TLS
   enableTLS: false
   # CA certificate file path
-  caCrt: ""
+  caCrt:
   # Client certificate file path
-  clientCrt: ""
+  clientCrt:
   # Client key file path
-  clientKey: ""
+  clientKey:
   # Client key password
-  clientKeyPwd: ""
+  clientKeyPwd:
   # Whether to skip TLS verification (not recommended for production)
   insecureSkipVerify: false
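The new toOfflinePushTopic / toOfflinePushGroupID pair mirrors the existing topic/group pairs. A minimal consumer-group sketch with IBM/sarama (already in go.mod); the handler is illustrative and not the server's actual msgtransfer/push pipeline:

```go
package main

import (
	"context"
	"log"

	"github.com/IBM/sarama"
)

// handler is a minimal sarama.ConsumerGroupHandler.
type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("offline push payload: %s", msg.Value)
		sess.MarkMessage(msg, "") // commit the offset after processing
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Consumer.Offsets.Initial = sarama.OffsetNewest
	// Broker address, group ID, and topic mirror kafka.yml above.
	group, err := sarama.NewConsumerGroup([]string{"localhost:19094"}, "offlinePush", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()
	for {
		if err := group.Consume(context.Background(), []string{"toOfflinePush"}, handler{}); err != nil {
			log.Fatal(err)
		}
	}
}
```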

@@ -1,15 +1,15 @@
 # Name of the bucket in MinIO
-bucket: "openim"
+bucket: openim
 # Access key ID for MinIO authentication
-accessKeyID: "root"
+accessKeyID: root
 # Secret access key for MinIO authentication
-secretAccessKey: "openIM123"
+secretAccessKey: openIM123
 # Session token for MinIO authentication (optional)
-sessionToken: ''
+sessionToken:
 # Internal address of the MinIO server
-internalAddress: "localhost:10005"
+internalAddress: localhost:10005
 # External address of the MinIO server, accessible from outside. Supports both HTTP and HTTPS using a domain name
-externalAddress: "http://external_ip:10005"
+externalAddress: http://external_ip:10005
 # Flag to enable or disable public read access to the bucket
 publicRead: false
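internalAddress is what the server itself dials, while externalAddress ends up in URLs handed to clients. A minimal client sketch, assuming minio-go/v7 (values mirror this file):

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// internalAddress, accessKeyID, and secretAccessKey from minio.yml above.
	client, err := minio.New("localhost:10005", &minio.Options{
		Creds:  credentials.NewStaticV4("root", "openIM123", ""), // sessionToken left empty
		Secure: false,
	})
	if err != nil {
		log.Fatal(err)
	}
	ok, err := client.BucketExists(context.Background(), "openim")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("bucket openim exists: %v", ok)
}
```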

@@ -1,5 +1,5 @@
 # URI for database connection, leave empty if using address and credential settings directly
-uri: ''
+uri:
 # List of MongoDB server addresses
 address: [ localhost:37017 ]
 # Name of the database
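When uri is empty, the server builds the connection from address plus the credential fields instead. A connect-and-ping sketch with the mongo-driver already pinned in go.mod (the URI below is assembled from this file for illustration; credentials omitted):

```go
package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()
	// Address from mongo.yml above.
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:37017"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Disconnect(ctx) }()

	if err := client.Ping(ctx, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("mongo reachable")
}
```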

@@ -28,11 +28,11 @@ groupCreated:
     # Enables or disables offline push notifications.
     enable: false
     # Title for the notification when a group is created.
-    title: "create group title"
+    title: create group title
     # Description for the notification.
-    desc: "create group desc"
+    desc: create group desc
     # Additional information for the notification.
-    ext: "create group ext"
+    ext: create group ext
 groupInfoSet:
   isSendMsg: false
@@ -40,9 +40,9 @@ groupInfoSet:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupInfoSet title"
-    desc: "groupInfoSet desc"
-    ext: "groupInfoSet ext"
+    title: groupInfoSet title
+    desc: groupInfoSet desc
+    ext: groupInfoSet ext
 joinGroupApplication:
@@ -51,9 +51,9 @@ joinGroupApplication:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "joinGroupApplication title"
-    desc: "joinGroupApplication desc"
-    ext: "joinGroupApplication ext"
+    title: joinGroupApplication title
+    desc: joinGroupApplication desc
+    ext: joinGroupApplication ext
 memberQuit:
   isSendMsg: true
@@ -61,9 +61,9 @@ memberQuit:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "memberQuit title"
-    desc: "memberQuit desc"
-    ext: "memberQuit ext"
+    title: memberQuit title
+    desc: memberQuit desc
+    ext: memberQuit ext
 groupApplicationAccepted:
   isSendMsg: false
@@ -71,9 +71,9 @@ groupApplicationAccepted:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupApplicationAccepted title"
-    desc: "groupApplicationAccepted desc"
-    ext: "groupApplicationAccepted ext"
+    title: groupApplicationAccepted title
+    desc: groupApplicationAccepted desc
+    ext: groupApplicationAccepted ext
 groupApplicationRejected:
   isSendMsg: false
@@ -81,9 +81,9 @@ groupApplicationRejected:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupApplicationRejected title"
-    desc: "groupApplicationRejected desc"
-    ext: "groupApplicationRejected ext"
+    title: groupApplicationRejected title
+    desc: groupApplicationRejected desc
+    ext: groupApplicationRejected ext
 groupOwnerTransferred:
@@ -92,9 +92,9 @@ groupOwnerTransferred:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupOwnerTransferred title"
-    desc: "groupOwnerTransferred desc"
-    ext: "groupOwnerTransferred ext"
+    title: groupOwnerTransferred title
+    desc: groupOwnerTransferred desc
+    ext: groupOwnerTransferred ext
 memberKicked:
   isSendMsg: true
@@ -102,9 +102,9 @@ memberKicked:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "memberKicked title"
-    desc: "memberKicked desc"
-    ext: "memberKicked ext"
+    title: memberKicked title
+    desc: memberKicked desc
+    ext: memberKicked ext
 memberInvited:
   isSendMsg: true
@@ -112,9 +112,9 @@ memberInvited:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "memberInvited title"
-    desc: "memberInvited desc"
-    ext: "memberInvited ext"
+    title: memberInvited title
+    desc: memberInvited desc
+    ext: memberInvited ext
 memberEnter:
   isSendMsg: true
@@ -122,9 +122,9 @@ memberEnter:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "memberEnter title"
-    desc: "memberEnter desc"
-    ext: "memberEnter ext"
+    title: memberEnter title
+    desc: memberEnter desc
+    ext: memberEnter ext
 groupDismissed:
   isSendMsg: true
@@ -132,9 +132,9 @@ groupDismissed:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupDismissed title"
-    desc: "groupDismissed desc"
-    ext: "groupDismissed ext"
+    title: groupDismissed title
+    desc: groupDismissed desc
+    ext: groupDismissed ext
 groupMuted:
   isSendMsg: true
@@ -142,9 +142,9 @@ groupMuted:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupMuted title"
-    desc: "groupMuted desc"
-    ext: "groupMuted ext"
+    title: groupMuted title
+    desc: groupMuted desc
+    ext: groupMuted ext
 groupCancelMuted:
   isSendMsg: true
@@ -152,11 +152,11 @@ groupCancelMuted:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupCancelMuted title"
-    desc: "groupCancelMuted desc"
-    ext: "groupCancelMuted ext"
+    title: groupCancelMuted title
+    desc: groupCancelMuted desc
+    ext: groupCancelMuted ext
   defaultTips:
-    tips: "group Cancel Muted"
+    tips: group Cancel Muted
 groupMemberMuted:
@@ -165,9 +165,9 @@ groupMemberMuted:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupMemberMuted title"
-    desc: "groupMemberMuted desc"
-    ext: "groupMemberMuted ext"
+    title: groupMemberMuted title
+    desc: groupMemberMuted desc
+    ext: groupMemberMuted ext
 groupMemberCancelMuted:
   isSendMsg: true
@@ -175,9 +175,9 @@ groupMemberCancelMuted:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupMemberCancelMuted title"
-    desc: "groupMemberCancelMuted desc"
-    ext: "groupMemberCancelMuted ext"
+    title: groupMemberCancelMuted title
+    desc: groupMemberCancelMuted desc
+    ext: groupMemberCancelMuted ext
 groupMemberInfoSet:
   isSendMsg: false
@@ -185,9 +185,9 @@ groupMemberInfoSet:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupMemberInfoSet title"
-    desc: "groupMemberInfoSet desc"
-    ext: "groupMemberInfoSet ext"
+    title: groupMemberInfoSet title
+    desc: groupMemberInfoSet desc
+    ext: groupMemberInfoSet ext
 groupInfoSetAnnouncement:
   isSendMsg: true
@@ -195,9 +195,9 @@ groupInfoSetAnnouncement:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupInfoSetAnnouncement title"
-    desc: "groupInfoSetAnnouncement desc"
-    ext: "groupInfoSetAnnouncement ext"
+    title: groupInfoSetAnnouncement title
+    desc: groupInfoSetAnnouncement desc
+    ext: groupInfoSetAnnouncement ext
 groupInfoSetName:
@@ -206,9 +206,9 @@ groupInfoSetName:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "groupInfoSetName title"
-    desc: "groupInfoSetName desc"
-    ext: "groupInfoSetName ext"
+    title: groupInfoSetName title
+    desc: groupInfoSetName desc
+    ext: groupInfoSetName ext
 #############################friend#################################
@@ -218,9 +218,9 @@ friendApplicationAdded:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "Somebody applies to add you as a friend"
-    desc: "Somebody applies to add you as a friend"
-    ext: "Somebody applies to add you as a friend"
+    title: Somebody applies to add you as a friend
+    desc: Somebody applies to add you as a friend
+    ext: Somebody applies to add you as a friend
 friendApplicationApproved:
   isSendMsg: true
@@ -228,9 +228,9 @@ friendApplicationApproved:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "Someone applies to add your friend application"
-    desc: "Someone applies to add your friend application"
-    ext: "Someone applies to add your friend application"
+    title: Someone applies to add your friend application
+    desc: Someone applies to add your friend application
+    ext: Someone applies to add your friend application
 friendApplicationRejected:
   isSendMsg: false
@@ -238,9 +238,9 @@ friendApplicationRejected:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "Someone rejected your friend application"
-    desc: "Someone rejected your friend application"
-    ext: "Someone rejected your friend application"
+    title: Someone rejected your friend application
+    desc: Someone rejected your friend application
+    ext: Someone rejected your friend application
 friendAdded:
   isSendMsg: false
@@ -248,9 +248,9 @@ friendAdded:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "We have become friends"
-    desc: "We have become friends"
-    ext: "We have become friends"
+    title: We have become friends
+    desc: We have become friends
+    ext: We have become friends
 friendDeleted:
   isSendMsg: false
@@ -258,9 +258,9 @@ friendDeleted:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "deleted a friend"
-    desc: "deleted a friend"
-    ext: "deleted a friend"
+    title: deleted a friend
+    desc: deleted a friend
+    ext: deleted a friend
 friendRemarkSet:
   isSendMsg: false
@@ -268,9 +268,9 @@ friendRemarkSet:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "Your friend's profile has been changed"
-    desc: "Your friend's profile has been changed"
-    ext: "Your friend's profile has been changed"
+    title: Your friend's profile has been changed
+    desc: Your friend's profile has been changed
+    ext: Your friend's profile has been changed
 blackAdded:
   isSendMsg: false
@@ -278,9 +278,9 @@ blackAdded:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "blocked a user"
-    desc: "blocked a user"
-    ext: "blocked a user"
+    title: blocked a user
+    desc: blocked a user
+    ext: blocked a user
 blackDeleted:
   isSendMsg: false
@@ -288,9 +288,9 @@ blackDeleted:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "Remove a blocked user"
-    desc: "Remove a blocked user"
-    ext: "Remove a blocked user"
+    title: Remove a blocked user
+    desc: Remove a blocked user
+    ext: Remove a blocked user
 friendInfoUpdated:
   isSendMsg: false
@@ -298,9 +298,9 @@ friendInfoUpdated:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "friend info updated"
-    desc: "friend info updated"
-    ext: "friend info updated"
+    title: friend info updated
+    desc: friend info updated
+    ext: friend info updated
 #####################user#########################
 userInfoUpdated:
@@ -309,9 +309,9 @@ userInfoUpdated:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "Remove a blocked user"
-    desc: "Remove a blocked user"
-    ext: "Remove a blocked user"
+    title: Remove a blocked user
+    desc: Remove a blocked user
+    ext: Remove a blocked user
 userStatusChanged:
   isSendMsg: false
@@ -319,9 +319,9 @@ userStatusChanged:
   unreadCount: false
   offlinePush:
     enable: false
-    title: "user status changed"
-    desc: "user status changed"
-    ext: "user status changed"
+    title: user status changed
+    desc: user status changed
+    ext: user status changed
 #####################conversation#########################
 conversationChanged:
@@ -330,9 +330,9 @@ conversationChanged:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "conversation changed"
-    desc: "conversation changed"
-    ext: "conversation changed"
+    title: conversation changed
+    desc: conversation changed
+    ext: conversation changed
 conversationSetPrivate:
   isSendMsg: true
@@ -340,6 +340,6 @@ conversationSetPrivate:
   unreadCount: false
   offlinePush:
     enable: true
-    title: "burn after reading"
-    desc: "burn after reading"
-    ext: "burn after reading"
+    title: burn after reading
+    desc: burn after reading
+    ext: burn after reading

@@ -3,11 +3,14 @@ api:
   listenIP: 0.0.0.0
   # Listening ports; if multiple are configured, multiple instances will be launched, must be consistent with the number of prometheus.ports
   ports: [ 10002 ]
+  # API compression level; 0: default compression, 1: best compression, 2: best speed, -1: no compression
+  compressionLevel: 0
 prometheus:
   # Whether to enable prometheus
   enable: true
   # Prometheus listening ports, must match the number of api.ports
-  ports: [ 20113 ]
+  ports: [ 12002 ]
   # This address can be accessed via a browser
   grafanaURL: http://127.0.0.1:13000/
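The new compressionLevel pairs with the gin-contrib/gzip dependency added in go.mod further down. A sketch of how the levels could map onto the middleware (the mapping follows the config comment; the helper and route are illustrative, not the server's actual wiring):

```go
package main

import (
	"net/http"

	"github.com/gin-contrib/gzip"
	"github.com/gin-gonic/gin"
)

// levelFor maps config values onto gzip middleware constants, per the
// comment above: 0 default, 1 best compression, 2 best speed, -1 none.
func levelFor(compressionLevel int) int {
	switch compressionLevel {
	case 1:
		return gzip.BestCompression
	case 2:
		return gzip.BestSpeed
	case -1:
		return gzip.NoCompression
	default:
		return gzip.DefaultCompression
	}
}

func main() {
	r := gin.Default()
	r.Use(gzip.Gzip(levelFor(0)))
	r.GET("/healthz", func(c *gin.Context) { c.String(http.StatusOK, "ok") })
	_ = r.Run(":10002") // first entry of api.ports above
}
```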

@@ -1,3 +1,3 @@
-cronExecuteTime: "0 2 * * *"
+cronExecuteTime: 0 2 * * *
 retainChatRecords: 365
 fileExpireTime: 90
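cronExecuteTime is a standard five-field cron spec; "0 2 * * *" fires daily at 02:00. A scheduling sketch, assuming the widely used robfig/cron/v3 (the cleanup body is a placeholder for the retention logic that retainChatRecords and fileExpireTime drive):

```go
package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New() // five-field specs, minute resolution
	// Matches cronExecuteTime above: minute 0, hour 2, every day.
	if _, err := c.AddFunc("0 2 * * *", func() {
		log.Println("purge chat records older than 365 days, files older than 90 days")
	}); err != nil {
		log.Fatal(err)
	}
	c.Start()
	select {} // keep the scheduler running
}
```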

@@ -1,14 +1,14 @@
 rpc:
   # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
-  registerIP: ''
+  registerIP:
   # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-  ports: [ 10140 ]
+  ports: [ 10140, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 10148, 10149, 10150, 10151, 10152, 10153, 10154, 10155 ]
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-  ports: [ 20112 ]
+  ports: [ 12140, 12141, 12142, 12143, 12144, 12145, 12146, 12147, 12148, 12149, 12150, 12151, 12152, 12153, 12154, 12155 ]
 # IP address that the RPC/WebSocket service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
@@ -25,6 +25,3 @@ longConnSvr:
 # 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time
 multiLoginPolicy: 1
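The invariant stated in these comments (one Prometheus port per RPC port, since every launched instance exposes its own metrics endpoint) recurs in every service config below. A tiny validation sketch; the helper name is hypothetical:

```go
package config

import "fmt"

// validatePorts enforces the invariant from the comments above:
// rpc.ports and prometheus.ports must have the same length, because each
// launched instance pairs one service port with one metrics port.
func validatePorts(rpcPorts, promPorts []int) error {
	if len(rpcPorts) != len(promPorts) {
		return fmt.Errorf("rpc.ports has %d entries but prometheus.ports has %d; they must match",
			len(rpcPorts), len(promPorts))
	}
	return nil
}
```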

@@ -3,4 +3,4 @@ prometheus:
   enable: true
   # List of ports that Prometheus listens on; each port corresponds to an instance of monitoring. Ensure these are managed accordingly
   # Because four instances have been launched, four ports need to be specified
-  ports: [ 20108, 20109, 20110, 20111 ]
+  ports: [ 12020, 12021, 12022, 12023, 12024, 12025, 12026, 12027 ]

@@ -1,46 +1,41 @@
 rpc:
   # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
-  registerIP: ''
+  registerIP:
   # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
   listenIP: 0.0.0.0
   # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-  ports: [ 10170 ]
+  ports: [ 10170, 10171, 10172, 10173, 10174, 10175, 10176, 10177, 10178, 10179, 10180, 10181, 10182, 10183, 10184, 10185 ]
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-  ports: [ 20107 ]
+  ports: [ 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12181, 12182, 12183, 12184, 12185 ]
 maxConcurrentWorkers: 3
-#"Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified."
-enable: "geTui"
+# Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified.
+enable: geTui
 geTui:
-  pushUrl: "https://restapi.getui.com/v2/$appId"
-  masterSecret: ''
-  appKey: ''
-  intent: ''
-  channelID: ''
-  channelName: ''
+  pushUrl: https://restapi.getui.com/v2/$appId
+  masterSecret:
+  appKey:
+  intent:
+  channelID:
+  channelName:
 fcm:
   # Prioritize using file paths. If the file path is empty, use URL
-  filePath: "" # File path is concatenated with the parameters passed in through - c(`mage` default pass in `config/`) and filePath.
-  authURL: "" # Must start with https or http.
+  filePath: # The file path is concatenated from the -c parameter (`mage` passes in `config/` by default) and filePath.
+  authURL: # Must start with https or http.
 jpns:
-  appKey: ''
-  masterSecret: ''
-  pushURL: ''
-  pushIntent: ''
+  appKey:
+  masterSecret:
+  pushURL:
+  pushIntent:
 # iOS system push sound and badge count
 iosPush:
-  pushSound: "xxx"
+  pushSound: xxx
   badgeCount: true
   production: false
+fullUserCache: true

@@ -1,18 +1,17 @@
 rpc:
   # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
-  registerIP: ''
+  registerIP:
   # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
   listenIP: 0.0.0.0
   # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-  ports: [ 10160 ]
+  ports: [ 10200 ]
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-  ports: [ 20106 ]
+  ports: [ 12200 ]
 tokenPolicy:
   # Token validity period, in days
   expire: 90

@@ -1,13 +1,13 @@
 rpc:
   # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
-  registerIP: ''
+  registerIP:
   # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
   listenIP: 0.0.0.0
   # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-  ports: [ 10180 ]
+  ports: [ 10220 ]
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-  ports: [ 20105 ]
+  ports: [ 12220 ]

@@ -1,13 +1,13 @@
 rpc:
   # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
-  registerIP: ''
+  registerIP:
   # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
   listenIP: 0.0.0.0
   # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-  ports: [ 10120 ]
+  ports: [ 10240 ]
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-  ports: [ 20104 ]
+  ports: [ 12240 ]

@@ -1,13 +1,16 @@
 rpc:
   # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
-  registerIP: ''
+  registerIP:
   # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
   listenIP: 0.0.0.0
   # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-  ports: [ 10150 ]
+  ports: [ 10260 ]
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-  ports: [ 20103 ]
+  ports: [ 12260 ]
+enableHistoryForNewMembers: true

@@ -1,20 +1,17 @@
 rpc:
   # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
-  registerIP: ''
+  registerIP:
   # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
   listenIP: 0.0.0.0
   # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-  ports: [ 10130 ]
+  ports: [ 10280 ]
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-  ports: [ 20102 ]
+  ports: [ 12280 ]
 # Does sending messages require friend verification
 friendVerify: false

@@ -1,40 +1,40 @@
 rpc:
   # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
-  registerIP: ''
+  registerIP:
   # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
   listenIP: 0.0.0.0
   # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-  ports: [ 10190 ]
+  ports: [ 10300 ]
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-  ports: [ 20101 ]
+  ports: [ 12300 ]
 object:
   # Use MinIO as object storage, or set to "cos", "oss", "kodo", "aws", while also configuring the corresponding settings
-  enable: "minio"
+  enable: minio
   cos:
     bucketURL: https://temp-1252357374.cos.ap-chengdu.myqcloud.com
-    secretID: ''
-    secretKey: ''
-    sessionToken: ''
+    secretID:
+    secretKey:
+    sessionToken:
     publicRead: false
   oss:
-    endpoint: "https://oss-cn-chengdu.aliyuncs.com"
-    bucket: "demo-9999999"
-    bucketURL: "https://demo-9999999.oss-cn-chengdu.aliyuncs.com"
-    accessKeyID: ''
-    accessKeySecret: ''
-    sessionToken: ''
+    endpoint: https://oss-cn-chengdu.aliyuncs.com
+    bucket: demo-9999999
+    bucketURL: https://demo-9999999.oss-cn-chengdu.aliyuncs.com
+    accessKeyID:
+    accessKeySecret:
+    sessionToken:
     publicRead: false
   kodo:
-    endpoint: "http://s3.cn-south-1.qiniucs.com"
-    bucket: "kodo-bucket-test"
-    bucketURL: "http://kodo-bucket-test-oetobfb.qiniudns.com"
-    accessKeyID: ''
-    accessKeySecret: ''
-    sessionToken: ''
+    endpoint: http://s3.cn-south-1.qiniucs.com
+    bucket: kodo-bucket-test
+    bucketURL: http://kodo-bucket-test-oetobfb.qiniudns.com
+    accessKeyID:
+    accessKeySecret:
+    sessionToken:
     publicRead: false

@@ -1,17 +1,13 @@
 rpc:
   # API or other RPCs can access this RPC through this IP; if left blank, the internal network IP is obtained by default
-  registerIP: ''
+  registerIP:
   # Listening IP; 0.0.0.0 means both internal and external IPs are listened to; if blank, the internal network IP is automatically obtained by default
   listenIP: 0.0.0.0
   # Listening ports; if multiple are configured, multiple instances will be launched, and must be consistent with the number of prometheus.ports
-  ports: [ 10110 ]
+  ports: [ 10320 ]
 prometheus:
   # Whether to enable prometheus
   enable: true
   # Prometheus listening ports, must be consistent with the number of rpc.ports
-  ports: [ 20100 ]
+  ports: [ 12320 ]

@@ -8,76 +8,79 @@ global:
 alerting:
   alertmanagers:
     - static_configs:
-        - targets: ['internal_ip:19093']
+        - targets: [internal_ip:19093]
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+# Load rules once and periodically evaluate them according to the global evaluation_interval.
 rule_files:
-  - "instance-down-rules.yml"
-# - "first_rules.yml"
-# - "second_rules.yml"
+  - instance-down-rules.yml
+# - first_rules.yml
+# - second_rules.yml
 # A scrape configuration containing exactly one endpoint to scrape:
 # Here it's Prometheus itself.
 scrape_configs:
-  # The job name is added as a label "job='job_name'"" to any timeseries scraped from this config.
+  # The job name is added as a label "job=job_name" to any timeseries scraped from this config.
   # Monitored information captured by prometheus
   # prometheus fetches application services
-  - job_name: 'node_exporter'
+  - job_name: node_exporter
     static_configs:
-      - targets: [ 'internal_ip:20114' ]
+      - targets: [ internal_ip:20500 ]
-  - job_name: 'openimserver-openim-api'
+  - job_name: openimserver-openim-api
     static_configs:
-      - targets: [ 'internal_ip:20113' ]
+      - targets: [ internal_ip:12002 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-msggateway'
+  - job_name: openimserver-openim-msggateway
     static_configs:
-      - targets: [ 'internal_ip:20112' ]
+      - targets: [ internal_ip:12140 ]
+      # - targets: [ internal_ip:12140, internal_ip:12141, internal_ip:12142, internal_ip:12143, internal_ip:12144, internal_ip:12145, internal_ip:12146, internal_ip:12147, internal_ip:12148, internal_ip:12149, internal_ip:12150, internal_ip:12151, internal_ip:12152, internal_ip:12153, internal_ip:12154, internal_ip:12155 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-msgtransfer'
+  - job_name: openimserver-openim-msgtransfer
     static_configs:
-      - targets: [ 'internal_ip:20111', 'internal_ip:20110', 'internal_ip:20109', 'internal_ip:20108' ]
+      - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027 ]
+      # - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027, internal_ip:12028, internal_ip:12029, internal_ip:12030, internal_ip:12031, internal_ip:12032, internal_ip:12033, internal_ip:12034, internal_ip:12035 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-push'
+  - job_name: openimserver-openim-push
     static_configs:
-      - targets: [ 'internal_ip:20107' ]
+      - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177 ]
+      # - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177, internal_ip:12178, internal_ip:12179, internal_ip:12180, internal_ip:12181, internal_ip:12182, internal_ip:12183, internal_ip:12184, internal_ip:12185 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-rpc-auth'
+  - job_name: openimserver-openim-rpc-auth
     static_configs:
-      - targets: [ 'internal_ip:20106' ]
+      - targets: [ internal_ip:12200 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-rpc-conversation'
+  - job_name: openimserver-openim-rpc-conversation
     static_configs:
-      - targets: [ 'internal_ip:20105' ]
+      - targets: [ internal_ip:12220 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-rpc-friend'
+  - job_name: openimserver-openim-rpc-friend
     static_configs:
-      - targets: [ 'internal_ip:20104' ]
+      - targets: [ internal_ip:12240 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-rpc-group'
+  - job_name: openimserver-openim-rpc-group
    static_configs:
-      - targets: [ 'internal_ip:20103' ]
+      - targets: [ internal_ip:12260 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-rpc-msg'
+  - job_name: openimserver-openim-rpc-msg
     static_configs:
-      - targets: [ 'internal_ip:20102' ]
+      - targets: [ internal_ip:12280 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-rpc-third'
+  - job_name: openimserver-openim-rpc-third
     static_configs:
-      - targets: [ 'internal_ip:20101' ]
+      - targets: [ internal_ip:12300 ]
        labels:
-          namespace: 'default'
+          namespace: default
-  - job_name: 'openimserver-openim-rpc-user'
+  - job_name: openimserver-openim-rpc-user
     static_configs:
-      - targets: [ 'internal_ip:20100' ]
+      - targets: [ internal_ip:12320 ]
        labels:
-          namespace: 'default'
+          namespace: default
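Each scrape target above is simply an instance's metrics endpoint, one per port in the renumbered prometheus.ports. A minimal exporter sketch with prometheus/client_golang (already in go.mod), serving on the openim-api metrics port for illustration:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// One /metrics listener per instance; 12002 matches the
	// openimserver-openim-api target above.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":12002", nil))
}
```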

@@ -1,6 +1,7 @@
 address: [ localhost:16379 ]
-username: ''
+username:
 password: openIM123
 clusterMode: false
 db: 0
 maxRetry: 10
+poolSize: 100
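The new poolSize caps the client-side connection pool. A client sketch assuming redis/go-redis/v9 (the server wraps its Redis client, so the fields here are just the go-redis equivalents of this file):

```go
package main

import (
	"context"
	"log"

	"github.com/redis/go-redis/v9"
)

func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr:       "localhost:16379", // address
		Password:   "openIM123",       // password
		DB:         0,                 // db
		MaxRetries: 10,                // maxRetry
		PoolSize:   100,               // poolSize (new in this diff)
	})
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		log.Fatal(err)
	}
	log.Println("redis reachable")
}
```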

@@ -10,5 +10,5 @@ rpcRegisterName:
   conversation: conversation
   third: third
-imAdminUserID: [ "imAdmin" ]
+imAdminUserID: [ imAdmin ]

@@ -1,4 +1,4 @@
-url: "webhook://127.0.0.1:10008/callbackExample"
+url: webhook://127.0.0.1:10008/callbackExample
 beforeSendSingleMsg:
   enable: false
   timeout: 5
@@ -130,6 +130,13 @@ beforeSetGroupInfo:
   enable: false
   timeout: 5
   failedContinue: true
+afterSetGroupInfoEX:
+  enable: false
+  timeout: 5
+beforeSetGroupInfoEX:
+  enable: false
+  timeout: 5
+  failedContinue: true
 afterRevokeMsg:
   enable: false
   timeout: 5
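The new beforeSetGroupInfoEX hook follows the same contract as the other before* webhooks: the server POSTs to the configured url and waits up to timeout seconds, and with failedContinue: true it proceeds even if the callback fails. A receiver sketch in plain net/http; the payload handling and response fields are hypothetical stand-ins for OpenIM's real callback structs:

```go
package main

import (
	"encoding/json"
	"io"
	"log"
	"net/http"
)

func main() {
	// Path mirrors the url above (webhook://127.0.0.1:10008/callbackExample).
	http.HandleFunc("/callbackExample", func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("callback payload: %s", body)

		// Reply within the configured 5s timeout; field names are illustrative only.
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(map[string]any{"errCode": 0})
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:10008", nil))
}
```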

@@ -140,50 +140,49 @@ services:
     networks:
       - openim
-  prometheus:
-    image: ${PROMETHEUS_IMAGE}
-    container_name: prometheus
-    restart: always
-    volumes:
-      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
-      - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
-      - ${DATA_DIR}/components/prometheus/data:/prometheus
-    command:
-      - '--config.file=/etc/prometheus/prometheus.yml'
-      - '--storage.tsdb.path=/prometheus'
-    ports:
-      - "19091:9090"
-    networks:
-      - openim
-
-  alertmanager:
-    image: ${ALERTMANAGER_IMAGE}
-    container_name: alertmanager
-    restart: always
-    volumes:
-      - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
-      - ./config/email.tmpl:/etc/alertmanager/email.tmpl
-    ports:
-      - "19093:9093"
-    networks:
-      - openim
-
-  grafana:
-    image: ${GRAFANA_IMAGE}
-    container_name: grafana
-    user: root
-    restart: always
-    environment:
-      - GF_SECURITY_ALLOW_EMBEDDING=true
-      - GF_SESSION_COOKIE_SAMESITE=none
-      - GF_SESSION_COOKIE_SECURE=true
-      - GF_AUTH_ANONYMOUS_ENABLED=true
-      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
-    ports:
-      - "13000:3000"
-    volumes:
-      - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
-    networks:
-      - openim
+#  prometheus:
+#    image: ${PROMETHEUS_IMAGE}
+#    container_name: prometheus
+#    restart: always
+#    volumes:
+#      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
+#      - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
+#      - ${DATA_DIR}/components/prometheus/data:/prometheus
+#    command:
+#      - '--config.file=/etc/prometheus/prometheus.yml'
+#      - '--storage.tsdb.path=/prometheus'
+#    ports:
+#      - "19091:9090"
+#    networks:
+#      - openim
+#
+#  alertmanager:
+#    image: ${ALERTMANAGER_IMAGE}
+#    container_name: alertmanager
+#    restart: always
+#    volumes:
+#      - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
+#      - ./config/email.tmpl:/etc/alertmanager/email.tmpl
+#    ports:
+#      - "19093:9093"
+#    networks:
+#      - openim
+#
+#  grafana:
+#    image: ${GRAFANA_IMAGE}
+#    container_name: grafana
+#    user: root
+#    restart: always
+#    environment:
+#      - GF_SECURITY_ALLOW_EMBEDDING=true
+#      - GF_SESSION_COOKIE_SAMESITE=none
+#      - GF_SESSION_COOKIE_SECURE=true
+#      - GF_AUTH_ANONYMOUS_ENABLED=true
+#      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+#    ports:
+#      - "13000:3000"
+#    volumes:
+#      - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
+#    networks:
+#      - openim

@@ -6,21 +6,21 @@ require (
 	firebase.google.com/go v3.13.0+incompatible
 	github.com/dtm-labs/rockscache v0.1.1
 	github.com/gin-gonic/gin v1.9.1
-	github.com/go-playground/validator/v10 v10.18.0
+	github.com/go-playground/validator/v10 v10.20.0
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.0
 	github.com/gorilla/websocket v1.5.1
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/mitchellh/mapstructure v1.5.0
-	github.com/openimsdk/protocol v0.0.69
-	github.com/openimsdk/tools v0.0.49-alpha.55
+	github.com/openimsdk/protocol v0.0.72-alpha.18
+	github.com/openimsdk/tools v0.0.50-alpha.12
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/prometheus/client_golang v1.18.0
 	github.com/stretchr/testify v1.9.0
 	go.mongodb.org/mongo-driver v1.14.0
 	google.golang.org/api v0.165.0
-	google.golang.org/grpc v1.62.1
-	google.golang.org/protobuf v1.33.0
+	google.golang.org/grpc v1.66.2
+	google.golang.org/protobuf v1.34.2
 	gopkg.in/yaml.v3 v3.0.1
 )
@@ -29,6 +29,7 @@ require github.com/google/uuid v1.6.0
 require (
 	github.com/IBM/sarama v1.43.0
 	github.com/fatih/color v1.14.1
+	github.com/gin-contrib/gzip v1.0.1
 	github.com/go-redis/redis v6.15.9+incompatible
 	github.com/go-redis/redismock/v9 v9.2.0
 	github.com/hashicorp/golang-lru/v2 v2.0.7
@@ -41,13 +42,13 @@ require (
 	github.com/spf13/viper v1.18.2
 	github.com/stathat/consistent v1.0.0
 	go.uber.org/automaxprocs v1.5.3
-	golang.org/x/sync v0.6.0
+	golang.org/x/exp v0.0.0-20230905200255-921286631fa9
+	golang.org/x/sync v0.8.0
 )
 require (
 	cloud.google.com/go v0.112.0 // indirect
-	cloud.google.com/go/compute v1.23.3 // indirect
-	cloud.google.com/go/compute/metadata v0.2.3 // indirect
+	cloud.google.com/go/compute/metadata v0.3.0 // indirect
 	cloud.google.com/go/firestore v1.14.0 // indirect
 	cloud.google.com/go/iam v1.1.5 // indirect
 	cloud.google.com/go/longrunning v0.5.4 // indirect
@@ -72,11 +73,12 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 // indirect
 	github.com/aws/smithy-go v1.17.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bytedance/sonic v1.9.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/chai2010/webp v1.1.1 // indirect
-	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/bytedance/sonic v1.11.6 // indirect
+	github.com/bytedance/sonic/loader v0.1.1 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/clbanning/mxj v1.8.4 // indirect
+	github.com/cloudwego/base64x v0.1.4 // indirect
+	github.com/cloudwego/iasm v0.2.0 // indirect
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -117,7 +119,7 @@ require (
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kelindar/simd v1.1.2 // indirect
 	github.com/klauspost/compress v1.17.7 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/lestrrat-go/strftime v1.0.6 // indirect
 	github.com/lithammer/shortuuid v3.0.0+incompatible // indirect
@@ -132,7 +134,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
 	github.com/mozillazg/go-httpheader v0.4.0 // indirect
-	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/pierrec/lz4/v4 v4.1.21 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
@@ -169,18 +171,17 @@ require (
 	go.opentelemetry.io/otel/trace v1.23.0 // indirect
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/arch v0.3.0 // indirect
-	golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
+	golang.org/x/arch v0.7.0 // indirect
 	golang.org/x/image v0.15.0 // indirect
-	golang.org/x/net v0.22.0 // indirect
-	golang.org/x/oauth2 v0.17.0 // indirect
-	golang.org/x/sys v0.19.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/net v0.29.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
+	golang.org/x/sys v0.25.0 // indirect
+	golang.org/x/text v0.18.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
 	gorm.io/gorm v1.25.8 // indirect
 	stathat.com/c/consistent v1.0.0 // indirect
 )
@@ -188,10 +189,10 @@ require (
 require (
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/goccy/go-json v0.10.2 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/spf13/cobra v1.8.0
-	github.com/ugorji/go/codec v1.2.11 // indirect
+	github.com/ugorji/go/codec v1.2.12 // indirect
 	go.uber.org/zap v1.24.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/crypto v0.27.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 )

go.sum (106 changed lines)

@ -1,10 +1,8 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw=
cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ=
cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
@ -65,23 +63,23 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/webp v1.1.1 h1:jTRmEccAJ4MGrhFOrPMpNGIJ/eybIgwKpcACsrTEapk=
github.com/chai2010/webp v1.1.1/go.mod h1:0XVwvZWdjjdxpUEIf7b9g9VkHFnInUSYujwqTLEuldU=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
@ -123,6 +121,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gin-contrib/gzip v1.0.1 h1:HQ8ENHODeLY7a4g1Au/46Z92bdGFl74OhxcZble9WJE=
github.com/gin-contrib/gzip v1.0.1/go.mod h1:njt428fdUNRvjuJf16tZMYZ2Yl+WQB53X5wmhDwXvC4=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
@ -146,8 +146,8 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk= github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U= github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redismock/v9 v9.2.0 h1:ZrMYQeKPECZPjOj5u9eyOjg8Nnb0BS9lkVIZ6IpsKLw= github.com/go-redis/redismock/v9 v9.2.0 h1:ZrMYQeKPECZPjOj5u9eyOjg8Nnb0BS9lkVIZ6IpsKLw=
@ -259,8 +259,9 @@ github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLA
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@ -290,8 +291,8 @@ github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3v
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@ -321,12 +322,12 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y= github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
github.com/openimsdk/protocol v0.0.69 h1:dVi8meSg8kmUzSH1XQab4MjihqKkkcCAmt1BYXPJuXo= github.com/openimsdk/protocol v0.0.72-alpha.18 h1:EytTtgZuXMG1cgTlJryqXXSO1J3t3wrLIn3Os2PRBEE=
github.com/openimsdk/protocol v0.0.69/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8= github.com/openimsdk/protocol v0.0.72-alpha.18/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
github.com/openimsdk/tools v0.0.49-alpha.55 h1:KPgC53oqiwZYssLKljhtXbWXifMlTj2SSQEusj4Uf4k= github.com/openimsdk/tools v0.0.50-alpha.12 h1:rV3BxgqN+F79vZvdoQ+97Eob8ScsRVEM8D+Wrcl23uo=
github.com/openimsdk/tools v0.0.49-alpha.55/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4= github.com/openimsdk/tools v0.0.50-alpha.12/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@ -410,8 +411,8 @@ github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
@ -458,8 +459,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -467,8 +468,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
@ -495,19 +496,19 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -522,8 +523,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -536,8 +537,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -566,17 +567,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo= google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo=
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -588,8 +589,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@ -610,6 +611,7 @@ gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo=
gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
stathat.com/c/consistent v1.0.0 h1:ezyc51EGcRPJUxfHGSgJjWzJdj3NiMU9pNfLNGiXV0c= stathat.com/c/consistent v1.0.0 h1:ezyc51EGcRPJUxfHGSgJjWzJdj3NiMU9pNfLNGiXV0c=
stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0= stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0=

@ -35,6 +35,10 @@ func (o *GroupApi) SetGroupInfo(c *gin.Context) {
a2r.Call(group.GroupClient.SetGroupInfo, o.Client, c) a2r.Call(group.GroupClient.SetGroupInfo, o.Client, c)
} }
func (o *GroupApi) SetGroupInfoEX(c *gin.Context) {
a2r.Call(group.GroupClient.SetGroupInfoEX, o.Client, c)
}
func (o *GroupApi) JoinGroup(c *gin.Context) { func (o *GroupApi) JoinGroup(c *gin.Context) {
a2r.Call(group.GroupClient.JoinGroup, o.Client, c) a2r.Call(group.GroupClient.JoinGroup, o.Client, c)
} }
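The new SetGroupInfoEX handler rides on the same a2r bridge as every other endpoint: a generic helper binds the HTTP body into the request type named by the gRPC method expression and writes back the marshaled response. The real helper lives in openimsdk/tools; the sketch below is only an assumed reconstruction of its shape, with hypothetical error envelopes.

package a2rsketch

import (
	"context"
	"net/http"

	"github.com/gin-gonic/gin"
	"google.golang.org/grpc"
)

// Call is a hypothetical reconstruction of a2r.Call: bind the JSON body into
// Req, forward it to the gRPC method expression, and reply with the response.
func Call[C, Req, Resp any](
	rpc func(C, context.Context, *Req, ...grpc.CallOption) (*Resp, error),
	client C,
	c *gin.Context,
) {
	var req Req
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"errMsg": err.Error()})
		return
	}
	resp, err := rpc(client, c, &req) // *gin.Context satisfies context.Context
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"errMsg": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"errCode": 0, "data": resp})
}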

@ -49,14 +49,14 @@ func NewMessageApi(msgRpcClient *rpcclient.Message, userRpcClient *rpcclient.Use
userRpcClient: rpcclient.NewUserRpcClientByUser(userRpcClient), imAdminUserID: imAdminUserID} userRpcClient: rpcclient.NewUserRpcClientByUser(userRpcClient), imAdminUserID: imAdminUserID}
} }
func (MessageApi) SetOptions(options map[string]bool, value bool) { func (*MessageApi) SetOptions(options map[string]bool, value bool) {
datautil.SetSwitchFromOptions(options, constant.IsHistory, value) datautil.SetSwitchFromOptions(options, constant.IsHistory, value)
datautil.SetSwitchFromOptions(options, constant.IsPersistent, value) datautil.SetSwitchFromOptions(options, constant.IsPersistent, value)
datautil.SetSwitchFromOptions(options, constant.IsSenderSync, value) datautil.SetSwitchFromOptions(options, constant.IsSenderSync, value)
datautil.SetSwitchFromOptions(options, constant.IsConversationUpdate, value) datautil.SetSwitchFromOptions(options, constant.IsConversationUpdate, value)
} }
func (m MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg) *msg.SendMsgReq { func (m *MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg) *msg.SendMsgReq {
var newContent string var newContent string
options := make(map[string]bool, 5) options := make(map[string]bool, 5)
switch params.ContentType { switch params.ContentType {
@ -231,7 +231,7 @@ func (m *MessageApi) SendMessage(c *gin.Context) {
} }
// Set the status to successful if the message is sent. // Set the status to successful if the message is sent.
var status int = constant.MsgSendSuccessed var status = constant.MsgSendSuccessed
// Attempt to update the message sending status in the system. // Attempt to update the message sending status in the system.
_, err = m.Client.SetSendMsgStatus(c, &msg.SetSendMsgStatusReq{ _, err = m.Client.SetSendMsgStatus(c, &msg.SetSendMsgStatusReq{
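The receiver changes above (MessageApi to *MessageApi on SetOptions and newUserSendMsgReq) keep the type's method set uniform and stop the struct being copied on every call. A minimal self-contained illustration of the difference, using a stand-in type rather than the real MessageApi:

package main

import "fmt"

type api struct{ calls int } // illustrative stand-in, not the real MessageApi

func (a api) bumpCopy()  { a.calls++ } // value receiver: mutates a copy
func (a *api) bumpReal() { a.calls++ } // pointer receiver: mutates a itself

func main() {
	var a api
	a.bumpCopy()
	a.bumpReal()
	fmt.Println(a.calls) // 1 — only the pointer-receiver call is visible
}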

@ -2,12 +2,17 @@ package api
import ( import (
"fmt" "fmt"
"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding" "github.com/gin-gonic/gin/binding"
"github.com/go-playground/validator/v10" "github.com/go-playground/validator/v10"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/insecure"
"net/http"
"strings"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
@ -16,8 +21,13 @@ import (
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw" "github.com/openimsdk/tools/mw"
"net/http" )
"strings"
const (
NoCompression = -1
DefaultCompression = 0
BestCompression = 1
BestSpeed = 2
) )
func prommetricsGin() gin.HandlerFunc { func prommetricsGin() gin.HandlerFunc {
@ -52,7 +62,15 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
conversationRpc := rpcclient.NewConversation(disCov, config.Share.RpcRegisterName.Conversation) conversationRpc := rpcclient.NewConversation(disCov, config.Share.RpcRegisterName.Conversation)
authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth) authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth)
thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL) thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL)
switch config.API.Api.CompressionLevel {
case NoCompression:
case DefaultCompression:
r.Use(gzip.Gzip(gzip.DefaultCompression))
case BestCompression:
r.Use(gzip.Gzip(gzip.BestCompression))
case BestSpeed:
r.Use(gzip.Gzip(gzip.BestSpeed))
}
r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc)) r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
u := NewUserApi(*userRpc) u := NewUserApi(*userRpc)
m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID) m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID)
@ -112,6 +130,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
{ {
groupRouterGroup.POST("/create_group", g.CreateGroup) groupRouterGroup.POST("/create_group", g.CreateGroup)
groupRouterGroup.POST("/set_group_info", g.SetGroupInfo) groupRouterGroup.POST("/set_group_info", g.SetGroupInfo)
groupRouterGroup.POST("/set_group_info_ex", g.SetGroupInfoEX)
groupRouterGroup.POST("/join_group", g.JoinGroup) groupRouterGroup.POST("/join_group", g.JoinGroup)
groupRouterGroup.POST("/quit_group", g.QuitGroup) groupRouterGroup.POST("/quit_group", g.QuitGroup)
groupRouterGroup.POST("/group_application_response", g.ApplicationGroupResponse) groupRouterGroup.POST("/group_application_response", g.ApplicationGroupResponse)

@ -36,9 +36,11 @@ func (u *UserApi) UserRegister(c *gin.Context) {
a2r.Call(user.UserClient.UserRegister, u.Client, c) a2r.Call(user.UserClient.UserRegister, u.Client, c)
} }
// UpdateUserInfo is deprecated. Use UpdateUserInfoEx
func (u *UserApi) UpdateUserInfo(c *gin.Context) { func (u *UserApi) UpdateUserInfo(c *gin.Context) {
a2r.Call(user.UserClient.UpdateUserInfo, u.Client, c) a2r.Call(user.UserClient.UpdateUserInfo, u.Client, c)
} }
func (u *UserApi) UpdateUserInfoEx(c *gin.Context) { func (u *UserApi) UpdateUserInfoEx(c *gin.Context) {
a2r.Call(user.UserClient.UpdateUserInfoEx, u.Client, c) a2r.Call(user.UserClient.UpdateUserInfoEx, u.Client, c)
} }
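The deprecation notice on UpdateUserInfo is plain prose; godoc and linters such as staticcheck (SA1019) only recognize a comment paragraph that starts with "Deprecated:". A sketch of the same handler comment in that canonical form:

// UpdateUserInfo handles the legacy profile-update endpoint.
//
// Deprecated: Use UpdateUserInfoEx instead.
func (u *UserApi) UpdateUserInfo(c *gin.Context) {
	a2r.Call(user.UserClient.UpdateUserInfo, u.Client, c)
}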

@ -22,6 +22,8 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"google.golang.org/protobuf/proto"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
@ -30,7 +32,6 @@ import (
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/utils/stringutil" "github.com/openimsdk/tools/utils/stringutil"
"google.golang.org/protobuf/proto"
) )
var ( var (
@ -220,6 +221,10 @@ func (c *Client) handleMessage(message []byte) error {
resp, messageErr = c.longConnServer.SendSignalMessage(ctx, binaryReq) resp, messageErr = c.longConnServer.SendSignalMessage(ctx, binaryReq)
case WSPullMsgBySeqList: case WSPullMsgBySeqList:
resp, messageErr = c.longConnServer.PullMessageBySeqList(ctx, binaryReq) resp, messageErr = c.longConnServer.PullMessageBySeqList(ctx, binaryReq)
case WSPullMsg:
resp, messageErr = c.longConnServer.GetSeqMessage(ctx, binaryReq)
case WSGetConvMaxReadSeq:
resp, messageErr = c.longConnServer.GetConversationsHasReadAndMaxSeq(ctx, binaryReq)
case WsLogoutMsg: case WsLogoutMsg:
resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq) resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq)
case WsSetBackgroundStatus: case WsSetBackgroundStatus:
@ -271,11 +276,13 @@ func (c *Client) replyMessage(ctx context.Context, binaryReq *Req, err error, re
ErrMsg: errResp.ErrMsg, ErrMsg: errResp.ErrMsg,
Data: resp, Data: resp,
} }
t := time.Now()
log.ZDebug(ctx, "gateway reply message", "resp", mReply.String()) log.ZDebug(ctx, "gateway reply message", "resp", mReply.String())
err = c.writeBinaryMsg(mReply) err = c.writeBinaryMsg(mReply)
if err != nil { if err != nil {
log.ZWarn(ctx, "wireBinaryMsg replyMessage", err, "resp", mReply.String()) log.ZWarn(ctx, "wireBinaryMsg replyMessage", err, "resp", mReply.String())
} }
log.ZDebug(ctx, "wireBinaryMsg end", "time cost", time.Since(t))
if binaryReq.ReqIdentifier == WsLogoutMsg { if binaryReq.ReqIdentifier == WsLogoutMsg {
return errs.New("user logout", "operationID", binaryReq.OperationID).Wrap() return errs.New("user logout", "operationID", binaryReq.OperationID).Wrap()
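One subtlety in the timing added above: t is captured before the ZDebug call, so the reported "time cost" includes the debug log as well as the socket write; capturing the timestamp immediately before writeBinaryMsg would isolate the write. A runnable toy version of the measurement pattern:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()
	time.Sleep(10 * time.Millisecond) // stands in for the websocket write
	fmt.Println("time cost", time.Since(t)) // e.g. "time cost 10.21ms"
}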

@ -39,6 +39,8 @@ const (
WSPullMsgBySeqList = 1002 WSPullMsgBySeqList = 1002
WSSendMsg = 1003 WSSendMsg = 1003
WSSendSignalMsg = 1004 WSSendSignalMsg = 1004
WSPullMsg = 1005
WSGetConvMaxReadSeq = 1006
WSPushMsg = 2001 WSPushMsg = 2001
WSKickOnlineMsg = 2002 WSKickOnlineMsg = 2002
WsLogoutMsg = 2003 WsLogoutMsg = 2003
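WSPullMsg and WSGetConvMaxReadSeq extend the client-to-server identifier range (1001-1006) and are dispatched in handleMessage to GetSeqMessage and GetConversationsHasReadAndMaxSeq. A hypothetical client-side frame for the new pull call; the Req field names are assumptions for illustration, only the identifier value comes from the constants above:

package main

import "fmt"

// Req here is an illustrative stand-in for the msggateway request wrapper.
type Req struct {
	ReqIdentifier int
	OperationID   string
	Data          []byte // proto-marshaled msg.GetSeqMessageReq
}

const WSPullMsg = 1005 // routed to GetSeqMessage on the server

func main() {
	frame := Req{ReqIdentifier: WSPullMsg, OperationID: "op-123"}
	fmt.Printf("identifier=%d op=%s\n", frame.ReqIdentifier, frame.OperationID)
}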

@ -58,7 +58,7 @@ func Start(ctx context.Context, index int, conf *Config) error {
) )
hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error { hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges) longServer.online, _ = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges)
return nil return nil
}) })

@ -19,6 +19,8 @@ import (
"sync" "sync"
"github.com/go-playground/validator/v10" "github.com/go-playground/validator/v10"
"google.golang.org/protobuf/proto"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msg"
@ -27,7 +29,6 @@ import (
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/jsonutil" "github.com/openimsdk/tools/utils/jsonutil"
"google.golang.org/protobuf/proto"
) )
type Req struct { type Req struct {
@ -94,6 +95,8 @@ type MessageHandler interface {
SendMessage(context context.Context, data *Req) ([]byte, error) SendMessage(context context.Context, data *Req) ([]byte, error)
SendSignalMessage(context context.Context, data *Req) ([]byte, error) SendSignalMessage(context context.Context, data *Req) ([]byte, error)
PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error)
GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error)
GetSeqMessage(context context.Context, data *Req) ([]byte, error)
UserLogout(context context.Context, data *Req) ([]byte, error) UserLogout(context context.Context, data *Req) ([]byte, error)
SetUserDeviceBackground(context context.Context, data *Req) ([]byte, bool, error) SetUserDeviceBackground(context context.Context, data *Req) ([]byte, bool, error)
} }
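With two methods added to MessageHandler, an implementation that misses one now only fails where it is first used. A common companion idiom (not part of this diff) is a compile-time assertion in the same package, which turns any drift into an immediate build error:

// Fails to compile if GrpcHandler stops satisfying MessageHandler.
var _ MessageHandler = GrpcHandler{}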
@ -175,7 +178,7 @@ func (g GrpcHandler) SendSignalMessage(context context.Context, data *Req) ([]by
func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) { func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) {
req := sdkws.PullMessageBySeqsReq{} req := sdkws.PullMessageBySeqsReq{}
if err := proto.Unmarshal(data.Data, &req); err != nil { if err := proto.Unmarshal(data.Data, &req); err != nil {
return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "PullMessageBySeqsReq") return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "PullMessageBySeqsReq")
} }
if err := g.validate.Struct(data); err != nil { if err := g.validate.Struct(data); err != nil {
return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "PullMessageBySeqsReq") return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "PullMessageBySeqsReq")
@ -191,6 +194,44 @@ func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([
return c, nil return c, nil
} }
func (g GrpcHandler) GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error) {
req := msg.GetConversationsHasReadAndMaxSeqReq{}
if err := proto.Unmarshal(data.Data, &req); err != nil {
return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "GetConversationsHasReadAndMaxSeq")
}
if err := g.validate.Struct(data); err != nil {
return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetConversationsHasReadAndMaxSeq")
}
resp, err := g.msgRpcClient.GetConversationsHasReadAndMaxSeq(context, &req)
if err != nil {
return nil, err
}
c, err := proto.Marshal(resp)
if err != nil {
return nil, errs.WrapMsg(err, "error marshaling response", "action", "marshal", "dataType", "GetConversationsHasReadAndMaxSeq")
}
return c, nil
}
func (g GrpcHandler) GetSeqMessage(context context.Context, data *Req) ([]byte, error) {
req := msg.GetSeqMessageReq{}
if err := proto.Unmarshal(data.Data, &req); err != nil {
return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "GetSeqMessage")
}
if err := g.validate.Struct(data); err != nil {
return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetSeqMessage")
}
resp, err := g.msgRpcClient.GetSeqMessage(context, &req)
if err != nil {
return nil, err
}
c, err := proto.Marshal(resp)
if err != nil {
return nil, errs.WrapMsg(err, "error marshaling response", "action", "marshal", "dataType", "GetSeqMessage")
}
return c, nil
}
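GetConversationsHasReadAndMaxSeq and GetSeqMessage repeat the unmarshal, validate, call, marshal chain verbatim. One possible consolidation, sketched on the assumption of a generics-capable Go toolchain (this is not the project's API; note the real handlers validate the outer *Req wrapper, while this sketch validates the proto body to stay self-contained):

package sketch

import (
	"context"

	"github.com/go-playground/validator/v10"
	"github.com/openimsdk/tools/errs"
	"google.golang.org/protobuf/proto"
)

// handleProto factors the shared unmarshal -> validate -> RPC -> marshal chain.
func handleProto[T any, P interface {
	*T
	proto.Message
}, R proto.Message](
	ctx context.Context,
	v *validator.Validate,
	payload []byte,
	call func(context.Context, P) (R, error),
	name string,
) ([]byte, error) {
	var body T
	req := P(&body)
	if err := proto.Unmarshal(payload, req); err != nil {
		return nil, errs.WrapMsg(err, "err proto unmarshal", "dataType", name)
	}
	if err := v.Struct(req); err != nil {
		return nil, errs.WrapMsg(err, "validation failed", "dataType", name)
	}
	resp, err := call(ctx, req)
	if err != nil {
		return nil, err
	}
	out, err := proto.Marshal(resp)
	if err != nil {
		return nil, errs.WrapMsg(err, "error marshaling response", "dataType", name)
	}
	return out, nil
}

A handler body would then reduce to a single call such as handleProto(ctx, g.validate, data.Data, g.msgRpcClient.GetSeqMessage, "GetSeqMessage"), assuming the rpc client method keeps its (ctx, req) -> (resp, error) shape.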
func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, error) { func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, error) {
req := push.DelUserPushTokenReq{} req := push.DelUserPushTokenReq{}
if err := proto.Unmarshal(data.Data, &req); err != nil { if err := proto.Unmarshal(data.Data, &req); err != nil {

@ -265,7 +265,7 @@ func (ws *WsServer) registerClient(client *Client) {
if clientOK { if clientOK {
ws.clients.Set(client.UserID, client) ws.clients.Set(client.UserID, client)
// There is already a connection to the platform // There is already a connection to the platform
log.ZInfo(client.ctx, "repeat login", "userID", client.UserID, "platformID", log.ZDebug(client.ctx, "repeat login", "userID", client.UserID, "platformID",
client.PlatformID, "old remote addr", getRemoteAdders(oldClients)) client.PlatformID, "old remote addr", getRemoteAdders(oldClients))
ws.onlineUserConnNum.Add(1) ws.onlineUserConnNum.Add(1)
} else { } else {
@ -275,7 +275,7 @@ func (ws *WsServer) registerClient(client *Client) {
} }
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
log.ZDebug(client.ctx, "ws.msgGatewayConfig.Discovery.Enable", ws.msgGatewayConfig.Discovery.Enable) log.ZDebug(client.ctx, "ws.msgGatewayConfig.Discovery.Enable", "discoveryEnable", ws.msgGatewayConfig.Discovery.Enable)
if ws.msgGatewayConfig.Discovery.Enable != "k8s" { if ws.msgGatewayConfig.Discovery.Enable != "k8s" {
wg.Add(1) wg.Add(1)
@ -293,7 +293,7 @@ func (ws *WsServer) registerClient(client *Client) {
wg.Wait() wg.Wait()
log.ZInfo( log.ZDebug(
client.ctx, client.ctx,
"user online", "user online",
"online user Num", "online user Num",
@ -360,7 +360,7 @@ func (ws *WsServer) unregisterClient(client *Client) {
ws.onlineUserConnNum.Add(-1) ws.onlineUserConnNum.Add(-1)
ws.subscription.DelClient(client) ws.subscription.DelClient(client)
//ws.SetUserOnlineStatus(client.ctx, client, constant.Offline) //ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num", log.ZDebug(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
ws.onlineUserNum.Load(), "online user conn Num", ws.onlineUserNum.Load(), "online user conn Num",
ws.onlineUserConnNum.Load(), ws.onlineUserConnNum.Load(),
) )
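The ws.msgGatewayConfig change above is more than a level tweak: the value was previously passed positionally where the logger expects alternating key/value pairs, so it had no key. The same convention can be seen with the standard library's log/slog (Go 1.21+), which makes the mistake visible:

package main

import "log/slog"

func main() {
	enable := "k8s"
	slog.Info("discovery", enable)                    // renders !BADKEY=k8s
	slog.Info("discovery", "discoveryEnable", enable) // discoveryEnable=k8s
}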

@ -16,20 +16,22 @@ package msgtransfer
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil" "github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
@ -64,6 +66,7 @@ type Config struct {
func Start(ctx context.Context, index int, config *Config) error { func Start(ctx context.Context, index int, config *Config) error {
log.CInfo(ctx, "MSG-TRANSFER server is initializing", "prometheusPorts", log.CInfo(ctx, "MSG-TRANSFER server is initializing", "prometheusPorts",
config.MsgTransfer.Prometheus.Ports, "index", index) config.MsgTransfer.Prometheus.Ports, "index", index)
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
if err != nil { if err != nil {
return err return err
@ -72,12 +75,13 @@ func Start(ctx context.Context, index int, config *Config) error {
if err != nil { if err != nil {
return err return err
} }
client, err := kdisc.NewDiscoveryRegister(&config.Discovery, &config.Share) client, err := discRegister.NewDiscoveryRegister(&config.Discovery, &config.Share)
if err != nil { if err != nil {
return err return err
} }
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
msgModel := redis.NewMsgCache(rdb) msgModel := redis.NewMsgCache(rdb)
msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB()) msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
if err != nil { if err != nil {
@ -93,20 +97,21 @@ func Start(ctx context.Context, index int, config *Config) error {
return err return err
} }
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser) seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig) msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil { if err != nil {
return err return err
} }
conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgDatabase, &conversationRpcClient, &groupRpcClient) historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgTransferDatabase, &conversationRpcClient, &groupRpcClient)
if err != nil { if err != nil {
return err return err
} }
historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgDatabase) historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgTransferDatabase)
if err != nil { if err != nil {
return err return err
} }
msgTransfer := &MsgTransfer{ msgTransfer := &MsgTransfer{
historyCH: historyCH, historyCH: historyCH,
historyMongoCH: historyMongoCH, historyMongoCH: historyMongoCH,
@ -137,7 +142,7 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
return return
} }
if err := prommetrics.TransferInit(prometheusPort); err != nil && err != http.ErrServerClosed { if err := prommetrics.TransferInit(prometheusPort); err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort) netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
netDone <- struct{}{} netDone <- struct{}{}
} }
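Replacing err != http.ErrServerClosed with errors.Is matters once errors are wrapped: a direct comparison only matches the exact sentinel value, while errors.Is walks the wrap chain. The same reasoning applies to the errors.Is(errs.Unwrap(err), redis.Nil) change further down. A runnable illustration:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

func main() {
	err := fmt.Errorf("prometheus listener: %w", http.ErrServerClosed)
	fmt.Println(err == http.ErrServerClosed)          // false: the sentinel is wrapped
	fmt.Println(errors.Is(err, http.ErrServerClosed)) // true: Is unwraps the chain
}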

@ -16,6 +16,12 @@ package msgtransfer
import ( import (
"context" "context"
"encoding/json"
"errors"
"strconv"
"strings"
"time"
"github.com/IBM/sarama" "github.com/IBM/sarama"
"github.com/go-redis/redis" "github.com/go-redis/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
@ -31,9 +37,6 @@ import (
"github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/stringutil" "github.com/openimsdk/tools/utils/stringutil"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"strconv"
"strings"
"time"
) )
const ( const (
@ -54,19 +57,19 @@ type OnlineHistoryRedisConsumerHandler struct {
redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage] redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage]
msgDatabase controller.CommonMsgDatabase msgTransferDatabase controller.MsgTransferDatabase
conversationRpcClient *rpcclient.ConversationRpcClient conversationRpcClient *rpcclient.ConversationRpcClient
groupRpcClient *rpcclient.GroupRpcClient groupRpcClient *rpcclient.GroupRpcClient
} }
func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase, func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase,
conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*OnlineHistoryRedisConsumerHandler, error) { conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*OnlineHistoryRedisConsumerHandler, error) {
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false) historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var och OnlineHistoryRedisConsumerHandler var och OnlineHistoryRedisConsumerHandler
och.msgDatabase = database och.msgTransferDatabase = database
b := batcher.New[sarama.ConsumerMessage]( b := batcher.New[sarama.ConsumerMessage](
batcher.WithSize(size), batcher.WithSize(size),
@ -88,6 +91,7 @@ func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database cont
och.conversationRpcClient = conversationRpcClient och.conversationRpcClient = conversationRpcClient
och.groupRpcClient = groupRpcClient och.groupRpcClient = groupRpcClient
och.historyConsumerGroup = historyConsumerGroup och.historyConsumerGroup = historyConsumerGroup
return &och, err return &och, err
} }
func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) { func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) {
@ -96,6 +100,7 @@ func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID
ctx = withAggregationCtx(ctx, ctxMessages) ctx = withAggregationCtx(ctx, ctxMessages)
log.ZInfo(ctx, "msg arrived channel", "channel id", channelID, "msgList length", len(ctxMessages), log.ZInfo(ctx, "msg arrived channel", "channel id", channelID, "msgList length", len(ctxMessages),
"key", val.Key()) "key", val.Key())
och.doSetReadSeq(ctx, ctxMessages)
storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList := storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList :=
och.categorizeMessageLists(ctxMessages) och.categorizeMessageLists(ctxMessages)
@ -109,6 +114,60 @@ func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID
och.handleNotification(ctx, val.Key(), conversationIDNotification, storageNotificationList, notStorageNotificationList) och.handleNotification(ctx, val.Key(), conversationIDNotification, storageNotificationList, notStorageNotificationList)
} }
func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context, msgs []*ContextMsg) {
type seqKey struct {
conversationID string
userID string
}
var readSeq map[seqKey]int64
for _, msg := range msgs {
if msg.message.ContentType != constant.HasReadReceipt {
continue
}
var elem sdkws.NotificationElem
if err := json.Unmarshal(msg.message.Content, &elem); err != nil {
log.ZError(ctx, "handlerConversationRead Unmarshal NotificationElem msg err", err, "msg", msg)
continue
}
var tips sdkws.MarkAsReadTips
if err := json.Unmarshal([]byte(elem.Detail), &tips); err != nil {
log.ZError(ctx, "handlerConversationRead Unmarshal MarkAsReadTips msg err", err, "msg", msg)
continue
}
if len(tips.Seqs) > 0 {
for _, seq := range tips.Seqs {
if tips.HasReadSeq < seq {
tips.HasReadSeq = seq
}
}
clear(tips.Seqs)
tips.Seqs = nil
}
if tips.HasReadSeq < 0 {
continue
}
if readSeq == nil {
readSeq = make(map[seqKey]int64)
}
key := seqKey{
conversationID: tips.ConversationID,
userID: tips.MarkAsReadUserID,
}
if readSeq[key] > tips.HasReadSeq {
continue
}
readSeq[key] = tips.HasReadSeq
}
if readSeq == nil {
return
}
for key, seq := range readSeq {
if err := och.msgTransferDatabase.SetHasReadSeqToDB(ctx, key.userID, key.conversationID, seq); err != nil {
log.ZError(ctx, "set read seq to db error", err, "userID", key.userID, "conversationID", key.conversationID, "seq", seq)
}
}
}
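To make the aggregation concrete: doSetReadSeq first folds each receipt's Seqs slice into its HasReadSeq (taking the maximum), then keeps one maximal seq per (conversationID, userID) pair, so a whole batch collapses to a single SetHasReadSeqToDB write per pair. A self-contained rerun of that folding logic with invented values:

package main

import "fmt"

type seqKey struct{ conversationID, userID string }

func main() {
	// Three read receipts for the same conversation/user pair in one batch.
	receipts := []struct {
		key     seqKey
		hasRead int64
	}{
		{seqKey{"si_100_200", "u200"}, 5},
		{seqKey{"si_100_200", "u200"}, 9},
		{seqKey{"si_100_200", "u200"}, 7},
	}
	readSeq := make(map[seqKey]int64)
	for _, r := range receipts {
		if readSeq[r.key] < r.hasRead {
			readSeq[r.key] = r.hasRead
		}
	}
	fmt.Println(readSeq) // map[{si_100_200 u200}:9] -> one SetHasReadSeqToDB call
}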
func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg { func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg {
var ctxMessages []*ContextMsg var ctxMessages []*ContextMsg
for i := 0; i < len(consumerMessages); i++ { for i := 0; i < len(consumerMessages); i++ {
@ -179,6 +238,11 @@ func (och *OnlineHistoryRedisConsumerHandler) categorizeMessageLists(totalMsgs [
} }
func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) { func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) {
log.ZInfo(ctx, "handle storage msg")
for _, storageMsg := range storageList {
log.ZDebug(ctx, "handle storage msg", "msg", storageMsg.message.String())
}
och.toPushTopic(ctx, key, conversationID, notStorageList) och.toPushTopic(ctx, key, conversationID, notStorageList)
var storageMessageList []*sdkws.MsgData var storageMessageList []*sdkws.MsgData
for _, msg := range storageList { for _, msg := range storageList {
@ -186,21 +250,25 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
} }
if len(storageMessageList) > 0 { if len(storageMessageList) > 0 {
msg := storageMessageList[0] msg := storageMessageList[0]
lastSeq, isNewConversation, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) lastSeq, isNewConversation, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList)
if err != nil && errs.Unwrap(err) != redis.Nil { if err != nil && !errors.Is(errs.Unwrap(err), redis.Nil) {
log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList) log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList)
return return
} }
log.ZInfo(ctx, "BatchInsertChat2Cache end")
if isNewConversation { if isNewConversation {
switch msg.SessionType { switch msg.SessionType {
case constant.ReadGroupChatType: case constant.ReadGroupChatType:
log.ZInfo(ctx, "group chat first create conversation", "conversationID", log.ZDebug(ctx, "group chat first create conversation", "conversationID",
conversationID) conversationID)
userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID) userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID)
if err != nil { if err != nil {
log.ZWarn(ctx, "get group member ids error", err, "conversationID", log.ZWarn(ctx, "get group member ids error", err, "conversationID",
conversationID) conversationID)
} else { } else {
log.ZInfo(ctx, "GetGroupMemberIDs end")
if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx, if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx,
msg.GroupID, userIDs); err != nil { msg.GroupID, userIDs); err != nil {
log.ZWarn(ctx, "single chat first create conversation error", err, log.ZWarn(ctx, "single chat first create conversation error", err,
@ -219,13 +287,16 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
} }
} }
log.ZDebug(ctx, "success incr to next topic") log.ZInfo(ctx, "success incr to next topic")
err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
if err != nil { if err != nil {
log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID",
conversationID, "storageList", storageMessageList, "lastSeq", lastSeq) conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
} }
log.ZInfo(ctx, "MsgToMongoMQ end")
och.toPushTopic(ctx, key, conversationID, storageList) och.toPushTopic(ctx, key, conversationID, storageList)
log.ZInfo(ctx, "toPushTopic end")
} }
} }
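
Note the changed error check on BatchInsertChat2Cache: the old direct comparison of errs.Unwrap(err) with redis.Nil misses a sentinel wrapped more than once, while errors.Is walks the whole wrap chain. A small demonstration with go-redis v9's sentinel:

package main

import (
	"errors"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	wrapped := fmt.Errorf("cache lookup: %w", redis.Nil)

	fmt.Println(wrapped == redis.Nil)          // false: equality misses wrapped sentinels
	fmt.Println(errors.Is(wrapped, redis.Nil)) // true: errors.Is unwraps the chain
}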
@ -237,14 +308,14 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con
storageMessageList = append(storageMessageList, msg.message) storageMessageList = append(storageMessageList, msg.message)
} }
if len(storageMessageList) > 0 { if len(storageMessageList) > 0 {
lastSeq, _, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) lastSeq, _, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList)
if err != nil { if err != nil {
log.ZError(ctx, "notification batch insert to redis error", err, "conversationID", conversationID, log.ZError(ctx, "notification batch insert to redis error", err, "conversationID", conversationID,
"storageList", storageMessageList) "storageList", storageMessageList)
return return
} }
log.ZDebug(ctx, "success to next topic", "conversationID", conversationID) log.ZDebug(ctx, "success to next topic", "conversationID", conversationID)
err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
if err != nil { if err != nil {
log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID",
conversationID, "storageList", storageMessageList, "lastSeq", lastSeq) conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
@ -253,9 +324,10 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con
} }
} }
func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(_ context.Context, key, conversationID string, msgs []*ContextMsg) { func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) {
for _, v := range msgs { for _, v := range msgs {
och.msgDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message) log.ZDebug(ctx, "push msg to topic", "msg", v.message.String())
_, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
} }
} }
@ -280,7 +352,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSess
func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
	claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group
log.ZInfo(context.Background(), "online new session msg come", "highWaterMarkOffset", log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) { och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
session.MarkMessage(lastMessage, "") session.MarkMessage(lastMessage, "")

@ -29,10 +29,10 @@ import (
type OnlineHistoryMongoConsumerHandler struct { type OnlineHistoryMongoConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup historyConsumerGroup *kafka.MConsumerGroup
msgDatabase controller.CommonMsgDatabase msgTransferDatabase controller.MsgTransferDatabase
} }
func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase) (*OnlineHistoryMongoConsumerHandler, error) { func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase) (*OnlineHistoryMongoConsumerHandler, error) {
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true) historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true)
if err != nil { if err != nil {
return nil, err return nil, err
@ -40,7 +40,7 @@ func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database cont
mc := &OnlineHistoryMongoConsumerHandler{ mc := &OnlineHistoryMongoConsumerHandler{
historyConsumerGroup: historyConsumerGroup, historyConsumerGroup: historyConsumerGroup,
msgDatabase: database, msgTransferDatabase: database,
} }
return mc, nil return mc, nil
} }
@ -57,8 +57,8 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg) log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
return return
} }
log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String()) log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
err = mc.msgDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq) err = mc.msgTransferDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq)
if err != nil { if err != nil {
log.ZError( log.ZError(
ctx, ctx,
@ -77,7 +77,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
for _, msg := range msgFromMQ.MsgData { for _, msg := range msgFromMQ.MsgData {
seqs = append(seqs, msg.Seq) seqs = append(seqs, msg.Seq)
} }
err = mc.msgDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs) err = mc.msgTransferDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs)
if err != nil { if err != nil {
log.ZError( log.ZError(
ctx, ctx,
@ -91,13 +91,13 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
} }
} }
func (OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } func (*OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
func (OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } func (*OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }
func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim( func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim(
sess sarama.ConsumerGroupSession, sess sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim, claim sarama.ConsumerGroupClaim,
) error { // a instance in the consumer group ) error { // an instance in the consumer group
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset", log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
for msg := range claim.Messages() { for msg := range claim.Messages() {

@ -0,0 +1,29 @@
package push
import (
"github.com/openimsdk/protocol/sdkws"
"testing"
)
func TestName(t *testing.T) {
var c ConsumerHandler
c.readCh = make(chan *sdkws.MarkAsReadTips)
go c.loopRead()
go func() {
for i := 0; ; i++ {
seq := int64(i + 1)
if seq%3 == 0 {
seq = 1
}
c.readCh <- &sdkws.MarkAsReadTips{
ConversationID: "c100",
MarkAsReadUserID: "u100",
HasReadSeq: seq,
}
}
}()
	select {} // blocks forever: this smoke test is meant to be run and observed manually
}
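
As written, TestName ends in select {}, so go test on this package never returns. A time-bounded variant under the same assumptions (ConsumerHandler exposes readCh and loopRead as in this diff; "time" would need to be imported):

func TestLoopReadBounded(t *testing.T) {
	var c ConsumerHandler
	c.readCh = make(chan *sdkws.MarkAsReadTips)
	go c.loopRead()
	deadline := time.After(200 * time.Millisecond)
	for i := 0; ; i++ {
		seq := int64(i + 1)
		if seq%3 == 0 {
			seq = 1 // periodically regress the seq to exercise the max-keeping path
		}
		select {
		case c.readCh <- &sdkws.MarkAsReadTips{
			ConversationID:   "c100",
			MarkAsReadUserID: "u100",
			HasReadSeq:       seq,
		}:
		case <-deadline:
			return // stop feeding after 200ms instead of blocking forever
		}
	}
}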

@ -17,6 +17,7 @@ package dummy
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/tools/log"
) )
func NewClient() *Dummy { func NewClient() *Dummy {
@ -27,5 +28,6 @@ type Dummy struct {
} }
func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error { func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error {
log.ZDebug(ctx, "dummy push")
return nil return nil
} }

@ -18,11 +18,11 @@ import (
"context" "context"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"strconv" "strconv"
"sync" "sync"
"time" "time"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
@ -91,6 +91,15 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri
for i, v := range s.GetSplitResult() { for i, v := range s.GetSplitResult() {
go func(index int, userIDs []string) { go func(index int, userIDs []string) {
defer wg.Done() defer wg.Done()
for i := 0; i < len(userIDs); i += maxNum {
end := i + maxNum
if end > len(userIDs) {
end = len(userIDs)
}
if err = g.batchPush(ctx, token, userIDs[i:end], pushReq); err != nil {
log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
}
}
if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil { if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil {
log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq) log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
} }
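
The loop added above slices userIDs into maxNum-sized windows before each batchPush call. The same slicing, extracted into a runnable helper for clarity (names here are illustrative, not from the Getui client):

package main

import "fmt"

// chunk splits ids into consecutive batches of at most size elements,
// mirroring the i += maxNum loop added to the Getui client.
func chunk(ids []string, size int) [][]string {
	var out [][]string
	for i := 0; i < len(ids); i += size {
		end := i + size
		if end > len(ids) {
			end = len(ids)
		}
		out = append(out, ids[i:end])
	}
	return out
}

func main() {
	ids := []string{"u1", "u2", "u3", "u4", "u5"}
	fmt.Println(chunk(ids, 2)) // [[u1 u2] [u3 u4] [u5]]
}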

@ -0,0 +1,122 @@
package push
import (
"context"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/protocol/constant"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/jsonutil"
"google.golang.org/protobuf/proto"
)
type OfflinePushConsumerHandler struct {
OfflinePushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher
}
func NewOfflinePushConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher) (*OfflinePushConsumerHandler, error) {
var offlinePushConsumerHandler OfflinePushConsumerHandler
var err error
offlinePushConsumerHandler.offlinePusher = offlinePusher
offlinePushConsumerHandler.OfflinePushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToOfflineGroupID,
[]string{config.KafkaConfig.ToOfflinePushTopic}, true)
if err != nil {
return nil, err
}
return &offlinePushConsumerHandler, nil
}
func (*OfflinePushConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
func (*OfflinePushConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (o *OfflinePushConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
for msg := range claim.Messages() {
ctx := o.OfflinePushConsumerGroup.GetContextFromMsg(msg)
o.handleMsg2OfflinePush(ctx, msg.Value)
sess.MarkMessage(msg, "")
}
return nil
}
func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) {
offlinePushMsg := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil {
log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg))
return
}
if offlinePushMsg.MsgData == nil || offlinePushMsg.UserIDs == nil {
log.ZError(ctx, "offline push msg is empty", errs.New("offlinePushMsg is empty"), "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData)
return
}
log.ZInfo(ctx, "receive to OfflinePush MQ", "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData)
err := o.offlinePushMsg(ctx, offlinePushMsg.MsgData, offlinePushMsg.UserIDs)
if err != nil {
log.ZWarn(ctx, "offline push failed", err, "msg", offlinePushMsg.String())
}
}
func (o *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) {
type AtTextElem struct {
Text string `json:"text,omitempty"`
AtUserList []string `json:"atUserList,omitempty"`
IsAtSelf bool `json:"isAtSelf"`
}
opts = &options.Opts{Signal: &options.Signal{}}
if msg.OfflinePushInfo != nil {
opts.IOSBadgeCount = msg.OfflinePushInfo.IOSBadgeCount
opts.IOSPushSound = msg.OfflinePushInfo.IOSPushSound
opts.Ex = msg.OfflinePushInfo.Ex
}
if msg.OfflinePushInfo != nil {
title = msg.OfflinePushInfo.Title
content = msg.OfflinePushInfo.Desc
}
if title == "" {
switch msg.ContentType {
case constant.Text:
fallthrough
case constant.Picture:
fallthrough
case constant.Voice:
fallthrough
case constant.Video:
fallthrough
case constant.File:
title = constant.ContentType2PushContent[int64(msg.ContentType)]
case constant.AtText:
ac := AtTextElem{}
_ = jsonutil.JsonStringToStruct(string(msg.Content), &ac)
case constant.SignalingNotification:
title = constant.ContentType2PushContent[constant.SignalMsg]
default:
title = constant.ContentType2PushContent[constant.Common]
}
}
if content == "" {
content = title
}
return
}
func (o *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error {
title, content, opts, err := o.getOfflinePushInfos(msg)
if err != nil {
return err
}
err = o.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
if err != nil {
prommetrics.MsgOfflinePushFailedCounter.Inc()
return err
}
return nil
}

@ -19,20 +19,20 @@ type OnlinePusher interface {
pushToUserIDs *[]string) []string pushToUserIDs *[]string) []string
} }
type emptyOnlinePUsher struct{} type emptyOnlinePusher struct{}
func newEmptyOnlinePUsher() *emptyOnlinePUsher { func newEmptyOnlinePusher() *emptyOnlinePusher {
return &emptyOnlinePUsher{} return &emptyOnlinePusher{}
} }
func (emptyOnlinePUsher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData,
pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
log.ZWarn(ctx, "emptyOnlinePUsher GetConnsAndOnlinePush", nil) log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
return nil, nil return nil, nil
} }
func (u emptyOnlinePUsher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData,
wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string { wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
log.ZWarn(ctx, "emptyOnlinePUsher GetOnlinePushFailedUserIDs", nil) log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
return nil return nil
} }
@ -45,7 +45,7 @@ func NewOnlinePusher(disCov discovery.SvcDiscoveryRegistry, config *Config) Onli
case "etcd": case "etcd":
return NewDefaultAllNode(disCov, config) return NewDefaultAllNode(disCov, config)
default: default:
return newEmptyOnlinePUsher() return newEmptyOnlinePusher()
} }
} }

@ -2,6 +2,7 @@ package push
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
@ -17,12 +18,12 @@ type pushServer struct {
disCov discovery.SvcDiscoveryRegistry disCov discovery.SvcDiscoveryRegistry
offlinePusher offlinepush.OfflinePusher offlinePusher offlinepush.OfflinePusher
pushCh *ConsumerHandler pushCh *ConsumerHandler
offlinePushCh *OfflinePushConsumerHandler
} }
type Config struct { type Config struct {
RpcConfig config.Push RpcConfig config.Push
RedisConfig config.Redis RedisConfig config.Redis
MongodbConfig config.Mongo
KafkaConfig config.Kafka KafkaConfig config.Kafka
NotificationConfig config.Notification NotificationConfig config.Notification
Share config.Share Share config.Share
@ -55,18 +56,30 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil { if err != nil {
return err return err
} }
database := controller.NewPushDatabase(cacheModel)
consumer, err := NewConsumerHandler(config, offlinePusher, rdb, client) database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig)
consumer, err := NewConsumerHandler(config, database, offlinePusher, rdb, client)
if err != nil {
return err
}
offlinePushConsumer, err := NewOfflinePushConsumerHandler(config, offlinePusher)
if err != nil { if err != nil {
return err return err
} }
pbpush.RegisterPushMsgServiceServer(server, &pushServer{ pbpush.RegisterPushMsgServiceServer(server, &pushServer{
database: database, database: database,
disCov: client, disCov: client,
offlinePusher: offlinePusher, offlinePusher: offlinePusher,
pushCh: consumer, pushCh: consumer,
offlinePushCh: offlinePushConsumer,
}) })
go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer) go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer)
go offlinePushConsumer.OfflinePushConsumerGroup.RegisterHandleAndConsumer(ctx, offlinePushConsumer)
return nil return nil
} }

@ -1,33 +1,20 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push package push
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"github.com/IBM/sarama" "github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache" "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
pbchat "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/protocol/msggateway"
pbpush "github.com/openimsdk/protocol/push" pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
@ -40,12 +27,16 @@ import (
"github.com/openimsdk/tools/utils/timeutil" "github.com/openimsdk/tools/utils/timeutil"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"math/rand"
"strconv"
"time"
) )
type ConsumerHandler struct { type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup pushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher onlinePusher OnlinePusher
pushDatabase controller.PushDatabase
onlineCache *rpccache.OnlineCache onlineCache *rpccache.OnlineCache
groupLocalCache *rpccache.GroupLocalCache groupLocalCache *rpccache.GroupLocalCache
conversationLocalCache *rpccache.ConversationLocalCache conversationLocalCache *rpccache.ConversationLocalCache
@ -56,7 +47,7 @@ type ConsumerHandler struct {
config *Config config *Config
} }
func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, func NewConsumerHandler(config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient,
client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) { client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) {
var consumerHandler ConsumerHandler var consumerHandler ConsumerHandler
var err error var err error
@ -65,7 +56,9 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
if err != nil { if err != nil {
return nil, err return nil, err
} }
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
consumerHandler.offlinePusher = offlinePusher consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config) consumerHandler.onlinePusher = NewOnlinePusher(client, config)
consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
@ -75,42 +68,45 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb) consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL) consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
consumerHandler.config = config consumerHandler.config = config
consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil) consumerHandler.pushDatabase = database
consumerHandler.onlineCache, err = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, config.RpcConfig.FullUserCache, nil)
if err != nil {
return nil, err
}
return &consumerHandler, nil return &consumerHandler, nil
} }
func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) { func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
msgFromMQ := pbchat.PushMsgDataToMQ{} msgFromMQ := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &msgFromMQ); err != nil { if err := proto.Unmarshal(msg, &msgFromMQ); err != nil {
log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg)) log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg))
return return
} }
pbData := &pbpush.PushMsgReq{
MsgData: msgFromMQ.MsgData,
ConversationID: msgFromMQ.ConversationID,
}
sec := msgFromMQ.MsgData.SendTime / 1000 sec := msgFromMQ.MsgData.SendTime / 1000
nowSec := timeutil.GetCurrentTimestampBySecond() nowSec := timeutil.GetCurrentTimestampBySecond()
log.ZDebug(ctx, "push msg", "msg", pbData.String(), "sec", sec, "nowSec", nowSec)
if nowSec-sec > 10 { if nowSec-sec > 10 {
return prommetrics.MsgLoneTimePushCounter.Inc()
log.ZWarn(ctx, "its been a while since the message was sent", nil, "msg", msgFromMQ.String(), "sec", sec, "nowSec", nowSec, "nowSec-sec", nowSec-sec)
} }
var err error var err error
switch msgFromMQ.MsgData.SessionType { switch msgFromMQ.MsgData.SessionType {
case constant.ReadGroupChatType: case constant.ReadGroupChatType:
err = c.Push2Group(ctx, pbData.MsgData.GroupID, pbData.MsgData) err = c.Push2Group(ctx, msgFromMQ.MsgData.GroupID, msgFromMQ.MsgData)
default: default:
var pushUserIDList []string var pushUserIDList []string
isSenderSync := datautil.GetSwitchFromOptions(pbData.MsgData.Options, constant.IsSenderSync) isSenderSync := datautil.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync)
if !isSenderSync || pbData.MsgData.SendID == pbData.MsgData.RecvID { if !isSenderSync || msgFromMQ.MsgData.SendID == msgFromMQ.MsgData.RecvID {
pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID) pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID)
} else { } else {
pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID, pbData.MsgData.SendID) pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID, msgFromMQ.MsgData.SendID)
} }
err = c.Push2User(ctx, pushUserIDList, pbData.MsgData) err = c.Push2User(ctx, pushUserIDList, msgFromMQ.MsgData)
} }
if err != nil { if err != nil {
log.ZWarn(ctx, "push failed", err, "msg", pbData.String()) log.ZWarn(ctx, "push failed", err, "msg", msgFromMQ.String())
} }
} }
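
handleMs2PsChat no longer drops messages older than ten seconds; it counts them via MsgLoneTimePushCounter, warns, and pushes anyway. The lag arithmetic in isolation (SendTime is in milliseconds, the threshold is the 10 used above):

package main

import (
	"fmt"
	"time"
)

// lag reports how many seconds behind a message is, using the same
// seconds-since-send arithmetic as handleMs2PsChat.
func lag(sendTimeMillis int64, now time.Time) int64 {
	return now.Unix() - sendTimeMillis/1000
}

func main() {
	sent := time.Now().Add(-12 * time.Second).UnixMilli()
	l := lag(sent, time.Now())
	fmt.Println(l, l > 10) // 12 true: past the 10s threshold, so the counter is bumped
}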
@ -119,6 +115,14 @@ func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil } func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
c.onlineCache.Lock.Lock()
for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver {
c.onlineCache.Cond.Wait()
}
c.onlineCache.Lock.Unlock()
ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
log.ZInfo(ctx, "begin consume messages")
for msg := range claim.Messages() { for msg := range claim.Messages() {
ctx := c.pushConsumerGroup.GetContextFromMsg(msg) ctx := c.pushConsumerGroup.GetContextFromMsg(msg)
c.handleMs2PsChat(ctx, msg.Value) c.handleMs2PsChat(ctx, msg.Value)
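
The wait at the top of ConsumeClaim above parks the consumer until the online cache's initial subscription completes (CurrentPhase reaching rpccache.DoSubscribeOver). A runnable sketch of that phase gate, assuming only the Lock/Cond/phase trio visible in the diff:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

const doSubscribeOver = 2 // assumed phase ordering; the real constant is rpccache.DoSubscribeOver

type gate struct {
	lock  sync.Mutex
	phase atomic.Int64
	cond  *sync.Cond
}

func newGate() *gate {
	g := &gate{}
	g.cond = sync.NewCond(&g.lock)
	return g
}

// advance stores the new phase under the lock so a waiter cannot check the
// phase and miss the broadcast, then wakes every waiting consumer.
func (g *gate) advance(p int64) {
	g.lock.Lock()
	g.phase.Store(p)
	g.lock.Unlock()
	g.cond.Broadcast()
}

// waitReady blocks exactly like the ConsumeClaim prologue: hold the lock,
// re-check the phase after every wakeup, proceed once subscribed.
func (g *gate) waitReady() {
	g.lock.Lock()
	for g.phase.Load() < doSubscribeOver {
		g.cond.Wait()
	}
	g.lock.Unlock()
}

func main() {
	g := newGate()
	go func() {
		time.Sleep(50 * time.Millisecond)
		g.advance(doSubscribeOver)
	}()
	g.waitReady()
	fmt.Println("initial subscription done, safe to consume")
}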
@ -129,20 +133,27 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s
// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType. // Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) { func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String()) log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
defer func(duration time.Time) {
t := time.Since(duration)
log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "msg", msg.String(), "time cost", t)
}(time.Now())
if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil { if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
return err return err
} }
log.ZInfo(ctx, "webhookBeforeOnlinePush end")
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs) wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
if err != nil { if err != nil {
return err return err
} }
log.ZDebug(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs) log.ZInfo(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs)
if !c.shouldPushOffline(ctx, msg) { if !c.shouldPushOffline(ctx, msg) {
return nil return nil
} }
log.ZInfo(ctx, "shouldPushOffline end")
for _, v := range wsResults { for _, v := range wsResults {
		//the message sender does not need offline push //the message sender does not need offline push
@ -154,17 +165,17 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *
return nil return nil
} }
} }
offlinePUshUserID := []string{msg.RecvID} offlinePushUserID := []string{msg.RecvID}
//receiver offline push //receiver offline push
if err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, if err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush,
offlinePUshUserID, msg, nil); err != nil { offlinePushUserID, msg, nil); err != nil {
return err return err
} }
log.ZInfo(ctx, "webhookBeforeOfflinePush end")
err = c.offlinePushMsg(ctx, msg, offlinePUshUserID) err = c.offlinePushMsg(ctx, msg, offlinePushUserID)
if err != nil { if err != nil {
log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePUshUserID", offlinePUshUserID, "msg", msg) log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePushUserID", offlinePushUserID, "msg", msg)
return nil return nil
} }
@ -183,21 +194,11 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
} }
func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) { func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
var ( onlineUserIDs, offlineUserIDs, err := c.onlineCache.GetUsersOnline(ctx, pushToUserIDs)
onlineUserIDs []string if err != nil {
offlineUserIDs []string return nil, err
)
for _, userID := range pushToUserIDs {
online, err := c.onlineCache.GetUserOnline(ctx, userID)
if err != nil {
return nil, err
}
if online {
onlineUserIDs = append(onlineUserIDs, userID)
} else {
offlineUserIDs = append(offlineUserIDs, userID)
}
} }
log.ZDebug(ctx, "GetConnsAndOnlinePush online cache", "sendID", msg.SendID, "recvID", msg.RecvID, "groupID", msg.GroupID, "sessionType", msg.SessionType, "clientMsgID", msg.ClientMsgID, "serverMsgID", msg.ServerMsgID, "offlineUserIDs", offlineUserIDs, "onlineUserIDs", onlineUserIDs) log.ZDebug(ctx, "GetConnsAndOnlinePush online cache", "sendID", msg.SendID, "recvID", msg.RecvID, "groupID", msg.GroupID, "sessionType", msg.SessionType, "clientMsgID", msg.ClientMsgID, "serverMsgID", msg.ServerMsgID, "offlineUserIDs", offlineUserIDs, "onlineUserIDs", onlineUserIDs)
var result []*msggateway.SingleMsgToUserResults var result []*msggateway.SingleMsgToUserResults
if len(onlineUserIDs) > 0 { if len(onlineUserIDs) > 0 {
@ -216,57 +217,70 @@ func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.
} }
func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) { func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) log.ZInfo(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
defer func(duration time.Time) {
t := time.Since(duration)
log.ZInfo(ctx, "Get group msg from msg_transfer and push msg end", "msg", msg.String(), "groupID", groupID, "time cost", t)
}(time.Now())
var pushToUserIDs []string var pushToUserIDs []string
if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg, if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg,
&pushToUserIDs); err != nil { &pushToUserIDs); err != nil {
return err return err
} }
log.ZInfo(ctx, "webhookBeforeGroupOnlinePush end")
err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg) err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg)
if err != nil { if err != nil {
return err return err
} }
log.ZInfo(ctx, "groupMessagesHandler end")
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs) wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil { if err != nil {
return err return err
} }
log.ZDebug(ctx, "group push result", "result", wsResults, "msg", msg) log.ZInfo(ctx, "group push result", "result", wsResults, "msg", msg)
if !c.shouldPushOffline(ctx, msg) { if !c.shouldPushOffline(ctx, msg) {
return nil return nil
} }
needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs) needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs)
log.ZInfo(ctx, "GetOnlinePushFailedUserIDs end")
	//filter some users, such as do-not-disturb or those that don't need offline push, etc. //filter some users, such as do-not-disturb or those that don't need offline push, etc.
needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs) needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs)
if err != nil { if err != nil {
return err return err
} }
log.ZInfo(ctx, "filterGroupMessageOfflinePush end")
// Use offline push messaging // Use offline push messaging
if len(needOfflinePushUserIDs) > 0 { if len(needOfflinePushUserIDs) > 0 {
var offlinePushUserIDs []string c.asyncOfflinePush(ctx, needOfflinePushUserIDs, msg)
err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs) }
if err != nil {
return err
}
if len(offlinePushUserIDs) > 0 {
needOfflinePushUserIDs = offlinePushUserIDs
}
err = c.offlinePushMsg(ctx, msg, needOfflinePushUserIDs) return nil
if err != nil { }
log.ZWarn(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg)
return nil
}
func (c *ConsumerHandler) asyncOfflinePush(ctx context.Context, needOfflinePushUserIDs []string, msg *sdkws.MsgData) {
var offlinePushUserIDs []string
err := c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs)
if err != nil {
log.ZWarn(ctx, "webhookBeforeOfflinePush failed", err, "msg", msg)
return
} }
return nil if len(offlinePushUserIDs) > 0 {
needOfflinePushUserIDs = offlinePushUserIDs
}
if err := c.pushDatabase.MsgToOfflinePushMQ(ctx, conversationutil.GenConversationUniqueKeyForSingle(msg.SendID, msg.RecvID), needOfflinePushUserIDs, msg); err != nil {
log.ZError(ctx, "Msg To OfflinePush MQ error", err, "needOfflinePushUserIDs",
needOfflinePushUserIDs, "msg", msg)
prommetrics.SingleChatMsgProcessFailedCounter.Inc()
return
}
} }
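
asyncOfflinePush now hands the webhook-filtered user list to MsgToOfflinePushMQ and lets the OfflinePushConsumerHandler above do the vendor push. The controller implementation is not part of this diff; a plausible producer side, sketched with a plain sarama.SyncProducer (the producer type and topic wiring are assumptions):

import (
	"github.com/IBM/sarama"
	pbpush "github.com/openimsdk/protocol/push"
	"github.com/openimsdk/protocol/sdkws"
	"google.golang.org/protobuf/proto"
)

// sketch only: marshal the same pbpush.PushMsgReq the offline consumer
// unmarshals, keyed by conversation so per-conversation ordering is kept.
func msgToOfflinePushMQ(p sarama.SyncProducer, topic, key string, userIDs []string, msg *sdkws.MsgData) error {
	b, err := proto.Marshal(&pbpush.PushMsgReq{MsgData: msg, UserIDs: userIDs})
	if err != nil {
		return err
	}
	_, _, err = p.SendMessage(&sarama.ProducerMessage{
		Topic: topic,
		Key:   sarama.StringEncoder(key),
		Value: sarama.ByteEncoder(b),
	})
	return err
}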
func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) { func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) {
if len(*pushToUserIDs) == 0 { if len(*pushToUserIDs) == 0 {
*pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID) *pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID)
@ -300,7 +314,7 @@ func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID stri
		if err := unmarshalNotificationElem(msg.Content, &tips); err != nil { if err := unmarshalNotificationElem(msg.Content, &tips); err != nil {
			return err return err
} }
log.ZInfo(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs) log.ZDebug(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs)
if len(c.config.Share.IMAdminUserID) > 0 { if len(c.config.Share.IMAdminUserID) > 0 {
ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0]) ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0])
} }
@ -384,6 +398,7 @@ func (c *ConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, conten
} }
return return
} }
func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error {
conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID)
maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID)
@ -392,6 +407,7 @@ func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context,
} }
return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq) return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq)
} }
func unmarshalNotificationElem(bytes []byte, t any) error { func unmarshalNotificationElem(bytes []byte, t any) error {
var notification sdkws.NotificationElem var notification sdkws.NotificationElem
if err := json.Unmarshal(bytes, &notification); err != nil { if err := json.Unmarshal(bytes, &notification); err != nil {

@ -221,11 +221,11 @@ func (c *conversationServer) SetConversation(ctx context.Context, req *pbconvers
return resp, nil return resp, nil
} }
// nolint
func (c *conversationServer) SetConversations(ctx context.Context, req *pbconversation.SetConversationsReq) (*pbconversation.SetConversationsResp, error) { func (c *conversationServer) SetConversations(ctx context.Context, req *pbconversation.SetConversationsReq) (*pbconversation.SetConversationsResp, error) {
if req.Conversation == nil { if req.Conversation == nil {
return nil, errs.ErrArgs.WrapMsg("conversation must not be nil") return nil, errs.ErrArgs.WrapMsg("conversation must not be nil")
} }
if req.Conversation.ConversationType == constant.WriteGroupChatType { if req.Conversation.ConversationType == constant.WriteGroupChatType {
groupInfo, err := c.groupRpcClient.GetGroupInfo(ctx, req.Conversation.GroupID) groupInfo, err := c.groupRpcClient.GetGroupInfo(ctx, req.Conversation.GroupID)
if err != nil { if err != nil {
@ -235,98 +235,141 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
return nil, servererrs.ErrDismissedAlready.WrapMsg("group dismissed") return nil, servererrs.ErrDismissedAlready.WrapMsg("group dismissed")
} }
} }
var unequal int
var conv dbModel.Conversation conversationMap := make(map[string]*dbModel.Conversation)
if len(req.UserIDs) == 1 { var needUpdateUsersList []string
cs, err := c.conversationDatabase.FindConversations(ctx, req.UserIDs[0], []string{req.Conversation.ConversationID})
for _, userID := range req.UserIDs {
conversationList, err := c.conversationDatabase.FindConversations(ctx, userID, []string{req.Conversation.ConversationID})
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(cs) == 0 { if len(conversationList) != 0 {
return nil, errs.ErrRecordNotFound.WrapMsg("conversation not found") conversationMap[userID] = conversationList[0]
} else {
needUpdateUsersList = append(needUpdateUsersList, userID)
} }
conv = *cs[0]
} }
var conversation dbModel.Conversation var conversation dbModel.Conversation
conversation.ConversationID = req.Conversation.ConversationID conversation.ConversationID = req.Conversation.ConversationID
conversation.ConversationType = req.Conversation.ConversationType conversation.ConversationType = req.Conversation.ConversationType
conversation.UserID = req.Conversation.UserID conversation.UserID = req.Conversation.UserID
conversation.GroupID = req.Conversation.GroupID conversation.GroupID = req.Conversation.GroupID
m := make(map[string]any) m := make(map[string]any)
if req.Conversation.RecvMsgOpt != nil {
m["recv_msg_opt"] = req.Conversation.RecvMsgOpt.Value setConversationFieldsFunc := func() {
if req.Conversation.RecvMsgOpt.Value != conv.RecvMsgOpt { if req.Conversation.RecvMsgOpt != nil {
unequal++ m["recv_msg_opt"] = req.Conversation.RecvMsgOpt.Value
} }
} if req.Conversation.AttachedInfo != nil {
if req.Conversation.AttachedInfo != nil { m["attached_info"] = req.Conversation.AttachedInfo.Value
m["attached_info"] = req.Conversation.AttachedInfo.Value
if req.Conversation.AttachedInfo.Value != conv.AttachedInfo {
unequal++
} }
} if req.Conversation.Ex != nil {
if req.Conversation.Ex != nil { m["ex"] = req.Conversation.Ex.Value
m["ex"] = req.Conversation.Ex.Value
if req.Conversation.Ex.Value != conv.Ex {
unequal++
} }
} if req.Conversation.IsPinned != nil {
if req.Conversation.IsPinned != nil { m["is_pinned"] = req.Conversation.IsPinned.Value
m["is_pinned"] = req.Conversation.IsPinned.Value
if req.Conversation.IsPinned.Value != conv.IsPinned {
unequal++
} }
} if req.Conversation.GroupAtType != nil {
if req.Conversation.GroupAtType != nil { m["group_at_type"] = req.Conversation.GroupAtType.Value
m["group_at_type"] = req.Conversation.GroupAtType.Value
if req.Conversation.GroupAtType.Value != conv.GroupAtType {
unequal++
} }
} if req.Conversation.MsgDestructTime != nil {
if req.Conversation.MsgDestructTime != nil { m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value
m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value }
				if req.Conversation.MsgDestructTime.Value != conv.MsgDestructTime { if req.Conversation.IsMsgDestruct != nil {
					unequal++ m["is_msg_destruct"] = req.Conversation.IsMsgDestruct.Value
}
if req.Conversation.BurnDuration != nil {
m["burn_duration"] = req.Conversation.BurnDuration.Value
} }
} }
if req.Conversation.IsMsgDestruct != nil {
m["is_msg_destruct"] = req.Conversation.IsMsgDestruct.Value // set need set field in conversation
if req.Conversation.IsMsgDestruct.Value != conv.IsMsgDestruct { setConversationFieldsFunc()
unequal++
for userID := range conversationMap {
unequal := len(m)
if req.Conversation.RecvMsgOpt != nil {
if req.Conversation.RecvMsgOpt.Value == conversationMap[userID].RecvMsgOpt {
unequal--
}
}
if req.Conversation.AttachedInfo != nil {
if req.Conversation.AttachedInfo.Value == conversationMap[userID].AttachedInfo {
unequal--
}
}
if req.Conversation.Ex != nil {
if req.Conversation.Ex.Value == conversationMap[userID].Ex {
unequal--
}
}
if req.Conversation.IsPinned != nil {
if req.Conversation.IsPinned.Value == conversationMap[userID].IsPinned {
unequal--
}
}
if req.Conversation.GroupAtType != nil {
if req.Conversation.GroupAtType.Value == conversationMap[userID].GroupAtType {
unequal--
}
}
if req.Conversation.MsgDestructTime != nil {
if req.Conversation.MsgDestructTime.Value == conversationMap[userID].MsgDestructTime {
unequal--
}
}
if req.Conversation.IsMsgDestruct != nil {
if req.Conversation.IsMsgDestruct.Value == conversationMap[userID].IsMsgDestruct {
unequal--
}
}
if req.Conversation.BurnDuration != nil {
if req.Conversation.BurnDuration.Value == conversationMap[userID].BurnDuration {
unequal--
}
}
if unequal > 0 {
needUpdateUsersList = append(needUpdateUsersList, userID)
} }
} }
if req.Conversation.IsPrivateChat != nil && req.Conversation.ConversationType != constant.ReadGroupChatType { if req.Conversation.IsPrivateChat != nil && req.Conversation.ConversationType != constant.ReadGroupChatType {
var conversations []*dbModel.Conversation var conversations []*dbModel.Conversation
for _, ownerUserID := range req.UserIDs { for _, ownerUserID := range req.UserIDs {
conversation2 := conversation transConversation := conversation
conversation2.OwnerUserID = ownerUserID transConversation.OwnerUserID = ownerUserID
conversation2.IsPrivateChat = req.Conversation.IsPrivateChat.Value transConversation.IsPrivateChat = req.Conversation.IsPrivateChat.Value
conversations = append(conversations, &conversation2) conversations = append(conversations, &transConversation)
} }
if err := c.conversationDatabase.SyncPeerUserPrivateConversationTx(ctx, conversations); err != nil { if err := c.conversationDatabase.SyncPeerUserPrivateConversationTx(ctx, conversations); err != nil {
return nil, err return nil, err
} }
for _, userID := range req.UserIDs { for _, userID := range req.UserIDs {
c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID, c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID,
req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID) req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID)
} }
} } else {
if len(m) != 0 && len(needUpdateUsersList) != 0 {
if req.Conversation.BurnDuration != nil { if err := c.conversationDatabase.SetUsersConversationFieldTx(ctx, needUpdateUsersList, &conversation, m); err != nil {
m["burn_duration"] = req.Conversation.BurnDuration.Value return nil, err
if req.Conversation.BurnDuration.Value != conv.BurnDuration { }
unequal++
}
}
if err := c.conversationDatabase.SetUsersConversationFieldTx(ctx, req.UserIDs, &conversation, m); err != nil {
return nil, err
}
if unequal > 0 { for _, v := range needUpdateUsersList {
for _, v := range req.UserIDs { c.conversationNotificationSender.ConversationChangeNotification(ctx, v, []string{req.Conversation.ConversationID})
c.conversationNotificationSender.ConversationChangeNotification(ctx, v, []string{req.Conversation.ConversationID}) }
} }
} }
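
The rewritten SetConversations starts unequal at len(m) and decrements it once for every requested field that already holds the stored value, so change notifications go only to users whose conversation actually changes. That bookkeeping in isolation, with two fields standing in for the eight above:

package main

import "fmt"

type stored struct {
	RecvMsgOpt int32
	IsPinned   bool
}

func main() {
	// m holds the requested updates, as setConversationFieldsFunc builds it.
	m := map[string]any{"recv_msg_opt": int32(1), "is_pinned": true}
	cur := stored{RecvMsgOpt: 1, IsPinned: false}

	// Start from the number of requested fields and subtract the ones that
	// already hold the requested value, mirroring the unequal counter.
	unequal := len(m)
	if m["recv_msg_opt"] == cur.RecvMsgOpt {
		unequal--
	}
	if m["is_pinned"] == cur.IsPinned {
		unequal--
	}
	fmt.Println("fields that actually change:", unequal) // 1, so this user is notified
}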
@ -392,6 +435,14 @@ func (c *conversationServer) SetConversationMaxSeq(ctx context.Context, req *pbc
return &pbconversation.SetConversationMaxSeqResp{}, nil return &pbconversation.SetConversationMaxSeqResp{}, nil
} }
func (c *conversationServer) SetConversationMinSeq(ctx context.Context, req *pbconversation.SetConversationMinSeqReq) (*pbconversation.SetConversationMinSeqResp, error) {
if err := c.conversationDatabase.UpdateUsersConversationField(ctx, req.OwnerUserID, req.ConversationID,
map[string]any{"min_seq": req.MinSeq}); err != nil {
return nil, err
}
return &pbconversation.SetConversationMinSeqResp{}, nil
}
func (c *conversationServer) GetConversationIDs(ctx context.Context, req *pbconversation.GetConversationIDsReq) (*pbconversation.GetConversationIDsResp, error) { func (c *conversationServer) GetConversationIDs(ctx context.Context, req *pbconversation.GetConversationIDsReq) (*pbconversation.GetConversationIDsResp, error) {
conversationIDs, err := c.conversationDatabase.GetConversationIDs(ctx, req.UserID) conversationIDs, err := c.conversationDatabase.GetConversationIDs(ctx, req.UserID)
if err != nil { if err != nil {
@ -634,11 +685,11 @@ func (c *conversationServer) GetConversationsNeedDestructMsgs(ctx context.Contex
conversationIDs, err := c.conversationDatabase.PageConversationIDs(ctx, pagination) conversationIDs, err := c.conversationDatabase.PageConversationIDs(ctx, pagination)
if err != nil { if err != nil {
log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber) // log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber)
continue continue
} }
log.ZDebug(ctx, "PageConversationIDs success", "pageNumber", pageNumber, "conversationIDsNum", len(conversationIDs), "conversationIDs", conversationIDs) // log.ZDebug(ctx, "PageConversationIDs success", "pageNumber", pageNumber, "conversationIDsNum", len(conversationIDs), "conversationIDs", conversationIDs)
if len(conversationIDs) == 0 { if len(conversationIDs) == 0 {
continue continue
} }

@ -358,3 +358,74 @@ func (s *groupServer) webhookAfterSetGroupInfo(ctx context.Context, after *confi
} }
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoResp{}, after) s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoResp{}, after)
} }
func (s *groupServer) webhookBeforeSetGroupInfoEX(ctx context.Context, before *config.BeforeConfig, req *group.SetGroupInfoEXReq) error {
return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
cbReq := &callbackstruct.CallbackBeforeSetGroupInfoEXReq{
CallbackCommand: callbackstruct.CallbackBeforeSetGroupInfoCommand,
GroupID: req.GroupInfoForSetEX.GroupID,
GroupName: req.GroupInfoForSetEX.GroupName,
Notification: req.GroupInfoForSetEX.Notification,
Introduction: req.GroupInfoForSetEX.Introduction,
FaceURL: req.GroupInfoForSetEX.FaceURL,
}
if req.GroupInfoForSetEX.Ex != nil {
cbReq.Ex = req.GroupInfoForSetEX.Ex
}
log.ZDebug(ctx, "debug CallbackBeforeSetGroupInfoEX", "ex", cbReq.Ex)
if req.GroupInfoForSetEX.NeedVerification != nil {
cbReq.NeedVerification = req.GroupInfoForSetEX.NeedVerification
}
if req.GroupInfoForSetEX.LookMemberInfo != nil {
cbReq.LookMemberInfo = req.GroupInfoForSetEX.LookMemberInfo
}
if req.GroupInfoForSetEX.ApplyMemberFriend != nil {
cbReq.ApplyMemberFriend = req.GroupInfoForSetEX.ApplyMemberFriend
}
resp := &callbackstruct.CallbackBeforeSetGroupInfoEXResp{}
if err := s.webhookClient.SyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, before); err != nil {
return err
}
datautil.NotNilReplace(&req.GroupInfoForSetEX.GroupID, &resp.GroupID)
datautil.NotNilReplace(&req.GroupInfoForSetEX.GroupName, &resp.GroupName)
datautil.NotNilReplace(&req.GroupInfoForSetEX.FaceURL, &resp.FaceURL)
datautil.NotNilReplace(&req.GroupInfoForSetEX.Introduction, &resp.Introduction)
datautil.NotNilReplace(&req.GroupInfoForSetEX.Ex, &resp.Ex)
datautil.NotNilReplace(&req.GroupInfoForSetEX.NeedVerification, &resp.NeedVerification)
datautil.NotNilReplace(&req.GroupInfoForSetEX.LookMemberInfo, &resp.LookMemberInfo)
datautil.NotNilReplace(&req.GroupInfoForSetEX.ApplyMemberFriend, &resp.ApplyMemberFriend)
return nil
})
}
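
datautil.NotNilReplace lets the webhook response selectively override request fields: a nil response pointer keeps the request's value. A hypothetical stand-in showing the presumed semantics (the helper below is illustrative, not the library's code):

package main

import "fmt"

// notNilReplace overwrites dst only when src supplied a value, which is how
// the webhookBeforeSetGroupInfoEX response merge above appears to behave.
func notNilReplace[T any](dst, src *T) {
	if src == nil {
		return
	}
	*dst = *src
}

func main() {
	name := "old group name"
	var fromResp *string // webhook left it nil: keep the request value
	notNilReplace(&name, fromResp)
	fmt.Println(name) // old group name

	newName := "renamed by webhook"
	notNilReplace(&name, &newName)
	fmt.Println(name) // renamed by webhook
}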
func (s *groupServer) webhookAfterSetGroupInfoEX(ctx context.Context, after *config.AfterConfig, req *group.SetGroupInfoEXReq) {
cbReq := &callbackstruct.CallbackAfterSetGroupInfoEXReq{
CallbackCommand: callbackstruct.CallbackAfterSetGroupInfoCommand,
GroupID: req.GroupInfoForSetEX.GroupID,
GroupName: req.GroupInfoForSetEX.GroupName,
Notification: req.GroupInfoForSetEX.Notification,
Introduction: req.GroupInfoForSetEX.Introduction,
FaceURL: req.GroupInfoForSetEX.FaceURL,
}
if req.GroupInfoForSetEX.Ex != nil {
cbReq.Ex = req.GroupInfoForSetEX.Ex
}
if req.GroupInfoForSetEX.NeedVerification != nil {
cbReq.NeedVerification = req.GroupInfoForSetEX.NeedVerification
}
if req.GroupInfoForSetEX.LookMemberInfo != nil {
cbReq.LookMemberInfo = req.GroupInfoForSetEX.LookMemberInfo
}
if req.GroupInfoForSetEX.ApplyMemberFriend != nil {
cbReq.ApplyMemberFriend = req.GroupInfoForSetEX.ApplyMemberFriend
}
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoEXResp{}, after)
}

@ -54,6 +54,39 @@ func UpdateGroupInfoMap(ctx context.Context, group *sdkws.GroupInfoForSet) map[s
return m return m
} }
func UpdateGroupInfoEXMap(ctx context.Context, group *sdkws.GroupInfoForSetEX) map[string]any {
m := make(map[string]any)
if group.GroupName != "" {
m["group_name"] = group.GroupName
}
if group.Notification != nil {
m["notification"] = group.Notification.Value
m["notification_update_time"] = time.Now()
m["notification_user_id"] = mcontext.GetOpUserID(ctx)
}
if group.Introduction != nil {
m["introduction"] = group.Introduction.Value
}
if group.FaceURL != nil {
m["face_url"] = group.FaceURL.Value
}
if group.NeedVerification != nil {
m["need_verification"] = group.NeedVerification.Value
}
if group.LookMemberInfo != nil {
m["look_member_info"] = group.LookMemberInfo.Value
}
if group.ApplyMemberFriend != nil {
m["apply_member_friend"] = group.ApplyMemberFriend.Value
}
if group.Ex != nil {
m["ex"] = group.Ex.Value
}
return m
}
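
UpdateGroupInfoEXMap only assembles the column map. A sketch of how such a map typically feeds a single Mongo update (the collection handle and filter key are assumptions, not taken from this diff):

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// sketch: apply the assembled field map as one $set.
func updateGroup(ctx context.Context, coll *mongo.Collection, groupID string, m map[string]any) error {
	if len(m) == 0 {
		return nil // nothing requested, nothing to write
	}
	_, err := coll.UpdateOne(ctx, bson.M{"group_id": groupID}, bson.M{"$set": m})
	return err
}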
func UpdateGroupStatusMap(status int) map[string]any { func UpdateGroupStatusMap(status int) map[string]any {
return map[string]any{ return map[string]any{
"status": status, "status": status,

File diff suppressed because it is too large

@ -16,25 +16,28 @@ package group
import (
"context"
"errors"
"fmt"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification"
"github.com/openimsdk/protocol/constant"
pbgroup "github.com/openimsdk/protocol/group"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/stringutil"
"go.mongodb.org/mongo-driver/mongo"
)
// GroupApplicationReceiver
@ -43,12 +46,22 @@ const (
adminReceiver
)
func NewGroupNotificationSender(
db controller.GroupDatabase,
msgRpcClient *rpcclient.MessageRpcClient,
userRpcClient *rpcclient.UserRpcClient,
conversationRpcClient *rpcclient.ConversationRpcClient,
config *Config,
fn func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error),
) *GroupNotificationSender {
return &GroupNotificationSender{
NotificationSender: rpcclient.NewNotificationSender(&config.NotificationConfig, rpcclient.WithRpcClient(msgRpcClient), rpcclient.WithUserRpcClient(userRpcClient)),
getUsersInfo: fn,
db: db,
config: config,
conversationRpcClient: conversationRpcClient,
msgRpcClient: msgRpcClient,
}
}
@ -57,6 +70,9 @@ type GroupNotificationSender struct {
getUsersInfo func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error)
db controller.GroupDatabase
config *Config
conversationRpcClient *rpcclient.ConversationRpcClient
msgRpcClient *rpcclient.MessageRpcClient
}

func (g *GroupNotificationSender) PopulateGroupMember(ctx context.Context, members ...*model.GroupMember) error {
@ -212,10 +228,13 @@ func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, ap
} */
func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws.GroupMemberFullInfo, groupID string) (err error) {
return g.fillOpUserByUserID(ctx, mcontext.GetOpUserID(ctx), opUser, groupID)
}

func (g *GroupNotificationSender) fillOpUserByUserID(ctx context.Context, userID string, opUser **sdkws.GroupMemberFullInfo, groupID string) error {
if opUser == nil {
return errs.ErrInternalServer.WrapMsg("**sdkws.GroupMemberFullInfo is nil")
}
if groupID != "" {
if authverify.IsManagerUserID(userID, g.config.Share.IMAdminUserID) {
*opUser = &sdkws.GroupMemberFullInfo{
@ -228,7 +247,7 @@ func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws
member, err := g.db.TakeGroupMember(ctx, groupID, userID)
if err == nil {
*opUser = g.groupMemberDB2PB(member, 0)
} else if !(errors.Is(err, mongo.ErrNoDocuments) || errs.ErrRecordNotFound.Is(err)) {
return err
}
}
@ -494,50 +513,67 @@ func (g *GroupNotificationSender) MemberKickedNotification(ctx context.Context,
g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.MemberKickedNotification, tips)
}
func (g *GroupNotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, invitedOpUserID string, entrantUserID ...string) error {
var err error
defer func() {
if err != nil {
log.ZError(ctx, stringutil.GetFuncName(1)+" failed", err)
}
}()

if !g.config.RpcConfig.EnableHistoryForNewMembers {
conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID)
maxSeq, err := g.msgRpcClient.GetConversationMaxSeq(ctx, conversationID)
if err != nil {
return err
}
if _, err = g.msgRpcClient.SetUserConversationsMinSeq(ctx, &msg.SetUserConversationsMinSeqReq{
UserIDs: entrantUserID,
ConversationID: conversationID,
Seq: maxSeq,
}); err != nil {
return err
}
}
if err := g.conversationRpcClient.GroupChatFirstCreateConversation(ctx, groupID, entrantUserID); err != nil {
return err
}

var group *sdkws.GroupInfo
group, err = g.getGroupInfo(ctx, groupID)
if err != nil {
return err
}
users, err := g.getGroupMembers(ctx, groupID, entrantUserID)
if err != nil {
return err
}
tips := &sdkws.MemberInvitedTips{
Group: group,
InvitedUserList: users,
}
opUserID := mcontext.GetOpUserID(ctx)
if err = g.fillOpUserByUserID(ctx, opUserID, &tips.OpUser, tips.Group.GroupID); err != nil {
return nil
}
switch {
case invitedOpUserID == "":
case invitedOpUserID == opUserID:
tips.InviterUser = tips.OpUser
default:
if err = g.fillOpUserByUserID(ctx, invitedOpUserID, &tips.InviterUser, tips.Group.GroupID); err != nil {
return err
}
}
g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberInvitedNotification, tips)
return nil
}

func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID ...string) error {
return g.GroupApplicationAgreeMemberEnterNotification(ctx, groupID, "", entrantUserID...)
}
func (g *GroupNotificationSender) GroupDismissedNotification(ctx context.Context, tips *sdkws.GroupDismissedTips) {

@ -30,8 +30,14 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg.
msgNum int
start = time.Now()
)

clearMsg := func(ctx context.Context) (bool, error) {
docIDs, err := m.MsgDatabase.GetDocIDs(ctx)
if err != nil {
return false, err
}
msgs, err := m.MsgDatabase.GetBeforeMsg(ctx, req.Timestamp, docIDs, 5000)
if err != nil {
return false, err
}
@ -55,19 +61,14 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg.
return true, nil
}

_, err = clearMsg(ctx)
if err != nil {
log.ZError(ctx, "clear msg failed", err, "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start))
return nil, err
}
log.ZDebug(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start))
return &msg.ClearMsgResp{}, nil
}

@ -53,3 +53,12 @@ func (m *msgServer) GetMsgByConversationIDs(ctx context.Context, req *pbmsg.GetM
}
return &pbmsg.GetMsgByConversationIDsResp{MsgDatas: Msgs}, nil
}
func (m *msgServer) SetUserConversationsMinSeq(ctx context.Context, req *pbmsg.SetUserConversationsMinSeqReq) (*pbmsg.SetUserConversationsMinSeqResp, error) {
for _, userID := range req.UserIDs {
if err := m.MsgDatabase.SetUserConversationsMinSeqs(ctx, userID, map[string]int64{req.ConversationID: req.Seq}); err != nil {
return nil, err
}
}
return &pbmsg.SetUserConversationsMinSeqResp{}, nil
}
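This RPC is what backs the EnableHistoryForNewMembers switch in the group hunk above: raising a member's min seq to the conversation's current max seq empties the pullable window, so history before the join point stays hidden. A sketch of the window arithmetic (values are hypothetical):

// A client pulls messages with seq in (minSeq, maxSeq].
joinTimeMaxSeq := int64(1200)  // conversation max seq at join time
memberMinSeq := joinTimeMaxSeq // what SetUserConversationsMinSeq stores per user
firstVisibleSeq := memberMinSeq + 1
fmt.Println(firstVisibleSeq) // 1201: only messages sent after the join are visible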

@ -16,16 +16,15 @@ package msg
import (
"context"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/timeutil"
)
func (m *msgServer) PullMessageBySeqs(ctx context.Context, req *sdkws.PullMessageBySeqsReq) (*sdkws.PullMessageBySeqsResp, error) {
@ -86,6 +85,35 @@ func (m *msgServer) PullMessageBySeqs(ctx context.Context, req *sdkws.PullMessag
return resp, nil
}
func (m *msgServer) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) {
resp := &msg.GetSeqMessageResp{
Msgs: make(map[string]*sdkws.PullMsgs),
NotificationMsgs: make(map[string]*sdkws.PullMsgs),
}
for _, conv := range req.Conversations {
_, _, msgs, err := m.MsgDatabase.GetMsgBySeqs(ctx, req.UserID, conv.ConversationID, conv.Seqs)
if err != nil {
return nil, err
}
var pullMsgs *sdkws.PullMsgs
if ok := false; conversationutil.IsNotificationConversationID(conv.ConversationID) {
pullMsgs, ok = resp.NotificationMsgs[conv.ConversationID]
if !ok {
pullMsgs = &sdkws.PullMsgs{}
resp.NotificationMsgs[conv.ConversationID] = pullMsgs
}
} else {
pullMsgs, ok = resp.Msgs[conv.ConversationID]
if !ok {
pullMsgs = &sdkws.PullMsgs{}
resp.Msgs[conv.ConversationID] = pullMsgs
}
}
pullMsgs.Msgs = append(pullMsgs.Msgs, msgs...)
}
return resp, nil
}
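GetSeqMessage fans results into Msgs or NotificationMsgs purely on the conversation ID. The real check lives in pkg/util/conversationutil; a rough stand-in, assuming OpenIM's usual "n_" prefix for notification conversations (an assumption, not taken from this diff):

// isNotificationConversationID is a hypothetical stand-in for
// conversationutil.IsNotificationConversationID.
func isNotificationConversationID(id string) bool {
    return strings.HasPrefix(id, "n_") // assumed prefix convention
}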
func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) {
if err := authverify.CheckAccessV3(ctx, req.UserID, m.config.Share.IMAdminUserID); err != nil {
return nil, err
@ -104,6 +132,12 @@ func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sd
log.ZWarn(ctx, "GetMaxSeqs error", err, "conversationIDs", conversationIDs, "maxSeqs", maxSeqs)
return nil, err
}
// Skip conversations whose max seq is still 0, so clients do not waste pulls on large numbers of empty conversations.
for conversationID, seq := range maxSeqs {
if seq == 0 {
delete(maxSeqs, conversationID)
}
}
resp := new(sdkws.GetMaxSeqResp)
resp.MaxSeqs = maxSeqs
return resp, nil
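Deleting keys from a Go map while ranging over it is well defined, so the pruning loop above needs no copy. A standalone equivalent:

maxSeqs := map[string]int64{"si_a_b": 42, "sg_group1": 0}
for conversationID, seq := range maxSeqs {
    if seq == 0 {
        delete(maxSeqs, conversationID) // safe: the spec allows deletion during range
    }
}
// maxSeqs now holds only conversations that actually contain messages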

@ -121,7 +121,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
conversationRpcClient: rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation),
config: config,
webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL),
queue: memamq.NewMemoryQueue(16, 1024*1024),
})
return nil
}
@ -312,16 +312,20 @@ func (s *friendServer) GetPaginationFriendsApplyTo(ctx context.Context, req *rel
if err := s.userRpcClient.Access(ctx, req.UserID); err != nil {
return nil, err
}

total, friendRequests, err := s.db.PageFriendRequestToMe(ctx, req.UserID, req.Pagination)
if err != nil {
return nil, err
}

resp = &relation.GetPaginationFriendsApplyToResp{}
resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap)
if err != nil {
return nil, err
}
resp.Total = int32(total)
return resp, nil
}

@ -290,6 +290,7 @@ func (t *thirdServer) apiAddress(prefix, name string) string {
func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) {
var conf config.Third
expireTime := time.UnixMilli(req.ExpireTime)
var deltotal int
findPagination := &sdkws.RequestPagination{
PageNumber: 1,
ShowNumber: 1000,
@ -311,10 +312,8 @@ func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteO
return nil, errs.Wrap(err)
}
if int(count) < 1 && t.minio != nil {
thumbnailKey, _ := t.getMinioImageThumbnailKey(ctx, key)
t.s3dataBase.DeleteObject(ctx, thumbnailKey)
t.s3dataBase.DelS3Key(ctx, conf.Object.Enable, needDelObjectKeys...)
t.s3dataBase.DeleteObject(ctx, key)
@ -329,7 +328,9 @@ func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteO
if total < int64(findPagination.ShowNumber) {
break
}
deltotal += int(total)
}
log.ZDebug(ctx, "DeleteOutdatedData", "delete Total", deltotal)
return &third.DeleteOutdatedDataResp{}, nil
}

@ -2,6 +2,8 @@ package user
import (
"context"

"github.com/openimsdk/tools/utils/datautil"

"github.com/openimsdk/protocol/constant"
pbuser "github.com/openimsdk/protocol/user"
)
@ -80,3 +82,22 @@ func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUse
}
return &pbuser.SetUserOnlineStatusResp{}, nil
}
func (s *userServer) GetAllOnlineUsers(ctx context.Context, req *pbuser.GetAllOnlineUsersReq) (*pbuser.GetAllOnlineUsersResp, error) {
resMap, nextCursor, err := s.online.GetAllOnlineUsers(ctx, req.Cursor)
if err != nil {
return nil, err
}
resp := &pbuser.GetAllOnlineUsersResp{
StatusList: make([]*pbuser.OnlineStatus, 0, len(resMap)),
NextCursor: nextCursor,
}
for userID, plats := range resMap {
resp.StatusList = append(resp.StatusList, &pbuser.OnlineStatus{
UserID: userID,
Status: int32(datautil.If(len(plats) > 0, constant.Online, constant.Offline)),
PlatformIDs: plats,
})
}
return resp, nil
}

@ -17,6 +17,11 @@ package user
import (
"context"
"errors"
"math/rand"
"strings"
"sync"
"time"

"github.com/openimsdk/open-im-server/v3/internal/rpc/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@ -29,10 +34,6 @@ import (
"github.com/openimsdk/protocol/group" "github.com/openimsdk/protocol/group"
friendpb "github.com/openimsdk/protocol/relation" friendpb "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/tools/db/redisutil"
"math/rand"
"strings"
"sync"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/convert"
@ -147,41 +148,35 @@ func (s *userServer) UpdateUserInfo(ctx context.Context, req *pbuser.UpdateUserI
return nil, err
}
s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID)
s.webhookAfterUpdateUserInfo(ctx, &s.config.WebhooksConfig.AfterUpdateUserInfo, req)
if err = s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID, oldUser); err != nil {
return nil, err
}
return resp, nil
}
func (s *userServer) UpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUserInfoExReq) (resp *pbuser.UpdateUserInfoExResp, err error) {
resp = &pbuser.UpdateUserInfoExResp{}
err = authverify.CheckAccessV3(ctx, req.UserInfo.UserID, s.config.Share.IMAdminUserID)
if err != nil {
return nil, err
}

if err = s.webhookBeforeUpdateUserInfoEx(ctx, &s.config.WebhooksConfig.BeforeUpdateUserInfoEx, req); err != nil {
return nil, err
}

oldUser, err := s.db.GetUserByID(ctx, req.UserInfo.UserID)
if err != nil {
return nil, err
}

data := convert.UserPb2DBMapEx(req.UserInfo)
if err = s.db.UpdateByMap(ctx, req.UserInfo.UserID, data); err != nil {
return nil, err
}

s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID)
//friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID)
//if err != nil {
@ -199,6 +194,7 @@ func (s *userServer) UpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUse
if err := s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID, oldUser); err != nil {
return nil, err
}

return resp, nil
}
func (s *userServer) SetGlobalRecvMessageOpt(ctx context.Context, req *pbuser.SetGlobalRecvMessageOptReq) (resp *pbuser.SetGlobalRecvMessageOptResp, err error) {

@ -25,7 +25,6 @@ import (
pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mw"
"google.golang.org/grpc"
@ -59,10 +58,10 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
return err
}

// thirdConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third)
// if err != nil {
// return err
// }

conversationConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Conversation)
if err != nil {
@ -71,7 +70,7 @@
msgClient := msg.NewMsgClient(msgConn)
conversationClient := pbconversation.NewConversationClient(conversationConn)
// thirdClient := third.NewThirdClient(thirdConn)
crontab := cron.New()
@ -80,12 +79,13 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
now := time.Now()
deltime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.RetainChatRecords))
ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deltime.UnixMilli()))
log.ZDebug(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli())
if _, err := msgClient.ClearMsg(ctx, &msg.ClearMsgReq{Timestamp: deltime.UnixMilli()}); err != nil {
log.ZError(ctx, "cron clear chat records failed", err, "deltime", deltime, "cont", time.Since(now))
return
}
log.ZDebug(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
}
if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearMsgFunc); err != nil {
return errs.Wrap(err)
@ -95,7 +95,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
msgDestructFunc := func() {
now := time.Now()
ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), now.UnixMilli()))
log.ZDebug(ctx, "msg destruct cron start", "now", now)
conversations, err := conversationClient.GetConversationsNeedDestructMsgs(ctx, &pbconversation.GetConversationsNeedDestructMsgsReq{})
if err != nil {
@ -108,29 +108,29 @@
return
}
}
log.ZDebug(ctx, "msg destruct cron task completed", "cont", time.Since(now))
}
if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, msgDestructFunc); err != nil {
return errs.Wrap(err)
}
// // scheduled delete outdated file Objects and their datas in specific time.
// deleteObjectFunc := func() {
// now := time.Now()
// deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime))
// ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli()))
// log.ZDebug(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
// if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil {
// log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now))
// return
// }
// log.ZDebug(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
// }
// if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil {
// return errs.Wrap(err)
// }
log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime) log.ZDebug(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
crontab.Start() crontab.Start()
<-ctx.Done() <-ctx.Done()
return nil return nil

@ -0,0 +1 @@
package apistruct

@ -18,7 +18,9 @@ const (
CallbackBeforeInviteJoinGroupCommand = "callbackBeforeInviteJoinGroupCommand"
CallbackAfterJoinGroupCommand = "callbackAfterJoinGroupCommand"
CallbackAfterSetGroupInfoCommand = "callbackAfterSetGroupInfoCommand"
CallbackAfterSetGroupInfoEXCommand = "callbackAfterSetGroupInfoCommandEX"
CallbackBeforeSetGroupInfoCommand = "callbackBeforeSetGroupInfoCommand"
CallbackBeforeSetGroupInfoEXCommand = "callbackBeforeSetGroupInfoEXCommand"
CallbackAfterRevokeMsgCommand = "callbackBeforeAfterMsgCommand"
CallbackBeforeAddBlackCommand = "callbackBeforeAddBlackCommand"
CallbackAfterAddFriendCommand = "callbackAfterAddFriendCommand"

@ -17,6 +17,7 @@ package callbackstruct
import (
"github.com/openimsdk/open-im-server/v3/pkg/apistruct"
common "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/protocol/wrapperspb"
)

type CallbackCommand string
@ -242,3 +243,48 @@ type CallbackAfterSetGroupInfoReq struct {
type CallbackAfterSetGroupInfoResp struct {
CommonCallbackResp
}
type CallbackBeforeSetGroupInfoEXReq struct {
CallbackCommand `json:"callbackCommand"`
OperationID string `json:"operationID"`
GroupID string `json:"groupID"`
GroupName string `json:"groupName"`
Notification *wrapperspb.StringValue `json:"notification"`
Introduction *wrapperspb.StringValue `json:"introduction"`
FaceURL *wrapperspb.StringValue `json:"faceURL"`
Ex *wrapperspb.StringValue `json:"ex"`
NeedVerification *wrapperspb.Int32Value `json:"needVerification"`
LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"`
ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"`
}
type CallbackBeforeSetGroupInfoEXResp struct {
CommonCallbackResp
GroupID string `json:"groupID"`
GroupName string `json:"groupName"`
Notification *wrapperspb.StringValue `json:"notification"`
Introduction *wrapperspb.StringValue `json:"introduction"`
FaceURL *wrapperspb.StringValue `json:"faceURL"`
Ex *wrapperspb.StringValue `json:"ex"`
NeedVerification *wrapperspb.Int32Value `json:"needVerification"`
LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"`
ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"`
}
type CallbackAfterSetGroupInfoEXReq struct {
CallbackCommand `json:"callbackCommand"`
OperationID string `json:"operationID"`
GroupID string `json:"groupID"`
GroupName string `json:"groupName"`
Notification *wrapperspb.StringValue `json:"notification"`
Introduction *wrapperspb.StringValue `json:"introduction"`
FaceURL *wrapperspb.StringValue `json:"faceURL"`
Ex *wrapperspb.StringValue `json:"ex"`
NeedVerification *wrapperspb.Int32Value `json:"needVerification"`
LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"`
ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"`
}
type CallbackAfterSetGroupInfoEXResp struct {
CommonCallbackResp
}
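For consumers of these new EX callbacks, the before-variant lets a business server veto or rewrite fields by echoing them back. A hypothetical HTTP handler (the endpoint and the echo logic are illustrative only, not part of this diff):

func handleBeforeSetGroupInfoEX(w http.ResponseWriter, r *http.Request) {
    var req callbackstruct.CallbackBeforeSetGroupInfoEXReq
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    resp := callbackstruct.CallbackBeforeSetGroupInfoEXResp{
        GroupID:      req.GroupID,
        GroupName:    req.GroupName,
        Notification: req.Notification, // echo back unchanged, or substitute a moderated value
    }
    _ = json.NewEncoder(w).Encode(&resp)
}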

@ -37,7 +37,6 @@ func NewPushRpcCmd() *PushRpcCmd {
ret.configMap = map[string]any{
OpenIMPushCfgFileName: &pushConfig.RpcConfig,
RedisConfigFileName: &pushConfig.RedisConfig,
KafkaConfigFileName: &pushConfig.KafkaConfig,
ShareFileName: &pushConfig.Share,
NotificationFileName: &pushConfig.NotificationConfig,

@ -129,10 +129,11 @@ func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
}

func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
err := log.InitLoggerFromConfig(
cmdOpts.loggerPrefixName,
r.processName,
"",
r.log.RemainLogLevel,
r.log.IsStdout,
r.log.IsJson,

@ -73,18 +73,21 @@ type Mongo struct {
MaxRetry int `mapstructure:"maxRetry"`
}

type Kafka struct {
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
ProducerAck string `mapstructure:"producerAck"`
CompressType string `mapstructure:"compressType"`
Address []string `mapstructure:"address"`
ToRedisTopic string `mapstructure:"toRedisTopic"`
ToMongoTopic string `mapstructure:"toMongoTopic"`
ToPushTopic string `mapstructure:"toPushTopic"`
ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"`
ToRedisGroupID string `mapstructure:"toRedisGroupID"`
ToMongoGroupID string `mapstructure:"toMongoGroupID"`
ToPushGroupID string `mapstructure:"toPushGroupID"`
ToOfflineGroupID string `mapstructure:"toOfflinePushGroupID"`
Tls TLSConfig `mapstructure:"tls"`
}

type TLSConfig struct {
EnableTLS bool `mapstructure:"enableTLS"`
@ -97,8 +100,9 @@ type TLSConfig struct {
type API struct {
Api struct {
ListenIP string `mapstructure:"listenIP"`
Ports []int `mapstructure:"ports"`
CompressionLevel int `mapstructure:"compressionLevel"`
} `mapstructure:"api"`
Prometheus struct {
Enable bool `mapstructure:"enable"`
@ -220,6 +224,7 @@ type Push struct {
BadgeCount bool `mapstructure:"badgeCount"`
Production bool `mapstructure:"production"`
} `mapstructure:"iosPush"`
FullUserCache bool `mapstructure:"fullUserCache"`
}

type Auth struct {
@ -258,7 +263,8 @@ type Group struct {
ListenIP string `mapstructure:"listenIP"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
EnableHistoryForNewMembers bool `mapstructure:"enableHistoryForNewMembers"`
}

type Msg struct {
@ -335,7 +341,8 @@ type Redis struct {
Password string `mapstructure:"password"`
ClusterMode bool `mapstructure:"clusterMode"`
DB int `mapstructure:"storage"`
MaxRetry int `mapstructure:"maxRetry"`
PoolSize int `mapstructure:"poolSize"`
}

type BeforeConfig struct {
@ -421,6 +428,8 @@ type Webhooks struct {
BeforeInviteUserToGroup BeforeConfig `mapstructure:"beforeInviteUserToGroup"`
AfterSetGroupInfo AfterConfig `mapstructure:"afterSetGroupInfo"`
BeforeSetGroupInfo BeforeConfig `mapstructure:"beforeSetGroupInfo"`
AfterSetGroupInfoEX AfterConfig `mapstructure:"afterSetGroupInfoEX"`
BeforeSetGroupInfoEX BeforeConfig `mapstructure:"beforeSetGroupInfoEX"`
AfterRevokeMsg AfterConfig `mapstructure:"afterRevokeMsg"`
BeforeAddBlack BeforeConfig `mapstructure:"beforeAddBlack"`
AfterAddFriend AfterConfig `mapstructure:"afterAddFriend"`
@ -471,6 +480,7 @@ func (r *Redis) Build() *redisutil.Config {
Password: r.Password,
DB: r.DB,
MaxRetry: r.MaxRetry,
PoolSize: r.PoolSize,
}
}

@ -36,3 +36,26 @@ func TestLoadOpenIMRpcUserConfig(t *testing.T) {
//export IMENV_OPENIM_RPC_USER_RPC_PORTS="10110,10111,10112"
assert.Equal(t, []int{10110, 10111, 10112}, user.RPC.Ports)
}
func TestLoadNotificationConfig(t *testing.T) {
var noti Notification
err := LoadConfig("../../../config/notification.yml", "IMENV_NOTIFICATION", &noti)
assert.Nil(t, err)
assert.Equal(t, "Your friend's profile has been changed", noti.FriendRemarkSet.OfflinePush.Title)
}
func TestLoadOpenIMThirdConfig(t *testing.T) {
var third Third
err := LoadConfig("../../../config/openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", &third)
assert.Nil(t, err)
assert.Equal(t, "enabled", third.Object.Enable)
assert.Equal(t, "https://oss-cn-chengdu.aliyuncs.com", third.Object.Oss.Endpoint)
assert.Equal(t, "my_bucket_name", third.Object.Oss.Bucket)
assert.Equal(t, "https://my_bucket_name.oss-cn-chengdu.aliyuncs.com", third.Object.Oss.BucketURL)
assert.Equal(t, "AKID1234567890", third.Object.Oss.AccessKeyID)
assert.Equal(t, "abc123xyz789", third.Object.Oss.AccessKeySecret)
assert.Equal(t, "session_token_value", third.Object.Oss.SessionToken) // Uncomment if session token is needed
assert.Equal(t, true, third.Object.Oss.PublicRead)
// Environment: IMENV_OPENIM_RPC_THIRD_OBJECT_ENABLE=enabled;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ENDPOINT=https://oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKET=my_bucket_name;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKETURL=https://my_bucket_name.oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYID=AKID1234567890;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYSECRET=abc123xyz789;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_SESSIONTOKEN=session_token_value;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_PUBLICREAD=true
}
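The trailing comment documents the environment form of each field; as in the ports test above, variables with the IMENV_ prefix override what the YAML file says. A sketch of exercising that override (the bucket value is illustrative):

os.Setenv("IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKET", "another_bucket")
var third Third
if err := LoadConfig("../../../config/openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", &third); err != nil {
    t.Fatal(err)
}
// third.Object.Oss.Bucket is now "another_bucket", regardless of the YAML value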

@ -23,4 +23,8 @@ var (
Name: "msg_offline_push_failed_total", Name: "msg_offline_push_failed_total",
Help: "The number of msg failed offline pushed", Help: "The number of msg failed offline pushed",
}) })
MsgLoneTimePushCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "msg_long_time_push_total",
Help: "The number of messages with a push time exceeding 10 seconds",
})
) )
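A sketch of how the new counter is meant to be driven on the push path (the 10-second threshold comes from the Help text above; the surrounding push code is assumed):

start := time.Now()
// ... perform the offline push ...
if time.Since(start) > 10*time.Second {
    prommetrics.MsgLoneTimePushCounter.Inc()
}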

@ -47,9 +47,17 @@ func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Co
case share.RpcRegisterName.MessageGateway:
return []prometheus.Collector{OnlineUserGauge}
case share.RpcRegisterName.Msg:
return []prometheus.Collector{
SingleChatMsgProcessSuccessCounter,
SingleChatMsgProcessFailedCounter,
GroupChatMsgProcessSuccessCounter,
GroupChatMsgProcessFailedCounter,
}
case share.RpcRegisterName.Push:
return []prometheus.Collector{
MsgOfflinePushFailedCounter,
MsgLoneTimePushCounter,
}
case share.RpcRegisterName.Auth:
return []prometheus.Collector{UserLoginCounter}
case share.RpcRegisterName.User:

@ -25,7 +25,6 @@ import (
"os" "os"
"os/signal" "os/signal"
"strconv" "strconv"
"sync"
"syscall" "syscall"
"time" "time"
@ -35,7 +34,6 @@ import (
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw" "github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"github.com/openimsdk/tools/utils/network" "github.com/openimsdk/tools/utils/network"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/insecure"
@ -54,6 +52,7 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort, log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort,
"prometheusPorts", prometheusConfig.Ports) "prometheusPorts", prometheusConfig.Ports)
rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort)) rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort))
listener, err := net.Listen( listener, err := net.Listen(
"tcp", "tcp",
rpcTcpAddr, rpcTcpAddr,
@ -61,7 +60,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
if err != nil {
return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
}
defer listener.Close()
client, err := kdisc.NewDiscoveryRegister(discovery, share)
if err != nil {
@ -92,10 +90,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
}
srv := grpc.NewServer(options...)

err = rpcFn(ctx, config, client, srv)
if err != nil {
@ -113,9 +107,8 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
}
var (
netDone = make(chan struct{}, 2)
netErr error
)
if prometheusConfig.Enable {
go func() {
@ -152,18 +145,11 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
signal.Notify(sigs, syscall.SIGTERM)
select {
case <-sigs:
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if err := gracefulStopWithCtx(ctx, srv.GracefulStop); err != nil {
return err
}
return nil
case <-netDone:
return netErr

@ -1,6 +1,9 @@
package cachekey

import (
"strings"
"time"
)

const (
OnlineKey = "ONLINE:"
@ -11,3 +14,7 @@
func GetOnlineKey(userID string) string {
return OnlineKey + userID
}
func GetOnlineKeyUserID(key string) string {
return strings.TrimPrefix(key, OnlineKey)
}
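GetOnlineKeyUserID is the inverse of GetOnlineKey; the SCAN-based GetAllOnlineUsers further down depends on this round trip to recover user IDs from raw Redis keys:

key := cachekey.GetOnlineKey("u123")       // "ONLINE:u123"
userID := cachekey.GetOnlineKeyUserID(key) // "u123"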

@ -5,4 +5,5 @@ import "context"
type OnlineCache interface {
GetOnline(ctx context.Context, userID string) ([]int32, error)
SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error)
}

@ -4,6 +4,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"github.com/dtm-labs/rockscache" "github.com/dtm-labs/rockscache"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"golang.org/x/sync/singleflight" "golang.org/x/sync/singleflight"
@ -65,6 +66,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
} }
bs, err := json.Marshal(value) bs, err := json.Marshal(value)
if err != nil { if err != nil {
log.ZError(ctx, "marshal failed", err)
return nil, err return nil, err
} }
cacheIndex[index] = string(bs) cacheIndex[index] = string(bs)
@ -72,7 +74,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
return cacheIndex, nil return cacheIndex, nil
}) })
if err != nil { if err != nil {
return nil, err return nil, errs.WrapMsg(err, "FetchBatch2 failed")
} }
for index, data := range indexCache { for index, data := range indexCache {
if data == "" { if data == "" {
@ -80,7 +82,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
} }
var value V var value V
if err := json.Unmarshal([]byte(data), &value); err != nil { if err := json.Unmarshal([]byte(data), &value); err != nil {
return nil, err return nil, errs.WrapMsg(err, "Unmarshal failed")
} }
if cb, ok := any(&value).(BatchCacheCallback[K]); ok { if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
cb.BatchCache(keyId[keys[index]]) cb.BatchCache(keyId[keys[index]])

@ -28,6 +28,10 @@ import (
"time" "time"
) )
const (
rocksCacheTimeout = 11 * time.Second
)
// BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache. // BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache.
type BatchDeleterRedis struct { type BatchDeleterRedis struct {
redisClient redis.UniversalClient redisClient redis.UniversalClient
@ -106,6 +110,8 @@ func (c *BatchDeleterRedis) AddKeys(keys ...string) {
// GetRocksCacheOptions returns the default configuration options for RocksCache. // GetRocksCacheOptions returns the default configuration options for RocksCache.
func GetRocksCacheOptions() *rockscache.Options { func GetRocksCacheOptions() *rockscache.Options {
opts := rockscache.NewDefaultOptions() opts := rockscache.NewDefaultOptions()
opts.LockExpire = rocksCacheTimeout
opts.WaitReplicasTimeout = rocksCacheTimeout
opts.StrongConsistency = true opts.StrongConsistency = true
opts.RandomExpireAdjustment = 0.2 opts.RandomExpireAdjustment = 0.2
@ -118,7 +124,7 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin
v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) { v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
t, err = fn(ctx) t, err = fn(ctx)
if err != nil { if err != nil {
log.ZError(ctx, "getCache query database failed", err, "key", key) //log.ZError(ctx, "getCache query database failed", err, "key", key)
return "", err return "", err
} }
bs, err := json.Marshal(t) bs, err := json.Marshal(t)

@ -2,12 +2,15 @@ package redis
import (
"context"
"fmt"
"strconv"
"strings"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
)
@ -48,6 +51,36 @@
return platformIDs, nil
}
func (s *userOnline) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
result := make(map[string][]int32)
keys, nextCursor, err := s.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result()
if err != nil {
return nil, 0, err
}
for _, key := range keys {
userID := cachekey.GetOnlineKeyUserID(key)
strValues, err := s.rdb.ZRange(ctx, key, 0, -1).Result()
if err != nil {
return nil, 0, err
}
values := make([]int32, 0, len(strValues))
for _, value := range strValues {
intValue, err := strconv.Atoi(value)
if err != nil {
return nil, 0, errs.Wrap(err)
}
values = append(values, int32(intValue))
}
result[userID] = values
}
return result, nextCursor, nil
}
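Callers drain the SCAN cursor by looping until it comes back as 0, which is how the GetAllOnlineUsers RPC earlier in this diff would collect a full snapshot. A minimal caller-side sketch (online is an OnlineCache as defined above):

var cursor uint64
all := make(map[string][]int32)
for {
    users, next, err := online.GetAllOnlineUsers(ctx, cursor)
    if err != nil {
        return err
    }
    for userID, platformIDs := range users {
        all[userID] = platformIDs
    }
    if next == 0 {
        break // SCAN is complete once the cursor wraps to 0
    }
    cursor = next
}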
func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
script := `
local key = KEYS[1]
@ -66,11 +99,10 @@
local change = (num1 ~= num2) or (num2 ~= num3)
if change then
local members = redis.call("ZRANGE", key, 0, -1)
table.insert(members, "1")
return members
else
return {"0"}
end
`
now := time.Now()
@ -82,12 +114,24 @@
for _, platformID := range online {
argv = append(argv, platformID)
}
keys := []string{s.getUserOnlineKey(userID)}
platformIDs, err := s.rdb.Eval(ctx, script, keys, argv).StringSlice()
if err != nil {
log.ZError(ctx, "redis SetUserOnline", err, "userID", userID, "online", online, "offline", offline)
return err
}
if len(platformIDs) == 0 {
return errs.ErrInternalServer.WrapMsg("SetUserOnline redis lua invalid return value")
}
if platformIDs[len(platformIDs)-1] != "0" {
log.ZDebug(ctx, "redis SetUserOnline push", "userID", userID, "online", online, "offline", offline, "platformIDs", platformIDs[:len(platformIDs)-1])
platformIDs[len(platformIDs)-1] = userID
msg := strings.Join(platformIDs, ":")
if err := s.rdb.Publish(ctx, s.channelName, msg).Err(); err != nil {
return errs.Wrap(err)
}
} else {
log.ZDebug(ctx, "redis SetUserOnline not push", "userID", userID, "online", online, "offline", offline)
}
return nil
}
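The published payload is now the member list joined with ":" with the user ID appended last (the Lua script's trailing "1" marker is overwritten before publishing). A subscriber can take it apart like this (a sketch; payload is the received message string):

parts := strings.Split(payload, ":")
userID := parts[len(parts)-1]
platformIDs := make([]int32, 0, len(parts)-1)
for _, p := range parts[:len(parts)-1] {
    id, err := strconv.Atoi(p)
    if err != nil {
        return err
    }
    platformIDs = append(platformIDs, int32(id))
}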

@ -0,0 +1,51 @@
package redis
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/db/redisutil"
"testing"
"time"
)
/*
address: [ 172.16.8.48:7001, 172.16.8.48:7002, 172.16.8.48:7003, 172.16.8.48:7004, 172.16.8.48:7005, 172.16.8.48:7006 ]
username:
password: passwd123
clusterMode: true
db: 0
maxRetry: 10
*/
func TestName111111(t *testing.T) {
conf := config.Redis{
Address: []string{
"172.16.8.124:7001",
"172.16.8.124:7002",
"172.16.8.124:7003",
"172.16.8.124:7004",
"172.16.8.124:7005",
"172.16.8.124:7006",
},
ClusterMode: true,
Password: "passwd123",
//Address: []string{"localhost:16379"},
//Password: "openIM123",
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*1000)
defer cancel()
rdb, err := redisutil.NewRedisClient(ctx, conf.Build())
if err != nil {
panic(err)
}
online := NewUserOnline(rdb)
userID := "a123456"
t.Log(online.GetOnline(ctx, userID))
t.Log(online.SetUserOnline(ctx, userID, []int32{1, 2, 3, 4}, nil))
t.Log(online.GetOnline(ctx, userID))
}
func TestName111(t *testing.T) {
}

@ -74,17 +74,22 @@ func (s *seqUserCacheRedis) GetUserReadSeq(ctx context.Context, conversationID s
}

func (s *seqUserCacheRedis) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID)
if err != nil {
return err
}
if dbSeq < seq {
if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil {
return errs.Wrap(err)
}
}
return nil
}

func (s *seqUserCacheRedis) SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error {
return s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq)
}

func (s *seqUserCacheRedis) SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
keys := make([]string, 0, len(seqs))
for conversationID, seq := range seqs {
@ -128,13 +133,6 @@
if err := s.setUserRedisReadSeqs(ctx, userID, seqs); err != nil {
return err
}
return nil
}

@ -9,6 +9,7 @@ type SeqUser interface {
SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error
SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
SetUserReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error
GetUserReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)

@ -160,7 +160,7 @@ func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string,
if err != nil { if err != nil {
return err return err
} }
opUserID := mcontext.GetOperationID(ctx) opUserID := mcontext.GetOpUserID(ctx)
friends := make([]*model.Friend, 0, len(friendUserIDs)*2) friends := make([]*model.Friend, 0, len(friendUserIDs)*2)
myFriendsSet := datautil.SliceSetAny(myFriends, func(friend *model.Friend) string { myFriendsSet := datautil.SliceSetAny(myFriends, func(friend *model.Friend) string {
return friend.FriendUserID return friend.FriendUserID
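The one-line fix above swaps mcontext.GetOperationID for mcontext.GetOpUserID: the former is a per-request trace ID, the latter the acting user's ID, so the old code wrote a trace ID where an operator user ID belongs. A toy sketch (not the project's mcontext implementation) of the two distinct values:

package main

import (
	"context"
	"fmt"
)

type ctxKey string

func main() {
	ctx := context.WithValue(context.Background(), ctxKey("operationID"), "op-7f3a91")
	ctx = context.WithValue(ctx, ctxKey("opUserID"), "user_10001")

	// Before the fix: a trace ID mistakenly used where a user ID is expected.
	fmt.Println("bad operator:", ctx.Value(ctxKey("operationID")))
	// After the fix: the acting user's ID.
	fmt.Println("good operator:", ctx.Value(ctxKey("opUserID")))
}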

@ -18,14 +18,14 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"strings" "strings"
"time" "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
pbmsg "github.com/openimsdk/protocol/msg" pbmsg "github.com/openimsdk/protocol/msg"
@ -46,16 +46,10 @@ const (
// CommonMsgDatabase defines the interface for message database operations. // CommonMsgDatabase defines the interface for message database operations.
type CommonMsgDatabase interface { type CommonMsgDatabase interface {
// BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation.
BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error
// RevokeMsg revokes a message in a conversation. // RevokeMsg revokes a message in a conversation.
RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error
// MarkSingleChatMsgsAsRead marks messages as read for a single chat by sequence numbers. // MarkSingleChatMsgsAsRead marks messages as read for a single chat by sequence numbers.
MarkSingleChatMsgsAsRead(ctx context.Context, userID string, conversationID string, seqs []int64) error MarkSingleChatMsgsAsRead(ctx context.Context, userID string, conversationID string, seqs []int64) error
// DeleteMessagesFromCache deletes message caches from Redis by sequence numbers.
DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error
// BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache.
BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error)
// GetMsgBySeqsRange retrieves messages from MongoDB by a range of sequence numbers. // GetMsgBySeqsRange retrieves messages from MongoDB by a range of sequence numbers.
GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (minSeq int64, maxSeq int64, seqMsg []*sdkws.MsgData, err error) GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (minSeq int64, maxSeq int64, seqMsg []*sdkws.MsgData, err error)
// GetMsgBySeqs retrieves messages for large groups from MongoDB by sequence numbers. // GetMsgBySeqs retrieves messages for large groups from MongoDB by sequence numbers.
@ -89,16 +83,16 @@ type CommonMsgDatabase interface {
// to mq // to mq
MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error
MsgToPushMQ(ctx context.Context, key, conversarionID string, msg2mq *sdkws.MsgData) (int32, int64, error)
MsgToMongoMQ(ctx context.Context, key, conversarionID string, msgs []*sdkws.MsgData, lastSeq int64) error
RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error)
RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error) RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error)
ConvertMsgsDocLen(ctx context.Context, conversationIDs []string) ConvertMsgsDocLen(ctx context.Context, conversationIDs []string)
// clear msg // clear msg
GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) GetBeforeMsg(ctx context.Context, ts int64, docIds []string, limit int) ([]*model.MsgDocModel, error)
DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error)
GetDocIDs(ctx context.Context) ([]string, error)
} }
func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) { func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
@ -110,22 +104,12 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser
if err != nil { if err != nil {
return nil, err return nil, err
} }
producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic)
if err != nil {
return nil, err
}
producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic)
if err != nil {
return nil, err
}
return &commonMsgDatabase{ return &commonMsgDatabase{
msgDocDatabase: msgDocModel, msgDocDatabase: msgDocModel,
msg: msg, msg: msg,
seqUser: seqUser, seqUser: seqUser,
seqConversation: seqConversation, seqConversation: seqConversation,
producer: producerToRedis, producer: producerToRedis,
producerToMongo: producerToMongo,
producerToPush: producerToPush,
}, nil }, nil
} }
@ -136,8 +120,6 @@ type commonMsgDatabase struct {
seqConversation cache.SeqConversationCache seqConversation cache.SeqConversationCache
seqUser cache.SeqUser seqUser cache.SeqUser
producer *kafka.Producer producer *kafka.Producer
producerToMongo *kafka.Producer
producerToPush *kafka.Producer
} }
func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error { func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error {
@ -145,23 +127,6 @@ func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sd
return err return err
} }
func (db *commonMsgDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) {
partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID})
if err != nil {
log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq)
return 0, 0, err
}
return partition, offset, nil
}
func (db *commonMsgDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error {
if len(messages) > 0 {
_, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages})
return err
}
return nil
}
func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error {
if len(fields) == 0 { if len(fields) == 0 {
return nil return nil
@ -263,52 +228,6 @@ func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationI
return nil return nil
} }
func (db *commonMsgDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error {
if len(msgList) == 0 {
return errs.ErrArgs.WrapMsg("msgList is empty")
}
msgs := make([]any, len(msgList))
for i, msg := range msgList {
if msg == nil {
continue
}
var offlinePushModel *model.OfflinePushModel
if msg.OfflinePushInfo != nil {
offlinePushModel = &model.OfflinePushModel{
Title: msg.OfflinePushInfo.Title,
Desc: msg.OfflinePushInfo.Desc,
Ex: msg.OfflinePushInfo.Ex,
IOSPushSound: msg.OfflinePushInfo.IOSPushSound,
IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount,
}
}
msgs[i] = &model.MsgDataModel{
SendID: msg.SendID,
RecvID: msg.RecvID,
GroupID: msg.GroupID,
ClientMsgID: msg.ClientMsgID,
ServerMsgID: msg.ServerMsgID,
SenderPlatformID: msg.SenderPlatformID,
SenderNickname: msg.SenderNickname,
SenderFaceURL: msg.SenderFaceURL,
SessionType: msg.SessionType,
MsgFrom: msg.MsgFrom,
ContentType: msg.ContentType,
Content: string(msg.Content),
Seq: msg.Seq,
SendTime: msg.SendTime,
CreateTime: msg.CreateTime,
Status: msg.Status,
Options: msg.Options,
OfflinePush: offlinePushModel,
AtUserIDList: msg.AtUserIDList,
AttachedInfo: msg.AttachedInfo,
Ex: msg.Ex,
}
}
return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq)
}
func (db *commonMsgDatabase) RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error { func (db *commonMsgDatabase) RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error {
return db.BatchInsertBlock(ctx, conversationID, []any{revoke}, updateKeyRevoke, seq) return db.BatchInsertBlock(ctx, conversationID, []any{revoke}, updateKeyRevoke, seq)
} }
@ -328,56 +247,6 @@ func (db *commonMsgDatabase) MarkSingleChatMsgsAsRead(ctx context.Context, userI
return nil return nil
} }
func (db *commonMsgDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error {
return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs)
}
func (db *commonMsgDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error {
for userID, seq := range userSeqMap {
if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil {
return err
}
}
return nil
}
func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
lenList := len(msgs)
if int64(lenList) > db.msgTable.GetSingleGocMsgNum() {
return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap()
}
if lenList < 1 {
return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap()
}
currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs)))
if err != nil {
log.ZError(ctx, "storage.seq.Malloc", err)
return 0, false, err
}
isNew = currentMaxSeq == 0
lastMaxSeq := currentMaxSeq
userSeqMap := make(map[string]int64)
for _, m := range msgs {
currentMaxSeq++
m.Seq = currentMaxSeq
userSeqMap[m.SendID] = m.Seq
}
failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs)
if err != nil {
prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum))
log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID)
} else {
prommetrics.MsgInsertRedisSuccessCounter.Inc()
}
err = db.setHasReadSeqs(ctx, conversationID, userSeqMap)
if err != nil {
log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID)
prommetrics.SeqSetFailedCounter.Inc()
}
return lastMaxSeq, isNew, errs.Wrap(err)
}
func (db *commonMsgDatabase) getMsgBySeqs(ctx context.Context, userID, conversationID string, seqs []int64) (totalMsgs []*sdkws.MsgData, err error) { func (db *commonMsgDatabase) getMsgBySeqs(ctx context.Context, userID, conversationID string, seqs []int64) (totalMsgs []*sdkws.MsgData, err error) {
for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, seqs) { for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, seqs) {
// log.ZDebug(ctx, "getMsgBySeqs", "docID", docID, "seqs", seqs) // log.ZDebug(ctx, "getMsgBySeqs", "docID", docID, "seqs", seqs)
@ -912,8 +781,25 @@ func (db *commonMsgDatabase) ConvertMsgsDocLen(ctx context.Context, conversation
	db.msgDocDatabase.ConvertMsgsDocLen(ctx, conversationIDs)
}

func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) {
	return db.msgDocDatabase.GetBeforeMsg(ctx, ts, limit)
}

func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) {
	var msgs []*model.MsgDocModel
	for i := 0; i < len(docIDs); i += 1000 {
		end := i + 1000
		if end > len(docIDs) {
			end = len(docIDs)
		}
		res, err := db.msgDocDatabase.GetBeforeMsg(ctx, ts, docIDs[i:end], limit)
		if err != nil {
			return nil, err
		}
		msgs = append(msgs, res...)
		if len(msgs) >= limit {
			return msgs[:limit], nil
		}
	}
	return msgs, nil
}
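The rewritten GetBeforeMsg fans the doc-ID list out in fixed chunks of 1000 (keeping the Mongo $in filter bounded) and stops as soon as limit docs are collected. The same pattern as a standalone sketch:

package main

import "fmt"

// queryInChunks queries ids in fixed-size batches and returns early
// once limit results have accumulated.
func queryInChunks(ids []string, chunk, limit int, query func([]string) []string) []string {
	var out []string
	for i := 0; i < len(ids); i += chunk {
		end := i + chunk
		if end > len(ids) {
			end = len(ids)
		}
		out = append(out, query(ids[i:end])...)
		if len(out) >= limit {
			return out[:limit]
		}
	}
	return out
}

func main() {
	ids := []string{"a", "b", "c", "d", "e"}
	res := queryInChunks(ids, 2, 3, func(batch []string) []string { return batch })
	fmt.Println(res) // [a b c]
}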
func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) { func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) {
@ -936,8 +822,10 @@ func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, d
return index, err return index, err
} }
if len(index) == notNull { if len(index) == notNull {
log.ZDebug(ctx, "Delete db in Doc", "DocID", doc.DocID, "index", index, "maxSeq", maxSeq)
return index, db.msgDocDatabase.DeleteDoc(ctx, doc.DocID) return index, db.msgDocDatabase.DeleteDoc(ctx, doc.DocID)
} else { } else {
log.ZDebug(ctx, "delete db in index", "DocID", doc.DocID, "index", index, "maxSeq", maxSeq)
return index, db.msgDocDatabase.DeleteMsgByIndex(ctx, doc.DocID, index) return index, db.msgDocDatabase.DeleteMsgByIndex(ctx, doc.DocID, index)
} }
} }
@ -955,3 +843,7 @@ func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID strin
} }
return db.seqConversation.SetMinSeq(ctx, conversationID, seq) return db.seqConversation.SetMinSeq(ctx, conversationID, seq)
} }
func (db *commonMsgDatabase) GetDocIDs(ctx context.Context) ([]string, error) {
return db.msgDocDatabase.GetDocIDs(ctx)
}

@ -0,0 +1,286 @@
package controller
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
pbmsg "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"go.mongodb.org/mongo-driver/mongo"
)
type MsgTransferDatabase interface {
// BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation.
BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error
// DeleteMessagesFromCache deletes message caches from Redis by sequence numbers.
DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error
// BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache.
BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error)
SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
// to mq
MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error)
MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error
}
func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (MsgTransferDatabase, error) {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil {
return nil, err
}
producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic)
if err != nil {
return nil, err
}
producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic)
if err != nil {
return nil, err
}
return &msgTransferDatabase{
msgDocDatabase: msgDocModel,
msg: msg,
seqUser: seqUser,
seqConversation: seqConversation,
producerToMongo: producerToMongo,
producerToPush: producerToPush,
}, nil
}
type msgTransferDatabase struct {
msgDocDatabase database.Msg
msgTable model.MsgDocModel
msg cache.MsgCache
seqConversation cache.SeqConversationCache
seqUser cache.SeqUser
producerToMongo *kafka.Producer
producerToPush *kafka.Producer
}
func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error {
if len(msgList) == 0 {
return errs.ErrArgs.WrapMsg("msgList is empty")
}
msgs := make([]any, len(msgList))
for i, msg := range msgList {
if msg == nil {
continue
}
var offlinePushModel *model.OfflinePushModel
if msg.OfflinePushInfo != nil {
offlinePushModel = &model.OfflinePushModel{
Title: msg.OfflinePushInfo.Title,
Desc: msg.OfflinePushInfo.Desc,
Ex: msg.OfflinePushInfo.Ex,
IOSPushSound: msg.OfflinePushInfo.IOSPushSound,
IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount,
}
}
msgs[i] = &model.MsgDataModel{
SendID: msg.SendID,
RecvID: msg.RecvID,
GroupID: msg.GroupID,
ClientMsgID: msg.ClientMsgID,
ServerMsgID: msg.ServerMsgID,
SenderPlatformID: msg.SenderPlatformID,
SenderNickname: msg.SenderNickname,
SenderFaceURL: msg.SenderFaceURL,
SessionType: msg.SessionType,
MsgFrom: msg.MsgFrom,
ContentType: msg.ContentType,
Content: string(msg.Content),
Seq: msg.Seq,
SendTime: msg.SendTime,
CreateTime: msg.CreateTime,
Status: msg.Status,
Options: msg.Options,
OfflinePush: offlinePushModel,
AtUserIDList: msg.AtUserIDList,
AttachedInfo: msg.AttachedInfo,
Ex: msg.Ex,
}
}
return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq)
}
func (db *msgTransferDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error {
if len(fields) == 0 {
return nil
}
num := db.msgTable.GetSingleGocMsgNum()
// num = 100
for i, field := range fields { // Check the type of the field
var ok bool
switch key {
case updateKeyMsg:
var msg *model.MsgDataModel
msg, ok = field.(*model.MsgDataModel)
if msg != nil && msg.Seq != firstSeq+int64(i) {
return errs.ErrInternalServer.WrapMsg("seq is invalid")
}
case updateKeyRevoke:
_, ok = field.(*model.RevokeModel)
default:
return errs.ErrInternalServer.WrapMsg("key is invalid")
}
if !ok {
return errs.ErrInternalServer.WrapMsg("field type is invalid")
}
}
// Returns true if the document exists in the database, false if the document does not exist in the database
updateMsgModel := func(seq int64, i int) (bool, error) {
var (
res *mongo.UpdateResult
err error
)
docID := db.msgTable.GetDocID(conversationID, seq)
index := db.msgTable.GetMsgIndex(seq)
field := fields[i]
switch key {
case updateKeyMsg:
res, err = db.msgDocDatabase.UpdateMsg(ctx, docID, index, "msg", field)
case updateKeyRevoke:
res, err = db.msgDocDatabase.UpdateMsg(ctx, docID, index, "revoke", field)
}
if err != nil {
return false, err
}
return res.MatchedCount > 0, nil
}
tryUpdate := true
for i := 0; i < len(fields); i++ {
seq := firstSeq + int64(i) // Current sequence number
if tryUpdate {
matched, err := updateMsgModel(seq, i)
if err != nil {
return err
}
if matched {
continue // The current data has been updated, skip the current data
}
}
doc := model.MsgDocModel{
DocID: db.msgTable.GetDocID(conversationID, seq),
Msg: make([]*model.MsgInfoModel, num),
}
var insert int // Inserted data number
for j := i; j < len(fields); j++ {
seq = firstSeq + int64(j)
if db.msgTable.GetDocID(conversationID, seq) != doc.DocID {
break
}
insert++
switch key {
case updateKeyMsg:
doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{
Msg: fields[j].(*model.MsgDataModel),
}
case updateKeyRevoke:
doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{
Revoke: fields[j].(*model.RevokeModel),
}
}
}
for i, msgInfo := range doc.Msg {
if msgInfo == nil {
msgInfo = &model.MsgInfoModel{}
doc.Msg[i] = msgInfo
}
if msgInfo.DelList == nil {
doc.Msg[i].DelList = []string{}
}
}
if err := db.msgDocDatabase.Create(ctx, &doc); err != nil {
if mongo.IsDuplicateKeyError(err) {
i-- // already inserted
tryUpdate = true // next block use update mode
continue
}
return err
}
tryUpdate = false // The current block is inserted successfully, and the next block is inserted preferentially
i += insert - 1 // Skip the inserted data
}
return nil
}
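BatchInsertBlock above leans on the seq → (docID, index) mapping from GetDocID/GetMsgIndex. The arithmetic in isolation, assuming 100 messages per doc (the real singleGocMsgNum may differ):

package main

import (
	"fmt"
	"strconv"
)

const msgsPerDoc int64 = 100

// docID places seq 1..100 in doc 0, 101..200 in doc 1, and so on.
func docID(conversationID string, seq int64) string {
	return conversationID + ":" + strconv.FormatInt((seq-1)/msgsPerDoc, 10)
}

// msgIndex is the position of seq inside its doc's msgs array.
func msgIndex(seq int64) int64 {
	return (seq - 1) % msgsPerDoc
}

func main() {
	fmt.Println(docID("si_a_b", 250), msgIndex(250)) // si_a_b:2 49
}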
func (db *msgTransferDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error {
return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs)
}
func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
lenList := len(msgs)
if int64(lenList) > db.msgTable.GetSingleGocMsgNum() {
return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap()
}
if lenList < 1 {
return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap()
}
currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs)))
if err != nil {
log.ZError(ctx, "storage.seq.Malloc", err)
return 0, false, err
}
isNew = currentMaxSeq == 0
lastMaxSeq := currentMaxSeq
userSeqMap := make(map[string]int64)
for _, m := range msgs {
currentMaxSeq++
m.Seq = currentMaxSeq
userSeqMap[m.SendID] = m.Seq
}
failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs)
if err != nil {
prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum))
log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID)
} else {
prommetrics.MsgInsertRedisSuccessCounter.Inc()
}
err = db.setHasReadSeqs(ctx, conversationID, userSeqMap)
if err != nil {
log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID)
prommetrics.SeqSetFailedCounter.Inc()
}
return lastMaxSeq, isNew, errs.Wrap(err)
}
func (db *msgTransferDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error {
for userID, seq := range userSeqMap {
if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil {
return err
}
}
return nil
}
func (db *msgTransferDatabase) SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
return db.seqUser.SetUserReadSeqToDB(ctx, conversationID, userID, hasReadSeq)
}
func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) {
partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID})
if err != nil {
log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq)
return 0, 0, err
}
return partition, offset, nil
}
func (db *msgTransferDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error {
if len(messages) > 0 {
_, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages})
if err != nil {
log.ZError(ctx, "MsgToMongoMQ", err, "key", key, "conversationID", conversationID, "lastSeq", lastSeq)
return err
}
}
return nil
}
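A rough sketch of how a transfer worker is expected to drive this interface end to end: allocate seqs into the cache, then hand the batch to the Mongo writer over MQ. Types and flow here are simplified stand-ins, not the project's code:

package main

import (
	"context"
	"fmt"
)

type Msg struct{ Seq int64 }

type TransferDB interface {
	BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*Msg) (int64, bool, error)
	MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*Msg, lastSeq int64) error
}

type fakeDB struct{ seq int64 }

func (f *fakeDB) BatchInsertChat2Cache(_ context.Context, _ string, msgs []*Msg) (int64, bool, error) {
	last := f.seq // max seq before this batch, mirroring lastMaxSeq above
	for _, m := range msgs {
		f.seq++
		m.Seq = f.seq
	}
	return last, last == 0, nil
}

func (f *fakeDB) MsgToMongoMQ(_ context.Context, _, _ string, msgs []*Msg, lastSeq int64) error {
	fmt.Println("to mongo:", len(msgs), "msgs, lastSeq:", lastSeq)
	return nil
}

func main() {
	var db TransferDB = &fakeDB{}
	ctx := context.Background()
	msgs := []*Msg{{}, {}}
	lastSeq, isNew, _ := db.BatchInsertChat2Cache(ctx, "si_a_b", msgs)
	fmt.Println("lastSeq:", lastSeq, "newConversation:", isNew)
	_ = db.MsgToMongoMQ(ctx, "si_a_b", "si_a_b", msgs, msgs[len(msgs)-1].Seq)
}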

@ -17,21 +17,45 @@ package controller
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
) )
type PushDatabase interface { type PushDatabase interface {
DelFcmToken(ctx context.Context, userID string, platformID int) error DelFcmToken(ctx context.Context, userID string, platformID int) error
MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error
} }
type pushDataBase struct { type pushDataBase struct {
cache cache.ThirdCache cache cache.ThirdCache
producerToOfflinePush *kafka.Producer
} }
func NewPushDatabase(cache cache.ThirdCache) PushDatabase {
	return &pushDataBase{cache: cache}
}

func NewPushDatabase(cache cache.ThirdCache, kafkaConf *config.Kafka) PushDatabase {
	conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
	if err != nil {
		return nil
	}
	producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic)
	if err != nil {
		return nil
	}
	return &pushDataBase{
		cache:                 cache,
		producerToOfflinePush: producerToOfflinePush,
	}
}
func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformID int) error { func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformID int) error {
return p.cache.DelFcmToken(ctx, userID, platformID) return p.cache.DelFcmToken(ctx, userID, platformID)
} }
func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error {
_, _, err := p.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs})
log.ZInfo(ctx, "message is pushed to the offlinePush topic", "key", key, "userIDs", userIDs, "msg", msg2mq.String())
return err
}
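Note that NewPushDatabase returns nil when Kafka producer construction fails, so callers receive a nil PushDatabase rather than an error. For comparison, a sketch of an error-returning constructor (assumed types, not the project's API):

package main

import (
	"errors"
	"fmt"
)

type producer struct{}

func newProducer(addr string) (*producer, error) {
	if addr == "" {
		return nil, errors.New("no kafka address")
	}
	return &producer{}, nil
}

type pushDB struct{ toOfflinePush *producer }

// newPushDB surfaces the construction error instead of returning nil.
func newPushDB(addr string) (*pushDB, error) {
	p, err := newProducer(addr)
	if err != nil {
		return nil, fmt.Errorf("build offline-push producer: %w", err)
	}
	return &pushDB{toOfflinePush: p}, nil
}

func main() {
	if _, err := newPushDB(""); err != nil {
		fmt.Println("construction failed:", err)
	}
}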

@ -8,6 +8,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"golang.org/x/exp/rand"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msg"
@ -117,9 +118,9 @@ func (m *MsgMgo) GetMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID strin
} }
func (m *MsgMgo) getMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID string, seqs []int64) ([]*model.MsgInfoModel, error) { func (m *MsgMgo) getMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID string, seqs []int64) ([]*model.MsgInfoModel, error) {
indexs := make([]int64, 0, len(seqs)) indexes := make([]int64, 0, len(seqs))
for _, seq := range seqs { for _, seq := range seqs {
indexs = append(indexs, m.model.GetMsgIndex(seq)) indexes = append(indexes, m.model.GetMsgIndex(seq))
} }
pipeline := mongo.Pipeline{ pipeline := mongo.Pipeline{
bson.D{{Key: "$match", Value: bson.D{ bson.D{{Key: "$match", Value: bson.D{
@ -130,7 +131,7 @@ func (m *MsgMgo) getMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID strin
{Key: "doc_id", Value: 1}, {Key: "doc_id", Value: 1},
{Key: "msgs", Value: bson.D{ {Key: "msgs", Value: bson.D{
{Key: "$map", Value: bson.D{ {Key: "$map", Value: bson.D{
{Key: "input", Value: indexs}, {Key: "input", Value: indexes},
{Key: "as", Value: "index"}, {Key: "as", Value: "index"},
{Key: "in", Value: bson.D{ {Key: "in", Value: bson.D{
{Key: "$arrayElemAt", Value: bson.A{"$msgs", "$$index"}}, {Key: "$arrayElemAt", Value: bson.A{"$msgs", "$$index"}},
@ -1226,10 +1227,53 @@ func (m *MsgMgo) ConvertMsgsDocLen(ctx context.Context, conversationIDs []string
	}
}

func (m *MsgMgo) GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) {
	return mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{
		{
			"$match": bson.M{
				"msgs.msg.send_time": bson.M{
					"$lt": ts,
				},

func (m *MsgMgo) GetDocIDs(ctx context.Context) ([]string, error) {
	limit := 5000
	var skip int
	var docIDs []string
	var offset int

	count, err := m.coll.CountDocuments(ctx, bson.M{})
	if err != nil {
		return nil, err
	}

	if count < int64(limit) {
		skip = 0
	} else {
		rand.Seed(uint64(time.Now().UnixMilli()))
		skip = rand.Intn(int(count / int64(limit)))
		offset = skip * limit
	}
	log.ZDebug(ctx, "GetDocIDs sample window", "skip", skip, "offset", offset)

	res, err := mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{
		{
			"$project": bson.M{
				"doc_id": 1,
			},
		},
		{
			"$skip": offset,
		},
		{
			"$limit": limit,
		},
	})
	for _, doc := range res {
		docIDs = append(docIDs, doc.DocID)
	}

	return docIDs, errs.Wrap(err)
}

func (m *MsgMgo) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) {
	return mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{
		{
			"$match": bson.M{
				"doc_id": bson.M{
					"$in": docIDs,
				},
				"msgs.msg.send_time": bson.M{
					"$lt": ts,
				},
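GetDocIDs above samples one random window of limit doc IDs by skipping a whole multiple of limit. The arithmetic in isolation — note that with count=12000 and limit=5000 the integer division gives skip ∈ {0, 1}, so the tail window 10000..11999 is never selected by a single call:

package main

import (
	"fmt"
	"math/rand"
)

// sampleWindow returns the skip multiplier and document offset for one
// random limit-sized window, mirroring the count/limit logic in GetDocIDs.
func sampleWindow(count, limit int64) (skip, offset int64) {
	if count < limit {
		return 0, 0
	}
	skip = rand.Int63n(count / limit)
	return skip, skip * limit
}

func main() {
	fmt.Println(sampleWindow(12000, 5000)) // skip 0 or 1, offset 0 or 5000
}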

@ -115,5 +115,12 @@ func (s *seqUserMongo) GetUserReadSeqs(ctx context.Context, userID string, conve
} }
func (s *seqUserMongo) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error { func (s *seqUserMongo) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID)
if err != nil {
return err
}
if dbSeq > seq {
return nil
}
return s.setSeq(ctx, conversationID, userID, seq, "read_seq") return s.setSeq(ctx, conversationID, userID, seq, "read_seq")
} }

@ -167,6 +167,10 @@ func (u *UserMgo) DeleteUserCommand(ctx context.Context, userID string, Type int
filter := bson.M{"userID": userID, "type": Type, "uuid": UUID} filter := bson.M{"userID": userID, "type": Type, "uuid": UUID}
result, err := collection.DeleteOne(ctx, filter) result, err := collection.DeleteOne(ctx, filter)
// when err is not nil, result might be nil
if err != nil {
return errs.Wrap(err)
}
if result.DeletedCount == 0 { if result.DeletedCount == 0 {
// No records found to update // No records found to update
return errs.Wrap(errs.ErrRecordNotFound) return errs.Wrap(errs.ErrRecordNotFound)
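The added guard is the usual mongo-go-driver pattern: when err is non-nil the result may be nil, so reading DeletedCount first can panic. Generic form of the pattern:

package main

import (
	"errors"
	"fmt"
)

type deleteResult struct{ DeletedCount int64 }

func deleteOne(fail bool) (*deleteResult, error) {
	if fail {
		return nil, errors.New("network error")
	}
	return &deleteResult{DeletedCount: 0}, nil
}

func remove(fail bool) error {
	res, err := deleteOne(fail)
	if err != nil { // check err before touching res
		return err
	}
	if res.DeletedCount == 0 {
		return errors.New("record not found")
	}
	return nil
}

func main() { fmt.Println(remove(true), remove(false)) }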

@ -16,10 +16,11 @@ package database
import ( import (
"context" "context"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msg"
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo"
"time"
) )
type Msg interface { type Msg interface {
@ -44,5 +45,7 @@ type Msg interface {
DeleteDoc(ctx context.Context, docID string) error DeleteDoc(ctx context.Context, docID string) error
DeleteMsgByIndex(ctx context.Context, docID string, index []int) error DeleteMsgByIndex(ctx context.Context, docID string, index []int) error
GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error)
GetDocIDs(ctx context.Context) ([]string, error)
} }

@ -92,15 +92,15 @@ type GroupCount struct {
Count int64 `bson:"count"` Count int64 `bson:"count"`
} }
func (MsgDocModel) TableName() string { func (*MsgDocModel) TableName() string {
return MsgTableName return MsgTableName
} }
func (MsgDocModel) GetSingleGocMsgNum() int64 { func (*MsgDocModel) GetSingleGocMsgNum() int64 {
return singleGocMsgNum return singleGocMsgNum
} }
func (MsgDocModel) GetSingleGocMsgNum5000() int64 { func (*MsgDocModel) GetSingleGocMsgNum5000() int64 {
return singleGocMsgNum5000 return singleGocMsgNum5000
} }
@ -108,12 +108,12 @@ func (m *MsgDocModel) IsFull() bool {
return m.Msg[len(m.Msg)-1].Msg != nil return m.Msg[len(m.Msg)-1].Msg != nil
} }
func (m MsgDocModel) GetDocID(conversationID string, seq int64) string { func (m *MsgDocModel) GetDocID(conversationID string, seq int64) string {
seqSuffix := (seq - 1) / singleGocMsgNum seqSuffix := (seq - 1) / singleGocMsgNum
return m.indexGen(conversationID, seqSuffix) return m.indexGen(conversationID, seqSuffix)
} }
func (m MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[string][]int64 { func (m *MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[string][]int64 {
t := make(map[string][]int64) t := make(map[string][]int64)
for i := 0; i < len(seqs); i++ { for i := 0; i < len(seqs); i++ {
docID := m.GetDocID(conversationID, seqs[i]) docID := m.GetDocID(conversationID, seqs[i])
@ -127,15 +127,15 @@ func (m MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[st
return t return t
} }
func (MsgDocModel) GetMsgIndex(seq int64) int64 { func (*MsgDocModel) GetMsgIndex(seq int64) int64 {
return (seq - 1) % singleGocMsgNum return (seq - 1) % singleGocMsgNum
} }
func (MsgDocModel) indexGen(conversationID string, seqSuffix int64) string { func (*MsgDocModel) indexGen(conversationID string, seqSuffix int64) string {
return conversationID + ":" + strconv.FormatInt(seqSuffix, 10) return conversationID + ":" + strconv.FormatInt(seqSuffix, 10)
} }
func (MsgDocModel) GenExceptionMessageBySeqs(seqs []int64) (exceptionMsg []*sdkws.MsgData) { func (*MsgDocModel) GenExceptionMessageBySeqs(seqs []int64) (exceptionMsg []*sdkws.MsgData) {
for _, v := range seqs { for _, v := range seqs {
msgModel := new(sdkws.MsgData) msgModel := new(sdkws.MsgData)
msgModel.Seq = v msgModel.Seq = v

@ -20,7 +20,9 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V]
type LRU[K comparable, V any] interface { type LRU[K comparable, V any] interface {
Get(key K, fetch func() (V, error)) (V, error) Get(key K, fetch func() (V, error)) (V, error)
Set(key K, value V)
SetHas(key K, value V) bool SetHas(key K, value V) bool
GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error)
Del(key K) bool Del(key K) bool
Stop() Stop()
} }
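The new GetBatch contract: cache hits are answered locally and only the misses reach the fetch callback, whose results are cached for next time. An illustrative stub (not the project's LRU):

package main

import "fmt"

type batchCache struct{ m map[string][]int32 }

func (c *batchCache) GetBatch(keys []string, fetch func([]string) (map[string][]int32, error)) (map[string][]int32, error) {
	res := make(map[string][]int32)
	var miss []string
	for _, k := range keys {
		if v, ok := c.m[k]; ok {
			res[k] = v
			continue
		}
		miss = append(miss, k)
	}
	if len(miss) > 0 {
		fetched, err := fetch(miss)
		if err != nil {
			return nil, err
		}
		for k, v := range fetched {
			c.m[k] = v // cache the fetched values
			res[k] = v
		}
	}
	return res, nil
}

func main() {
	c := &batchCache{m: map[string][]int32{"u1": {1}}}
	got, _ := c.GetBatch([]string{"u1", "u2"}, func(miss []string) (map[string][]int32, error) {
		fmt.Println("fetching:", miss) // only u2
		out := make(map[string][]int32)
		for _, k := range miss {
			out[k] = nil // offline
		}
		return out, nil
	})
	fmt.Println(got)
}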

@ -51,6 +51,11 @@ type ExpirationLRU[K comparable, V any] struct {
target Target target Target
} }
func (x *ExpirationLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
//TODO implement me
panic("implement me")
}
func (x *ExpirationLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { func (x *ExpirationLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
x.lock.Lock() x.lock.Lock()
v, ok := x.core.Get(key) v, ok := x.core.Get(key)
@ -99,5 +104,11 @@ func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool {
return false return false
} }
func (x *ExpirationLRU[K, V]) Set(key K, value V) {
x.lock.Lock()
defer x.lock.Unlock()
x.core.Add(key, &expirationLruItem[V]{value: value})
}
func (x *ExpirationLRU[K, V]) Stop() { func (x *ExpirationLRU[K, V]) Stop() {
} }

@ -88,18 +88,75 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
	return v.value, v.err
}

//func (x *LayLRU[K, V]) Set(key K, value V) {
//	x.lock.Lock()
//	x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
//	x.lock.Unlock()
//}
//

func (x *LayLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
	var (
		err  error
		once sync.Once
	)

	res := make(map[K]V)
	queries := make([]K, 0)
	setVs := make(map[K]*layLruItem[V])
	for _, key := range keys {
		// take the lock per lookup; locking once before the loop would
		// unlock an already-unlocked mutex from the second iteration on
		x.lock.Lock()
		v, ok := x.core.Get(key)
		x.lock.Unlock()
		if ok {
			v.lock.Lock()
			expires, value, err1 := v.expires, v.value, v.err
			v.lock.Unlock()
			if expires != 0 && expires > time.Now().UnixMilli() {
				x.target.IncrGetHit()
				res[key] = value
				if err1 != nil {
					once.Do(func() {
						err = err1
					})
				}
				continue
			}
		}
		queries = append(queries, key)
	}
	values, err1 := fetch(queries)
	if err1 != nil {
		once.Do(func() {
			err = err1
		})
	}
	for key, val := range values {
		v := &layLruItem[V]{}
		v.value = val
		if err == nil {
			v.expires = time.Now().Add(x.successTTL).UnixMilli()
			x.target.IncrGetSuccess()
		} else {
			v.expires = time.Now().Add(x.failedTTL).UnixMilli()
			x.target.IncrGetFailed()
		}
		setVs[key] = v
		x.lock.Lock()
		x.core.Add(key, v)
		x.lock.Unlock()
		res[key] = val
	}
	return res, err
}
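The once.Do dance in GetBatch keeps only the first error seen across stale hits and the fetch call. Reduced to its skeleton:

package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	var (
		err  error
		once sync.Once
	)
	record := func(e error) {
		if e != nil {
			once.Do(func() { err = e }) // first error wins
		}
	}
	record(errors.New("first"))
	record(errors.New("second")) // ignored
	fmt.Println(err)             // first
}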
//func (x *LayLRU[K, V]) Has(key K) bool { //func (x *LayLRU[K, V]) Has(key K) bool {
// x.lock.Lock() // x.lock.Lock()
// defer x.lock.Unlock() // defer x.lock.Unlock()
// return x.core.Contains(key) // return x.core.Contains(key)
//} //}
func (x *LayLRU[K, V]) Set(key K, value V) {
x.lock.Lock()
defer x.lock.Unlock()
x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
}
func (x *LayLRU[K, V]) SetHas(key K, value V) bool { func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
x.lock.Lock() x.lock.Lock()
defer x.lock.Unlock() defer x.lock.Unlock()

@ -32,6 +32,29 @@ type slotLRU[K comparable, V any] struct {
hash func(k K) uint64 hash func(k K) uint64
} }
func (x *slotLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
var (
slotKeys = make(map[uint64][]K)
vs = make(map[K]V)
)
for _, k := range keys {
index := x.getIndex(k)
slotKeys[index] = append(slotKeys[index], k)
}
for k, v := range slotKeys {
batches, err := x.slots[k].GetBatch(v, fetch)
if err != nil {
return nil, err
}
for key, value := range batches {
vs[key] = value
}
}
return vs, nil
}
func (x *slotLRU[K, V]) getIndex(k K) uint64 { func (x *slotLRU[K, V]) getIndex(k K) uint64 {
return x.hash(k) % x.n return x.hash(k) % x.n
} }
@ -40,6 +63,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return x.slots[x.getIndex(key)].Get(key, fetch) return x.slots[x.getIndex(key)].Get(key, fetch)
} }
func (x *slotLRU[K, V]) Set(key K, value V) {
x.slots[x.getIndex(key)].Set(key, value)
}
func (x *slotLRU[K, V]) SetHas(key K, value V) bool { func (x *slotLRU[K, V]) SetHas(key K, value V) bool {
return x.slots[x.getIndex(key)].SetHas(key, value) return x.slots[x.getIndex(key)].SetHas(key, value)
} }
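slotLRU.GetBatch regroups the requested keys per slot before delegating, so each inner LRU sees one batch. The sharding step in miniature (hash function and slot count are illustrative):

package main

import (
	"fmt"
	"hash/fnv"
)

// slotOf hashes a key onto one of n slots.
func slotOf(key string, n uint64) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key))
	return h.Sum64() % n
}

// groupBySlot buckets keys by slot index, as GetBatch does before fan-out.
func groupBySlot(keys []string, n uint64) map[uint64][]string {
	groups := make(map[uint64][]string)
	for _, k := range keys {
		groups[slotOf(k, n)] = append(groups[slotOf(k, n)], k)
	}
	return groups
}

func main() {
	fmt.Println(groupBySlot([]string{"u1", "u2", "u3"}, 4))
}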

@ -86,7 +86,7 @@ func (c *ConversationLocalCache) GetConversation(ctx context.Context, userID, co
if err == nil { if err == nil {
log.ZDebug(ctx, "ConversationLocalCache GetConversation return", "userID", userID, "conversationID", conversationID, "value", val) log.ZDebug(ctx, "ConversationLocalCache GetConversation return", "userID", userID, "conversationID", conversationID, "value", val)
} else { } else {
log.ZError(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID) log.ZWarn(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID)
} }
}() }()
var cache cacheProto[pbconversation.Conversation] var cache cacheProto[pbconversation.Conversation]

@ -2,60 +2,197 @@ package rpccache
import ( import (
"context" "context"
"fmt"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/user"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru" "github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/useronline" "github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
"github.com/openimsdk/tools/db/cacheutil"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"math/rand"
"strconv"
"time"
) )
func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache {
	x := &OnlineCache{
		user:  user,
		group: group,
		local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
			return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
		}),
	}
	go func() {
		ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10))
		for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() {
			userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
			if err != nil {
				log.ZError(ctx, "OnlineCache setUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
				continue
			}
			storageCache := x.setUserOnline(userID, platformIDs)
			log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
			if fn != nil {
				fn(ctx, userID, platformIDs)
			}
		}
	}()
	return x
}

type OnlineCache struct {
	user  rpcclient.UserRpcClient
	group *GroupLocalCache
	local lru.LRU[string, []int32]
}

func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) {
	l := &sync.Mutex{}
	x := &OnlineCache{
		user:          user,
		group:         group,
		fullUserCache: fullUserCache,
		Lock:          l,
		Cond:          sync.NewCond(l),
	}

	ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))

	switch x.fullUserCache {
	case true:
		log.ZDebug(ctx, "fullUserCache is true")
		x.mapCache = cacheutil.NewCache[string, []int32]()
		go func() {
			if err := x.initUsersOnlineStatus(ctx); err != nil {
				log.ZError(ctx, "initUsersOnlineStatus failed", err)
			}
		}()
	case false:
		log.ZDebug(ctx, "fullUserCache is false")
		x.lruCache = lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
			return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
		})
		x.CurrentPhase.Store(DoSubscribeOver)
		x.Cond.Broadcast()
	}

	go func() {
		x.doSubscribe(ctx, rdb, fn)
	}()
	return x, nil
}

const (
	Begin uint32 = iota
	DoOnlineStatusOver
	DoSubscribeOver
)

type OnlineCache struct {
	user  rpcclient.UserRpcClient
	group *GroupLocalCache

	// fullUserCache if enabled, caches the online status of all users using mapCache;
	// otherwise, only a portion of users' online statuses (regardless of whether they are online) will be cached using lruCache.
	fullUserCache bool

	lruCache lru.LRU[string, []int32]
	mapCache *cacheutil.Cache[string, []int32]

	Lock         *sync.Mutex
	Cond         *sync.Cond
	CurrentPhase atomic.Uint32
}

func (o *OnlineCache) initUsersOnlineStatus(ctx context.Context) (err error) {
	log.ZDebug(ctx, "init users online status begin")
	var (
		totalSet      atomic.Int64
		maxTries      = 5
		retryInterval = time.Second * 5
		resp          *user.GetAllOnlineUsersResp
	)
	defer func(t time.Time) {
		log.ZInfo(ctx, "init users online status end", "cost", time.Since(t), "totalSet", totalSet.Load())
		o.CurrentPhase.Store(DoOnlineStatusOver)
		o.Cond.Broadcast()
	}(time.Now())

	retryOperation := func(operation func() error, operationName string) error {
		for i := 0; i < maxTries; i++ {
			if err = operation(); err != nil {
				log.ZWarn(ctx, fmt.Sprintf("initUsersOnlineStatus: %s failed", operationName), err)
				time.Sleep(retryInterval)
			} else {
				return nil
			}
		}
		return err
	}

	cursor := uint64(0)
	for resp == nil || resp.NextCursor != 0 {
		if err = retryOperation(func() error {
			resp, err = o.user.GetAllOnlineUsers(ctx, cursor)
			if err != nil {
				return err
			}
			for _, u := range resp.StatusList {
				if u.Status == constant.Online {
					o.setUserOnline(u.UserID, u.PlatformIDs)
				}
				totalSet.Add(1)
			}
			cursor = resp.NextCursor
			return nil
		}, "getAllOnlineUsers"); err != nil {
			return err
		}
	}
	return nil
}

func (o *OnlineCache) doSubscribe(ctx context.Context, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) {
	o.Lock.Lock()
	ch := rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel()
	for o.CurrentPhase.Load() < DoOnlineStatusOver {
		o.Cond.Wait()
	}
	o.Lock.Unlock()
	log.ZInfo(ctx, "begin doSubscribe")

	doMessage := func(message *redis.Message) {
		userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
		if err != nil {
			log.ZError(ctx, "OnlineCache setHasUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
			return
		}
		log.ZDebug(ctx, fmt.Sprintf("get subscribe %s message", cachekey.OnlineChannel), "userID", userID, "platformIDs", platformIDs)
		switch o.fullUserCache {
		case true:
			if len(platformIDs) == 0 {
				// offline
				o.mapCache.Delete(userID)
			} else {
				o.mapCache.Store(userID, platformIDs)
			}
		case false:
			storageCache := o.setHasUserOnline(userID, platformIDs)
			log.ZDebug(ctx, "OnlineCache setHasUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
			if fn != nil {
				fn(ctx, userID, platformIDs)
			}
		}
	}

	if o.CurrentPhase.Load() == DoOnlineStatusOver {
		for done := false; !done; {
			select {
			case message := <-ch:
				doMessage(message)
			default:
				o.CurrentPhase.Store(DoSubscribeOver)
				o.Cond.Broadcast()
				done = true
			}
		}
	}

	for message := range ch {
		doMessage(message)
	}
}
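The Begin → DoOnlineStatusOver → DoSubscribeOver handshake: the subscriber blocks on the Cond until the full snapshot is loaded, drains whatever accumulated on the channel, then declares steady state. Its skeleton, reduced to a Cond plus an atomic phase:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	Begin uint32 = iota
	SnapshotDone
	DrainDone
)

func main() {
	var (
		mu    sync.Mutex
		cond  = sync.NewCond(&mu)
		phase atomic.Uint32
		wg    sync.WaitGroup
	)
	wg.Add(1)
	go func() { // subscriber
		defer wg.Done()
		mu.Lock()
		for phase.Load() < SnapshotDone {
			cond.Wait() // releases mu while waiting
		}
		mu.Unlock()
		// drain of buffered messages would happen here
		phase.Store(DrainDone)
		fmt.Println("subscriber: steady state")
	}()
	// loader: publish the snapshot, then wake the subscriber
	phase.Store(SnapshotDone)
	mu.Lock()
	cond.Broadcast()
	mu.Unlock()
	wg.Wait()
}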
func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) { func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
platformIDs, err := o.local.Get(userID, func() ([]int32, error) { platformIDs, err := o.lruCache.Get(userID, func() ([]int32, error) {
return o.user.GetUserOnlinePlatform(ctx, userID) return o.user.GetUserOnlinePlatform(ctx, userID)
}) })
if err != nil { if err != nil {
log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID) log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID)
return nil, err return nil, err
} }
log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs) //log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs)
return platformIDs, nil return platformIDs, nil
} }
@ -69,6 +206,16 @@ func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string)
return platformIDs, nil return platformIDs, nil
} }
// func (o *OnlineCache) GetUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string]int32, error) {
// platformIDs, err := o.getUserOnlinePlatform(ctx, userIDs)
// if err != nil {
// return nil, err
// }
// tmp := make([]int32, len(platformIDs))
// copy(tmp, platformIDs)
// return platformIDs, nil
// }
func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) { func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
platformIDs, err := o.getUserOnlinePlatform(ctx, userID) platformIDs, err := o.getUserOnlinePlatform(ctx, userID)
if err != nil { if err != nil {
@ -77,10 +224,68 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
return len(platformIDs) > 0, nil return len(platformIDs) > 0, nil
} }
func (o *OnlineCache) getUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string][]int32, error) {
platformIDsMap, err := o.lruCache.GetBatch(userIDs, func(missingUsers []string) (map[string][]int32, error) {
platformIDsMap := make(map[string][]int32)
usersStatus, err := o.user.GetUsersOnlinePlatform(ctx, missingUsers)
if err != nil {
return nil, err
}
for _, u := range usersStatus {
platformIDsMap[u.UserID] = u.PlatformIDs
}
return platformIDsMap, nil
})
if err != nil {
log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userIDs", userIDs)
return nil, err
}
return platformIDsMap, nil
}
func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, []string, error) {
t := time.Now()
var (
onlineUserIDs = make([]string, 0, len(userIDs))
offlineUserIDs = make([]string, 0, len(userIDs))
)
switch o.fullUserCache {
case true:
for _, userID := range userIDs {
if _, ok := o.mapCache.Load(userID); ok {
onlineUserIDs = append(onlineUserIDs, userID)
} else {
offlineUserIDs = append(offlineUserIDs, userID)
}
}
case false:
userOnlineMap, err := o.getUserOnlinePlatformBatch(ctx, userIDs)
if err != nil {
return nil, nil, err
}
for key, value := range userOnlineMap {
if len(value) > 0 {
onlineUserIDs = append(onlineUserIDs, key)
} else {
offlineUserIDs = append(offlineUserIDs, key)
}
}
}
log.ZInfo(ctx, "get users online", "online users length", len(userIDs), "offline users length", len(offlineUserIDs), "cost", time.Since(t))
return userIDs, offlineUserIDs, nil
}
//func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) { //func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
// onlineUserIDs := make([]string, 0, len(userIDs)) // onlineUserIDs := make([]string, 0, len(userIDs))
// for _, userID := range userIDs { // for _, userID := range userIDs {
// online, err := o.GetUserOnline(ctx, userID) // online, err := o.GetUserOnline(ctx, userID)
// if err != nil { // if err != nil {
// return nil, err // return nil, err
// } // }
@ -111,6 +316,15 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
// return onlineUserIDs, nil // return onlineUserIDs, nil
//} //}
func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool {
	return o.local.SetHas(userID, platformIDs)
}

func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) {
	switch o.fullUserCache {
	case true:
		o.mapCache.Store(userID, platformIDs)
	case false:
		o.lruCache.Set(userID, platformIDs)
	}
}

func (o *OnlineCache) setHasUserOnline(userID string, platformIDs []int32) bool {
	return o.lruCache.SetHas(userID, platformIDs)
}

@ -77,6 +77,11 @@ func (c *ConversationRpcClient) SetConversationMaxSeq(ctx context.Context, owner
return err return err
} }
func (c *ConversationRpcClient) SetConversationMinSeq(ctx context.Context, ownerUserIDs []string, conversationID string, minSeq int64) error {
_, err := c.Client.SetConversationMinSeq(ctx, &pbconversation.SetConversationMinSeqReq{OwnerUserID: ownerUserIDs, ConversationID: conversationID, MinSeq: minSeq})
return err
}
func (c *ConversationRpcClient) SetConversations(ctx context.Context, userIDs []string, conversation *pbconversation.ConversationReq) error { func (c *ConversationRpcClient) SetConversations(ctx context.Context, userIDs []string, conversation *pbconversation.ConversationReq) error {
_, err := c.Client.SetConversations(ctx, &pbconversation.SetConversationsReq{UserIDs: userIDs, Conversation: conversation}) _, err := c.Client.SetConversations(ctx, &pbconversation.SetConversationsReq{UserIDs: userIDs, Conversation: conversation})
return err return err

@ -17,21 +17,22 @@ package rpcclient
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"time"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/memamq" "github.com/openimsdk/tools/mq/memamq"
"github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/system/program"
"github.com/openimsdk/tools/utils/idutil" "github.com/openimsdk/tools/utils/idutil"
"github.com/openimsdk/tools/utils/jsonutil" "github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/timeutil" "github.com/openimsdk/tools/utils/timeutil"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"time"
) )
func newContentTypeConf(conf *config.Notification) map[int32]config.NotificationConfig { func newContentTypeConf(conf *config.Notification) map[int32]config.NotificationConfig {
@ -159,6 +160,15 @@ func (m *MessageRpcClient) SendMsg(ctx context.Context, req *msg.SendMsgReq) (*m
return resp, nil return resp, nil
} }
// SetUserConversationsMinSeq set min seq
func (m *MessageRpcClient) SetUserConversationsMinSeq(ctx context.Context, req *msg.SetUserConversationsMinSeqReq) (*msg.SetUserConversationsMinSeqResp, error) {
resp, err := m.Client.SetUserConversationsMinSeq(ctx, req)
if err != nil {
return nil, err
}
return resp, nil
}
// GetMaxSeq retrieves the maximum sequence number from the gRPC client. // GetMaxSeq retrieves the maximum sequence number from the gRPC client.
// Errors during the gRPC call are wrapped to provide additional context. // Errors during the gRPC call are wrapped to provide additional context.
func (m *MessageRpcClient) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) { func (m *MessageRpcClient) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) {
@ -174,6 +184,9 @@ func (m *MessageRpcClient) GetMaxSeqs(ctx context.Context, conversationIDs []str
resp, err := m.Client.GetMaxSeqs(ctx, &msg.GetMaxSeqsReq{ resp, err := m.Client.GetMaxSeqs(ctx, &msg.GetMaxSeqsReq{
ConversationIDs: conversationIDs, ConversationIDs: conversationIDs,
}) })
if err != nil {
return nil, err
}
return resp.MaxSeqs, err return resp.MaxSeqs, err
} }
@ -182,6 +195,9 @@ func (m *MessageRpcClient) GetHasReadSeqs(ctx context.Context, userID string, co
UserID: userID, UserID: userID,
ConversationIDs: conversationIDs, ConversationIDs: conversationIDs,
}) })
if err != nil {
return nil, err
}
return resp.MaxSeqs, err return resp.MaxSeqs, err
} }
@ -190,6 +206,9 @@ func (m *MessageRpcClient) GetMsgByConversationIDs(ctx context.Context, docIDs [
ConversationIDs: docIDs, ConversationIDs: docIDs,
MaxSeqs: seqs, MaxSeqs: seqs,
}) })
if err != nil {
return nil, err
}
return resp.MsgDatas, err return resp.MsgDatas, err
} }
@ -204,6 +223,19 @@ func (m *MessageRpcClient) PullMessageBySeqList(ctx context.Context, req *sdkws.
return resp, nil return resp, nil
} }
func (m *MessageRpcClient) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *msg.GetConversationsHasReadAndMaxSeqReq) (*msg.GetConversationsHasReadAndMaxSeqResp, error) {
resp, err := m.Client.GetConversationsHasReadAndMaxSeq(ctx, req)
if err != nil {
// Wrap the error to provide more context if the gRPC call fails.
return nil, err
}
return resp, nil
}
func (m *MessageRpcClient) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) {
return m.Client.GetSeqMessage(ctx, req)
}
func (m *MessageRpcClient) GetConversationMaxSeq(ctx context.Context, conversationID string) (int64, error) { func (m *MessageRpcClient) GetConversationMaxSeq(ctx context.Context, conversationID string) (int64, error) {
resp, err := m.Client.GetConversationMaxSeq(ctx, &msg.GetConversationMaxSeqReq{ConversationID: conversationID}) resp, err := m.Client.GetConversationMaxSeq(ctx, &msg.GetConversationMaxSeqReq{ConversationID: conversationID})
if err != nil { if err != nil {
@ -252,8 +284,8 @@ func WithUserRpcClient(userRpcClient *UserRpcClient) NotificationSenderOptions {
} }
const ( const (
notificationWorkerCount = 2 notificationWorkerCount = 16
notificationBufferSize = 200 notificationBufferSize = 1024 * 1024 * 2
) )
 func NewNotificationSender(conf *config.Notification, opts ...NotificationSenderOptions) *NotificationSender {
@@ -280,7 +312,8 @@ func WithRpcGetUserName() NotificationOptions {
 }

 func (s *NotificationSender) send(ctx context.Context, sendID, recvID string, contentType, sessionType int32, m proto.Message, opts ...NotificationOptions) {
-	ctx = mcontext.WithMustInfoCtx([]string{mcontext.GetOperationID(ctx), mcontext.GetOpUserID(ctx), mcontext.GetOpUserPlatform(ctx), mcontext.GetConnID(ctx)})
+	//ctx = mcontext.WithMustInfoCtx([]string{mcontext.GetOperationID(ctx), mcontext.GetOpUserID(ctx), mcontext.GetOpUserPlatform(ctx), mcontext.GetConnID(ctx)})
+	ctx = context.WithoutCancel(ctx)
 	ctx, cancel := context.WithTimeout(ctx, time.Second*time.Duration(5))
 	defer cancel()
 	n := sdkws.NotificationElem{Detail: jsonutil.StructToJsonString(m)}
@@ -337,7 +370,9 @@ func (s *NotificationSender) send(ctx context.Context, sendID, recvID string, co
 }

 func (s *NotificationSender) NotificationWithSessionType(ctx context.Context, sendID, recvID string, contentType, sessionType int32, m proto.Message, opts ...NotificationOptions) {
-	s.queue.Push(func() { s.send(ctx, sendID, recvID, contentType, sessionType, m, opts...) })
+	if err := s.queue.Push(func() { s.send(ctx, sendID, recvID, contentType, sessionType, m, opts...) }); err != nil {
+		log.ZWarn(ctx, "Push to queue failed", err, "sendID", sendID, "recvID", recvID, "msg", jsonutil.StructToJsonString(m))
+	}
 }

 func (s *NotificationSender) Notification(ctx context.Context, sendID, recvID string, contentType int32, m proto.Message, opts ...NotificationOptions) {
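Two behavioral changes in send: the WithMustInfoCtx call is commented out, and context.WithoutCancel (Go 1.21+) detaches the queued send from the caller's lifetime. The detached context keeps the parent's values (operation ID, user ID, and so on) but drops its cancellation, so only the 5-second timeout bounds the asynchronous work. A small demonstration of that semantics:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type ctxKey string

func main() {
	// Parent request context carrying a value; cancelled when the handler returns.
	parent := context.WithValue(context.Background(), ctxKey("operationID"), "op-1234")
	parent, cancelParent := context.WithCancel(parent)
	cancelParent() // simulate the request finishing before the async send runs

	// WithoutCancel keeps parent's values but severs its cancellation chain.
	detached := context.WithoutCancel(parent)
	detached, cancel := context.WithTimeout(detached, 5*time.Second)
	defer cancel()

	fmt.Println(detached.Value(ctxKey("operationID"))) // op-1234: values survive
	fmt.Println(detached.Err())                        // <nil>: parent cancellation ignored
}
```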

@@ -169,6 +169,15 @@ func (u *UserRpcClient) Access(ctx context.Context, ownerUserID string) error {
 	return authverify.CheckAccessV3(ctx, ownerUserID, u.imAdminUserID)
 }

+// GetAllUserID retrieves all user IDs with pagination options.
+func (u *UserRpcClient) GetAllUserID(ctx context.Context, pageNumber, showNumber int32) (*user.GetAllUserIDResp, error) {
+	resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}})
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
 // GetAllUserIDs retrieves all user IDs with pagination options.
 func (u *UserRpcClient) GetAllUserIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) {
 	resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}})
@@ -215,3 +224,7 @@ func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string
 	}
 	return resp[0].PlatformIDs, nil
 }
+
+func (u *UserRpcClient) GetAllOnlineUsers(ctx context.Context, cursor uint64) (*user.GetAllOnlineUsersResp, error) {
+	return u.Client.GetAllOnlineUsers(ctx, &user.GetAllOnlineUsersReq{Cursor: cursor})
+}
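The new GetAllOnlineUsers method implies cursor-based iteration: callers start at cursor 0 and feed each response's next cursor back in until the server signals the end. The response field names below (UserIDs, NextCursor) are illustrative assumptions — the real shape lives in the OpenIM protobuf definitions, which this diff does not show:

```go
package main

import (
	"context"
	"fmt"
)

// page is a hypothetical response shape standing in for GetAllOnlineUsersResp.
type page struct {
	UserIDs    []string
	NextCursor uint64
}

// listOnline simulates a cursor-paged RPC over a fixed dataset.
func listOnline(ctx context.Context, cursor uint64) (*page, error) {
	data := [][]string{{"u1", "u2"}, {"u3"}}
	if cursor >= uint64(len(data)) {
		return &page{}, nil
	}
	next := cursor + 1
	if next == uint64(len(data)) {
		next = 0 // a zero cursor signals the end in this sketch
	}
	return &page{UserIDs: data[cursor], NextCursor: next}, nil
}

func main() {
	ctx := context.Background()
	for cursor := uint64(0); ; {
		p, err := listOnline(ctx, cursor)
		if err != nil {
			panic(err)
		}
		fmt.Println(p.UserIDs)
		if p.NextCursor == 0 {
			break
		}
		cursor = p.NextCursor
	}
}
```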

@@ -19,6 +19,14 @@ func GenGroupConversationID(groupID string) string {
 	return "sg_" + groupID
 }

+func IsGroupConversationID(conversationID string) bool {
+	return strings.HasPrefix(conversationID, "sg_")
+}
+
+func IsNotificationConversationID(conversationID string) bool {
+	return strings.HasPrefix(conversationID, "n_")
+}
+
 func GenConversationUniqueKeyForSingle(sendID, recvID string) string {
 	l := []string{sendID, recvID}
 	sort.Strings(l)
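The new predicates classify a conversation by its ID prefix: "sg_" marks a group conversation and "n_" a notification conversation, matching the generator functions in the same file. A runnable usage sketch (the "si_" single-chat ID below is only an illustrative example):

```go
package main

import (
	"fmt"
	"strings"
)

// Prefix checks mirroring the helpers added in this diff.
func IsGroupConversationID(conversationID string) bool {
	return strings.HasPrefix(conversationID, "sg_")
}

func IsNotificationConversationID(conversationID string) bool {
	return strings.HasPrefix(conversationID, "n_")
}

func main() {
	for _, id := range []string{"sg_team42", "n_sys_1", "si_alice_bob"} {
		fmt.Printf("%s group=%v notification=%v\n",
			id, IsGroupConversationID(id), IsNotificationConversationID(id))
	}
}
```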

@@ -35,7 +35,7 @@ done
 echo "Kafka is ready. Creating topics..."

-topics=("toRedis" "toMongo" "toPush")
+topics=("toRedis" "toMongo" "toPush" "toOfflinePush")
 partitions=8
 replicationFactor=1
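The script gains a fourth topic, toOfflinePush, created with the same 8 partitions and replication factor 1 as the existing ones. For illustration, an equivalent one-off in Go with the IBM/sarama admin client (the broker address and Kafka version here are assumptions; the repository itself does this from shell):

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // assumed broker version for this sketch

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Same topic list, partition count, and replication factor as the script,
	// now including the new toOfflinePush topic.
	for _, topic := range []string{"toRedis", "toMongo", "toPush", "toOfflinePush"} {
		err := admin.CreateTopic(topic, &sarama.TopicDetail{
			NumPartitions:     8,
			ReplicationFactor: 1,
		}, false)
		if err != nil {
			log.Printf("create %s: %v (may already exist)", topic, err)
			continue
		}
		log.Printf("created topic %s", topic)
	}
}
```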

@@ -3,8 +3,8 @@ serviceBinaries:
   openim-crontask: 1
   openim-rpc-user: 1
   openim-msggateway: 1
-  openim-push: 1
-  openim-msgtransfer: 4
+  openim-push: 8
+  openim-msgtransfer: 8
   openim-rpc-conversation: 1
   openim-rpc-auth: 1
   openim-rpc-group: 1

Some files were not shown because too many files have changed in this diff.
