Merge branch 'main' into perf/improve_gzip_performance

pull/1321/head
Gordon committed by GitHub
commit fcfe08616d

@ -24,38 +24,49 @@ on:
types: [created]
jobs:
create-pr:
sync-issue-to-pr:
runs-on: ubuntu-latest
if: github.event.pull_request.base.ref == 'main'
# && github.event.pull_request.merged == true
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Create PR to release branch
run: |
ISSUEID=$(gh pr view ${{ github.event.pull_request.number }} --repo $OWNER/$REPO | grep -oP 'Fixes #\K\d+')
echo "===========> $ISSUEID"
ISSUE=$(gh issue view $ISSUEID --repo $OWNER/$REPO --json labels,assignees,milestone,title)
echo "===========> $ISSUE"
LABELS=$(echo $ISSUE | jq -r '.labels[] | select(.name) | .name' | jq -R -r -s -c 'split("\n")[:-1] | join(",")')
ASSIGNEES=$(echo $ISSUE | jq -r '.assignees[] | select(.login) | .login' | jq -R -s -c 'split("\n")[:-1] | join(",")')
MILESTONE=$(echo $ISSUE | jq -r '.milestone | select(.title) | .title')
TITLE=$(echo $ISSUE | jq -r '.title')
gh pr edit ${{ github.event.pull_request.number }} --repo $OWNER/$REPO --add-label "$LABELS" --add-assignee "$ASSIGNEES" --milestone "$MILESTONE"
# git checkout -b bot/merge-to-release-$ISSUEID
# git push origin bot/merge-to-release-$ISSUEID
# gh pr create --base release --head bot/merge-to-release-$ISSUEID --title "Merge main to release" --body ""
# gh pr create --base main --head feat/auto-release-pr-624 --title "The bug is fixed" --body "$x" --repo openimsdk/open-im-server --reviewer "cubxxw"
continue-on-error: true
env:
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
GH_TOKEN: ${{ github.token }}
ISSUE: ${{ github.event.issue.html_url }}
OWNER: ${{ github.repository_owner }}
REPO: ${{ github.event.repository.name }}
- name: Checkout code
uses: actions/checkout@v2
- name: Sync Issue to PR
if: github.event_name == 'pull_request' && github.event.pull_request.base.ref == 'main'
run: |
PR_BODY="${{ github.event.pull_request.body }}"
ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oP 'Fixes #\K\d+')
if [[ -z "$ISSUE_NUMBER" ]]; then
echo "No Issue number found."
exit 1
fi
echo "Issue number found: $ISSUE_NUMBER"
# Using GitHub CLI to get issue details
gh issue view "$ISSUE_NUMBER" --repo "${{ github.repository }}" --json labels,assignees,milestone,title > issue_data.json
# Check if jq is installed
if ! command -v jq &> /dev/null; then
echo "Installing jq..."
sudo apt-get install -y jq
fi
# Parse data with jq
LABELS=$(jq -r '.labels | map(.name) | join(",")' issue_data.json)
ASSIGNEES=$(jq -r '.assignees | map(.login) | join(",")' issue_data.json)
MILESTONE=$(jq -r '.milestone.title' issue_data.json)
# Check if any of the fields are empty and set them to None
LABELS=${LABELS:-None}
ASSIGNEES=${ASSIGNEES:-None}
MILESTONE=${MILESTONE:-None}
# Edit the PR with issue details, handling empty fields
gh pr edit "${{ github.event.pull_request.number }}" --repo "${{ github.repository }}" \
${LABELS:+--add-label "$LABELS"} \
${ASSIGNEES:+--add-assignee "$ASSIGNEES"} \
${MILESTONE:+--milestone "$MILESTONE"}
continue-on-error: true
env:
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}

@ -39,7 +39,7 @@ jobs:
In addition to Slack, we also offer the following ways to get in touch:
+ <a href="https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q" target="_blank"><img src="https://img.shields.io/badge/Slack-OpenIM%2B-blueviolet?logo=slack&amp;logoColor=white"></a> We also have Slack channels for you to communicate and discuss. To join, visit https://slack.com/ and join our [👀 Open-IM-Server slack](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) team channel.
+ <a href="https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=winxu81@gmail.com" target="_blank"><img src="https://img.shields.io/badge/gmail-%40OOpenIMSDKCore?style=social&logo=gmail"></a> Get in touch with us on [Gmail](https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=winxu81@gmail.com). If you have any questions or issues that need resolving, or any suggestions and feedback for our open source projects, please feel free to contact us via email.
+ <a href="https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=info@openim.io" target="_blank"><img src="https://img.shields.io/badge/gmail-%40OOpenIMSDKCore?style=social&logo=gmail"></a> Get in touch with us on [Gmail](https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=winxu81@gmail.com). If you have any questions or issues that need resolving, or any suggestions and feedback for our open source projects, please feel free to contact us via email.
+ <a href="https://doc.rentsoft.cn/" target="_blank"><img src="https://img.shields.io/badge/%E5%8D%9A%E5%AE%A2-%40OpenIMSDKCore-blue?style=social&logo=Octopus%20Deploy"></a> Read our [blog](https://doc.rentsoft.cn/). Our blog is a great place to stay up-to-date with Open-IM-Server projects and trends. On the blog, we share our latest developments, tech trends, and other interesting information.
+ <a href="https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg" target="_blank"><img src="https://img.shields.io/badge/%E5%BE%AE%E4%BF%A1-OpenIMSDKCore-brightgreen?logo=wechat&style=flat-square"></a> Add [Wechat](https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg) and indicate that you are a user or developer of Open-IM-Server. We will process your request as soon as possible.

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
name: Create Tag
name: OpenIM Create Tag
on:
issue_comment:

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Github Robot for Cherry Pick when PR is merged'
name: Github Robot for Cherry Pick when PR is merged
on:
pull_request_target:
types:

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
name: Check-Coverage
name: OpenIM Check Coverage
on:
workflow_dispatch:

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
name: "OpenIM CLA Assistant"
name: OpenIM CLA Assistant
on:
issue_comment:
types: [created]
@ -33,7 +33,7 @@ env:
OPEN_IM_SERVER_CLA_DOCUMENT: https://github.com/openim-sigs/cla/blob/main/README.md
OPEN_IM_SERVER_SIGNATURES_PATH: signatures/${{ github.event.repository.name }}/cla.json
OPEN_IM_SERVER_ALLOWLIST: kubbot,bot*
OPEN_IM_SERVER_ALLOWLIST: kubbot,bot*,bot-*,bot/*,bot-/*,bot,*[bot]
jobs:
CLAAssistant:

@ -1,17 +1,3 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Create Branch on Tag
on:
@ -19,6 +5,10 @@ on:
tags:
- 'v*.*.0'
permissions:
contents: write
actions: write
jobs:
create-branch:
runs-on: ubuntu-latest
@ -36,41 +26,38 @@ jobs:
- name: Install git-chglog
run: make install.git-chglog
- name: Create Branch
- name: Create Branch and Push
env:
TAG_NAME: ${{ github.ref_name }}
run: |
TAG_NAME=${GITHUB_REF/refs\/tags\//}
IFS='.' read -ra VERSION_PARTS <<< "$TAG_NAME"
if [[ "${VERSION_PARTS[2]}" = "0" ]]; then
BRANCH_NAME="release-v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}"
echo "Creating branch $BRANCH_NAME"
git checkout -b "$BRANCH_NAME"
git push origin "$BRANCH_NAME"
else
echo "Not a release tag. Skipping branch creation."
fi
continue-on-error: true
- name: Push Changes
uses: stefanzweifel/git-auto-commit-action@v5
with:
commit_message: "Auto Commit CHANGELOG"
branch: release-v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}
env:
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
- name: Create and Push CHANGELOG
- name: Create and Commit CHANGELOG
if: endsWith(github.ref_name, '.0')
run: |
git checkout main
TAG_NAME=${GITHUB_REF/refs\/tags\//}
git fetch --all
TAG_NAME=${GITHUB_REF#refs/tags/}
IFS='.' read -ra VERSION_PARTS <<< "$TAG_NAME"
if [[ "${VERSION_PARTS[2]}" = "0" ]]; then
cd CHANGELOG
git-chglog --tag-filter-pattern "v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.*" -o "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md"
git add "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md"
git commit -m "Update CHANGELOG for $TAG_NAME"
fi
- name: Push Changes
uses: stefanzweifel/git-auto-commit-action@v5
git checkout main
cd CHANGELOG
git-chglog --tag-filter-pattern "v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.*" -o "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md"
git add "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md"
git commit -m "Update CHANGELOG for $TAG_NAME" || echo "No changes to commit."
continue-on-error: true
- name: Push CHANGELOG to Main
if: steps.create-and-commit-changelog.outputs.changes == 'true'
uses: ad-m/github-push-action@v0.6.0
with:
commit_message: "Auto Commit CHANGELOG"
github_token: ${{ secrets.BOT_GITHUB_TOKEN }}
branch: main
env:
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
continue-on-error: true

@ -1,55 +0,0 @@
# Copyright © 2023 OpenIM open source community. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: OpenIM Deploy for dev
on:
push:
branches:
- 'devops' # Only for the dev branch
- 'main'
paths:
- '.github/workflows/*'
# - '__test__/**' # dev No immediate testing is required
- 'src/**'
- 'Dockerfile'
- 'docker-compose.yml'
- 'bin/*'
jobs:
deploy-dev:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: executing remote ssh commands using password
uses: appleboy/ssh-action@v1.0.0
env:
OWNER: ${{ github.repository_owner }}
REPO: ${{ github.event.repository.name }}
with:
host: "${{ secrets.SG_M1_HOST }}, ${{ secrets.SG_N1_HOST }}, ${{ secrets.SG_N2_HOST}}"
username: ${{ secrets.SG_USERNAME }}
password: ${{ secrets.SG_PASSWORD }}
port: ${{ secrets.SG_PORT }}
envs: OWNER,REPO
script_stop: true
script: |
mkdir -p /test/openim
cd /test/openim
pwd;ls -al
echo "OWNER: $OWNER"
echo "REPO: $REPO"
git clone -b develop https://github.com/${OWNER}/${REPO}.git; cd ${REPO}
docker compose up -d
continue-on-error: true

@ -2,7 +2,7 @@
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.
name: Dependency Review
name: OpenIM Dependency Review
on: [pull_request]
permissions:

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
name: e2e
name: OpenIM E2E Test
on:
workflow_dispatch:

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
name: first-interaction
name: OpenIM First Interaction
on:
issues:

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'issue translator'
name: OpenIM Issue Auto Translator
on:
issue_comment:
types: [created]

@ -12,12 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'OpenIM Commit Action'
name: OpenIM OpenCommit Action
on:
push:
branches:
- main
# this list of branches is often enough,
# but you may still ignore other public branches
branches-ignore: [main master dev development release]
jobs:
opencommit:
@ -27,20 +28,20 @@ jobs:
permissions: write-all
steps:
- name: Setup Node.js Environment
uses: actions/setup-node@v4
uses: actions/setup-node@v2
with:
node-version: '16'
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: di-sukharev/opencommit@github-action-v1.0.4
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
env:
# set openAI api key in repo actions secrets,
# for openAI keys go to: https://platform.openai.com/account/api-keys
# for repo secret go to: https://github.com/kuebcub/settings/secrets/actions
# for repo secret go to: <your_repo_url>/settings/secrets/actions
OCO_OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
# customization
@ -48,5 +49,6 @@ jobs:
OCO_OPENAI_BASE_PATH: ''
OCO_DESCRIPTION: false
OCO_EMOJI: false
OCO_MODEL: gpt-3.5-turbo
OCO_MODEL: gpt-3.5-turbo-16k
OCO_LANGUAGE: en
OCO_PROMPT_MODULE: conventional-commit

@ -41,7 +41,9 @@ jobs:
run: |
git config user.name 'openimbot'
git config user.email 'openimsdk@qq.com'
git checkout -b cicd/patch-${{ github.event.number }}
BRANCH_NAME="auto-pr-$(date +'%Y%m%d%H%M%S')"
git checkout -b $BRANCH_NAME
- uses: actions/setup-node@v4
- name: Setup Go
uses: actions/setup-go@v4
@ -50,6 +52,7 @@ jobs:
sudo make tidy
sudo make tools.verify.go-gitlint
echo "Run go modules tidy successfully"
continue-on-error: true
- name: Run go format
run: |
@ -75,13 +78,8 @@ jobs:
echo "Run unit test and get test coverage successfully"
continue-on-error: true
# - name: Initialize CodeQL
# uses: github/codeql-action/init@v2
# with:
# languages: go
# - name: Perform CodeQL Analysis
# uses: github/codeql-action/analyze@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
- name: OpenIM verify copyright
run: |
@ -90,30 +88,25 @@ jobs:
echo "OpenIM verify successfully"
continue-on-error: true
# - name: Commit code
# run: |
# git add .
# git commit -m "cicd: bump League Patch to cicd/patch-${{ github.event.number }}"
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
with:
token: ${{ secrets.BOT_GITHUB_TOKEN }}
commit-message: "cicd: bump League Patch to cicd/patch-${{ github.event.number }}"
title: Bump League Patch to cicd/patch-${{ github.event.number }}
commit-message: "cicd: bump League Patch"
author: kubbot <kubbot@3293172751ysy@gmail.com>
signoff: false
draft: false
branch: "cicd/patch-${{ github.event.number }}"
branch: ''
assignees: cubxxw
reviewers: cubxxw
delete-branch: true
title: "Bump League Patch auto PR: $(date +'%Y%m%d')"
body: |
Review criteria:
- [ ] Disenchanter can connect and issue actions
This is an automated PR. @ ${{ github.actor }}
This is an automated PR. @ $(date +'%Y%m%d')
<sub>[workflow](https://github.com/openimsdk/open-im-server/blob/main/.github/workflows/pull-request.yml).</sub>
base: main
labels: |

@ -1,76 +0,0 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: OpenIM executes the script validation code
on:
push:
branches:
- main
paths-ignore:
- "docs/**"
- "README.md"
- "README_zh-CN.md"
- "CONTRIBUTING.md"
pull_request:
branches:
- main
paths-ignore:
- "README.md"
- "README_zh-CN.md"
- "CONTRIBUTING.md"
- "docs/**"
env:
GO_VERSION: "1.19"
GOLANGCI_VERSION: "v1.50.1"
jobs:
openim:
name: Test with go ${{ matrix.go_version }} on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
permissions:
contents: write
environment:
name: openim
strategy:
matrix:
go_version: ["1.21"]
os: [ubuntu-latest]
steps:
- name: Setup
uses: actions/checkout@v4
- name: In ${{ matrix.os }} Execute the script validation code
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go_version }}
id: go
- name: scripts validation
run: |
sudo make verify
continue-on-error: true
- name: verify format
run: |
sudo make format
continue-on-error: true
- name: verify license
run: |
sudo make verify-copyright
continue-on-error: true

@ -60,7 +60,7 @@ representative at an online or offline event.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
3293172751nss@gmail.com.
`security@openim.io`.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the

@ -186,7 +186,7 @@ We divide the problem into security and general problems:
Security issues are always treated seriously. As our usual principle, we discourage anyone to spread security issues. If you find a security issue of Open-IM-Server, please do not discuss it in public and even do not open a public issue.
Instead we encourage you to send us a private email to winxu81@gmail.com to report this.
Instead we encourage you to send us a private email to info@openim.io to report this.
#### Reporting general issues
@ -369,7 +369,7 @@ Our most recommended way to get in touch is through [Slack](https://join.slack.c
In addition to Slack, we also offer the following ways to get in touch:
+ <a href="https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q" target="_blank"><img src="https://img.shields.io/badge/slack-%40OpenIMSDKCore-informational?logo=slack&style=flat-square"></a>: We also have Slack channels for you to communicate and discuss. To join, visit https://slack.com/ and join our [👀 Open-IM-Server slack](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) team channel.
+ <a href="https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=4closetool3@gmail.com" target="_blank"><img src="https://img.shields.io/badge/gmail-%40OOpenIMSDKCore?style=social&logo=gmail"></a>: Get in touch with us on [Gmail](winxu81@gmail.com). If you have any questions or issues that need resolving, or any suggestions and feedback for our open source projects, please feel free to contact us via email.
+ <a href="https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=4closetool3@gmail.com" target="_blank"><img src="https://img.shields.io/badge/gmail-%40OOpenIMSDKCore?style=social&logo=gmail"></a>: Get in touch with us on [Gmail](info@openim.io). If you have any questions or issues that need resolving, or any suggestions and feedback for our open source projects, please feel free to contact us via email.
+ <a href="https://doc.rentsoft.cn/" target="_blank"><img src="https://img.shields.io/badge/%E5%8D%9A%E5%AE%A2-%40OpenIMSDKCore-blue?style=social&logo=Octopus%20Deploy"></a>: Read our [blog](https://doc.rentsoft.cn/). Our blog is a great place to stay up-to-date with Open-IM-Server projects and trends. On the blog, we share our latest developments, tech trends, and other interesting information.
+ <a href="https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg" target="_blank"><img src="https://img.shields.io/badge/%E5%BE%AE%E4%BF%A1-OpenIMSDKCore-brightgreen?logo=wechat&style=flat-square"></a>: Add [Wechat](https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg) and indicate that you are a user or developer of Open-IM-Server. We will process your request as soon as possible.

@ -29,6 +29,10 @@
</p>
## 🟢 Scan the WeChat QR code to join our group chat
<img src="https://openim-1253691595.cos.ap-nanjing.myqcloud.com/WechatIMG20.jpeg" width="300">
## Ⓜ️ About OpenIM
OpenIM is more than an open-source instant messaging component; it is an integral part of your application ecosystem. See the diagram below to understand how AppServer, AppClient, OpenIMServer, and OpenIMSDK interact with each other.

@ -17,6 +17,8 @@ package main
import (
"context"
"fmt"
ginProm "github.com/openimsdk/open-im-server/v3/pkg/common/ginPrometheus"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"net"
_ "net/http/pprof"
"strconv"
@ -26,6 +28,7 @@ import (
"github.com/OpenIMSDK/protocol/constant"
"github.com/OpenIMSDK/tools/discoveryregistry"
"github.com/OpenIMSDK/tools/log"
"github.com/openimsdk/open-im-server/v3/internal/api"
"github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
@ -42,11 +45,11 @@ func main() {
}
}
func run(port int) error {
log.ZInfo(context.Background(), "Openim api port:", "port", port)
func run(port int, proPort int) error {
log.ZInfo(context.Background(), "Openim api port:", "port", port, "proPort", proPort)
if port == 0 {
err := "port is empty"
if port == 0 || proPort == 0 {
err := "port or proPort is empty:" + strconv.Itoa(port) + "," + strconv.Itoa(proPort)
log.ZError(context.Background(), err, nil)
return fmt.Errorf(err)
@ -81,6 +84,13 @@ func run(port int) error {
}
log.ZInfo(context.Background(), "api register public config to discov success")
router := api.NewGinRouter(client, rdb)
//////////////////////////////
if config.Config.Prometheus.Enable {
p := ginProm.NewPrometheus("app", prom_metrics.GetGinCusMetrics("Api"))
p.SetListenAddress(fmt.Sprintf(":%d", proPort))
p.Use(router)
}
/////////////////////////////////
log.ZInfo(context.Background(), "api init router success")
var address string
if config.Config.Api.ListenIP != "" {
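A standalone sketch of the same idea — exposing a metrics registry on its own port, separate from the API listener — using only the vanilla Prometheus client (the registry and handler calls mirror the msgtransfer change further below; port 20100 is taken from apiPrometheusPort in the config and is illustrative only):

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Dedicated registry so only the metrics registered here are exported.
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector()) // Go runtime metrics

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))

	// Metrics are served on their own port, independent of the API port.
	fmt.Println(http.ListenAndServe(":20100", mux))
}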

@ -21,6 +21,7 @@ import (
func main() {
msgTransferCmd := cmd.NewMsgTransferCmd()
msgTransferCmd.AddPrometheusPortFlag()
msgTransferCmd.AddTransferProgressFlag()
if err := msgTransferCmd.Exec(); err != nil {
panic(err.Error())
}

@ -382,7 +382,9 @@ callback:
# The number of Prometheus ports per service needs to correspond to rpcPort
# The number of ports needs to be consistent with msg_transfer_service_num in script/path_info.sh
prometheus:
enable: false
enable: true
prometheusUrl: "https://openim.prometheus"
apiPrometheusPort: [20100]
userPrometheusPort: [ 20110 ]
friendPrometheusPort: [ 20120 ]
messagePrometheusPort: [ 20130 ]
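For orientation, these keys map onto the server's Prometheus config struct roughly as sketched below; Enable and PrometheusUrl are referenced elsewhere in this diff (config.Config.Prometheus.Enable / .PrometheusUrl), while the port field names are assumptions for illustration only:

// hypothetical shape, not copied from the repository
type prometheusConfig struct {
	Enable                bool   `yaml:"enable"`
	PrometheusUrl         string `yaml:"prometheusUrl"`
	ApiPrometheusPort     []int  `yaml:"apiPrometheusPort"`     // assumed field name
	UserPrometheusPort    []int  `yaml:"userPrometheusPort"`    // assumed field name
	FriendPrometheusPort  []int  `yaml:"friendPrometheusPort"`  // assumed field name
	MessagePrometheusPort []int  `yaml:"messagePrometheusPort"` // assumed field name
}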

@ -383,6 +383,8 @@ callback:
# The number of ports needs to be consistent with msg_transfer_service_num in script/path_info.sh
prometheus:
enable: ${PROMETHEUS_ENABLE}
prometheusUrl: ${PROMETHEUS_URL}
apiPrometheusPort: [${API_PROM_PORT}]
userPrometheusPort: [ ${USER_PROM_PORT} ]
friendPrometheusPort: [ ${FRIEND_PROM_PORT} ]
messagePrometheusPort: [ ${MESSAGE_PROM_PORT} ]

@ -37,8 +37,8 @@ require github.com/google/uuid v1.3.1
require (
github.com/IBM/sarama v1.41.3
github.com/OpenIMSDK/protocol v0.0.30
github.com/OpenIMSDK/tools v0.0.15
github.com/OpenIMSDK/protocol v0.0.31
github.com/OpenIMSDK/tools v0.0.16
github.com/aliyun/aliyun-oss-go-sdk v2.2.9+incompatible
github.com/go-redis/redis v6.15.9+incompatible
github.com/go-sql-driver/mysql v1.7.1

@ -18,10 +18,10 @@ firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIw
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/IBM/sarama v1.41.3 h1:MWBEJ12vHC8coMjdEXFq/6ftO6DUZnQlFYcxtOJFa7c=
github.com/IBM/sarama v1.41.3/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ=
github.com/OpenIMSDK/protocol v0.0.30 h1:MiHO6PyQMR9ojBHNnSFxCHLmsoE2xZqaiYj975JiZnM=
github.com/OpenIMSDK/protocol v0.0.30/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
github.com/OpenIMSDK/tools v0.0.15 h1:FF3m0TQUG56pJC15a11jmBG6Y1EjXarEW4JV3CBF/Jc=
github.com/OpenIMSDK/tools v0.0.15/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI=
github.com/OpenIMSDK/protocol v0.0.31 h1:ax43x9aqA6EKNXNukS5MT5BSTqkUmwO4uTvbJLtzCgE=
github.com/OpenIMSDK/protocol v0.0.31/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
github.com/OpenIMSDK/tools v0.0.16 h1:te/GIq2imCMsrRPgU9OObYKbzZ3rT08Lih/o+3QFIz0=
github.com/OpenIMSDK/tools v0.0.16/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI=
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=

@ -53,7 +53,7 @@ func (MessageApi) SetOptions(options map[string]bool, value bool) {
utils.SetSwitchFromOptions(options, constant.IsConversationUpdate, value)
}
func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg) *msg.SendMsgReq {
func (m MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg) *msg.SendMsgReq {
var newContent string
options := make(map[string]bool, 5)
switch params.ContentType {

@ -39,7 +39,6 @@ import (
"github.com/OpenIMSDK/tools/mw"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
)
@ -63,13 +62,6 @@ func NewGinRouter(discov discoveryregistry.SvcDiscoveryRegistry, rdb redis.Unive
u := NewUserApi(*userRpc)
m := NewMessageApi(messageRpc, userRpc)
if config.Config.Prometheus.Enable {
prome.NewApiRequestCounter()
prome.NewApiRequestFailedCounter()
prome.NewApiRequestSuccessCounter()
r.Use(prome.PrometheusMiddleware)
r.GET("/metrics", prome.PrometheusHandler())
}
ParseToken := GinParseToken(rdb)
userRouterGroup := r.Group("/user")
{
@ -151,6 +143,7 @@ func NewGinRouter(discov discoveryregistry.SvcDiscoveryRegistry, rdb redis.Unive
// Third service
thirdGroup := r.Group("/third", ParseToken)
{
thirdGroup.GET("/prometheus", GetPrometheus)
t := NewThirdApi(*thirdRpc)
thirdGroup.POST("/fcm_update_token", t.FcmUpdateToken)
thirdGroup.POST("/set_app_badge", t.SetAppBadge)

@ -15,6 +15,7 @@
package api
import (
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"math/rand"
"net/http"
"strconv"
@ -118,3 +119,7 @@ func (o *ThirdApi) DeleteLogs(c *gin.Context) {
func (o *ThirdApi) SearchLogs(c *gin.Context) {
a2r.Call(third.ThirdClient.SearchLogs, o.Client, c)
}
func GetPrometheus(c *gin.Context) {
c.Redirect(http.StatusFound, config2.Config.Prometheus.PrometheusUrl)
}

@ -20,6 +20,7 @@ import (
"fmt"
"runtime/debug"
"sync"
"sync/atomic"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
@ -70,7 +71,7 @@ type Client struct {
IsBackground bool `json:"isBackground"`
ctx *UserConnContext
longConnServer LongConnServer
closed bool
closed atomic.Bool
closedErr error
token string
}
@ -102,18 +103,14 @@ func (c *Client) ResetClient(
c.ctx = ctx
c.longConnServer = longConnServer
c.IsBackground = false
c.closed = false
c.closed.Store(false)
c.closedErr = nil
c.token = token
}
func (c *Client) pingHandler(_ string) error {
c.conn.SetReadDeadline(pongWait)
err := c.writePongMsg()
if err != nil {
return err
}
return nil
_ = c.conn.SetReadDeadline(pongWait)
return c.writePongMsg()
}
func (c *Client) readMessage() {
@ -124,9 +121,11 @@ func (c *Client) readMessage() {
}
c.close()
}()
c.conn.SetReadLimit(maxMessageSize)
_ = c.conn.SetReadDeadline(pongWait)
c.conn.SetPingHandler(c.pingHandler)
for {
messageType, message, returnErr := c.conn.ReadMessage()
if returnErr != nil {
@ -134,11 +133,13 @@ func (c *Client) readMessage() {
c.closedErr = returnErr
return
}
log.ZDebug(c.ctx, "readMessage", "messageType", messageType)
if c.closed == true { // the connection has just been marked closed, but the goroutine has not exited yet
if c.closed.Load() { // the connection has just been marked closed, but the goroutine has not exited yet
c.closedErr = ErrConnClosed
return
}
switch messageType {
case MessageBinary:
_ = c.conn.SetReadDeadline(pongWait)
@ -150,9 +151,11 @@ func (c *Client) readMessage() {
case MessageText:
c.closedErr = ErrNotSupportMessageProtocol
return
case PingMessage:
err := c.writePongMsg()
log.ZError(c.ctx, "writePongMsg", err)
case CloseMessage:
c.closedErr = ErrClientClosed
return
@ -163,29 +166,40 @@ func (c *Client) readMessage() {
func (c *Client) handleMessage(message []byte) error {
if c.IsCompress {
var decompressErr error
message, decompressErr = c.longConnServer.DecompressWithPool(message)
if decompressErr != nil {
return utils.Wrap(decompressErr, "")
var err error
message, err = c.longConnServer.DecompressWithPool(message)
if err != nil {
return utils.Wrap(err, "")
}
}
var binaryReq Req
err := c.longConnServer.Decode(message, &binaryReq)
var binaryReq = getReq()
defer freeReq(binaryReq)
err := c.longConnServer.Decode(message, binaryReq)
if err != nil {
return utils.Wrap(err, "")
}
if err := c.longConnServer.Validate(binaryReq); err != nil {
return utils.Wrap(err, "")
}
if binaryReq.SendID != c.UserID {
return utils.Wrap(errors.New("exception conn userID not same to req userID"), binaryReq.String())
}
ctx := mcontext.WithMustInfoCtx(
[]string{binaryReq.OperationID, binaryReq.SendID, constant.PlatformIDToName(c.PlatformID), c.ctx.GetConnID()},
)
log.ZDebug(ctx, "gateway req message", "req", binaryReq.String())
var messageErr error
var resp []byte
var (
resp []byte
messageErr error
)
switch binaryReq.ReqIdentifier {
case WSGetNewestSeq:
resp, messageErr = c.longConnServer.GetSeq(ctx, binaryReq)
@ -208,23 +222,29 @@ func (c *Client) handleMessage(message []byte) error {
)
}
return c.replyMessage(ctx, &binaryReq, messageErr, resp)
return c.replyMessage(ctx, binaryReq, messageErr, resp)
}
func (c *Client) setAppBackgroundStatus(ctx context.Context, req Req) ([]byte, error) {
func (c *Client) setAppBackgroundStatus(ctx context.Context, req *Req) ([]byte, error) {
resp, isBackground, messageErr := c.longConnServer.SetUserDeviceBackground(ctx, req)
if messageErr != nil {
return nil, messageErr
}
c.IsBackground = isBackground
// todo callback
return resp, nil
}
func (c *Client) close() {
if c.closed.Load() {
return
}
c.w.Lock()
defer c.w.Unlock()
c.closed = true
c.closed.Store(true)
c.conn.Close()
c.longConnServer.UnRegister(c)
}
@ -244,6 +264,7 @@ func (c *Client) replyMessage(ctx context.Context, binaryReq *Req, err error, re
if err != nil {
log.ZWarn(ctx, "wireBinaryMsg replyMessage", err, "resp", mReply.String())
}
if binaryReq.ReqIdentifier == WsLogoutMsg {
return errors.New("user logout")
}
@ -276,40 +297,48 @@ func (c *Client) KickOnlineMessage() error {
resp := Resp{
ReqIdentifier: WSKickOnlineMsg,
}
return c.writeBinaryMsg(resp)
err := c.writeBinaryMsg(resp)
c.close()
return err
}
func (c *Client) writeBinaryMsg(resp Resp) error {
c.w.Lock()
defer c.w.Unlock()
if c.closed == true {
if c.closed.Load() {
return nil
}
encodedBuf := bufferPool.Get().([]byte)
resultBuf := bufferPool.Get().([]byte)
encodedBuf, err := c.longConnServer.Encode(resp)
if err != nil {
return utils.Wrap(err, "")
}
c.w.Lock()
defer c.w.Unlock()
_ = c.conn.SetWriteDeadline(writeWait)
if c.IsCompress {
var compressErr error
resultBuf, compressErr = c.longConnServer.CompressWithPool(encodedBuf)
resultBuf, compressErr := c.longConnServer.CompressWithPool(encodedBuf)
if compressErr != nil {
return utils.Wrap(compressErr, "")
}
return c.conn.WriteMessage(MessageBinary, resultBuf)
} else {
return c.conn.WriteMessage(MessageBinary, encodedBuf)
}
return c.conn.WriteMessage(MessageBinary, encodedBuf)
}
func (c *Client) writePongMsg() error {
if c.closed.Load() {
return nil
}
c.w.Lock()
defer c.w.Unlock()
if c.closed == true {
return nil
err := c.conn.SetWriteDeadline(writeWait)
if err != nil {
return utils.Wrap(err, "")
}
_ = c.conn.SetWriteDeadline(writeWait)
return c.conn.WriteMessage(PongMessage, nil)
}
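The switch from a plain bool to atomic.Bool lets readMessage and other goroutines check the flag without holding the write mutex; a minimal self-contained sketch of that pattern (assumed standalone example, Go 1.19+):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type conn struct {
	mu     sync.Mutex
	closed atomic.Bool
}

// close is idempotent: the atomic flag lets readers check it without taking mu.
func (c *conn) close() {
	if c.closed.Load() {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	c.closed.Store(true)
}

func (c *conn) write(msg string) error {
	if c.closed.Load() { // cheap check before locking, mirroring writePongMsg above
		return nil
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	fmt.Println("write:", msg)
	return nil
}

func main() {
	c := &conn{}
	_ = c.write("ping")
	c.close()
	_ = c.write("dropped") // no-op after close
}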

@ -124,7 +124,9 @@ func (c *UserConnContext) GetOperationID() string {
}
func (c *UserConnContext) SetOperationID(operationID string) {
c.Req.URL.Query().Set(OperationID, operationID)
values := c.Req.URL.Query()
values.Set(OperationID, operationID)
c.Req.URL.RawQuery = values.Encode()
}
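This fix is needed because the url.Values returned by Query() is a freshly parsed copy on every call, so mutating it has no effect unless RawQuery is rewritten; a tiny standalone illustration (hypothetical URL):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("ws://example.com/ws?token=abc")
	u.Query().Set("operationID", "op-1") // lost: Query() returns a copy
	fmt.Println(u.RawQuery)              // token=abc

	values := u.Query()
	values.Set("operationID", "op-1")
	u.RawQuery = values.Encode() // persists, as in SetOperationID above
	fmt.Println(u.RawQuery)      // operationID=op-1&token=abc
}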
func (c *UserConnContext) GetToken() string {

@ -33,7 +33,6 @@ import (
"github.com/OpenIMSDK/tools/utils"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
)
@ -69,9 +68,10 @@ func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
s.LongConnServer = LongConnServer
}
func NewServer(rpcPort int, longConnServer LongConnServer) *Server {
func NewServer(rpcPort int, proPort int, longConnServer LongConnServer) *Server {
return &Server{
rpcPort: rpcPort,
prometheusPort: proPort,
LongConnServer: longConnServer,
pushTerminal: []int{constant.IOSPlatformID, constant.AndroidPlatformID},
}
@ -158,7 +158,6 @@ func (s *Server) SuperGroupOnlineBatchPushOneMsg(
} else {
if utils.IsContainInt(client.PlatformID, s.pushTerminal) {
tempT.OnlinePush = true
prome.Inc(prome.MsgOnlinePushSuccessCounter)
resp = append(resp, temp)
}
}

@ -41,7 +41,7 @@ func RunWsAndServer(rpcPort, wsPort, prometheusPort int) error {
if err != nil {
return err
}
hubServer := NewServer(rpcPort, longServer)
hubServer := NewServer(rpcPort, prometheusPort, longServer)
go func() {
err := hubServer.Start()
if err != nil {

@ -16,6 +16,7 @@ package msggateway
import (
"context"
"sync"
"github.com/OpenIMSDK/protocol/push"
"github.com/OpenIMSDK/tools/discoveryregistry"
@ -49,6 +50,27 @@ func (r *Req) String() string {
return utils.StructToJsonString(tReq)
}
var reqPool = sync.Pool{
New: func() any {
return new(Req)
},
}
func getReq() *Req {
req := reqPool.Get().(*Req)
req.Data = nil
req.MsgIncr = ""
req.OperationID = ""
req.ReqIdentifier = 0
req.SendID = ""
req.Token = ""
return req
}
func freeReq(req *Req) {
reqPool.Put(req)
}
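The field resets in getReq matter because sync.Pool hands back previously used objects with their old values intact; a small standalone sketch of the same reset-on-get pattern (illustrative type, not the project's Req):

package main

import (
	"fmt"
	"sync"
)

type req struct{ SendID string }

var pool = sync.Pool{New: func() any { return new(req) }}

func get() *req {
	r := pool.Get().(*req)
	r.SendID = "" // clear leftovers from the previous user of this object
	return r
}

func main() {
	a := get()
	a.SendID = "user-1"
	pool.Put(a)

	b := get() // may be the same object; without the reset it could still say "user-1"
	fmt.Printf("%q\n", b.SendID)
}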
type Resp struct {
ReqIdentifier int32 `json:"reqIdentifier"`
MsgIncr string `json:"msgIncr"`
@ -69,12 +91,12 @@ func (r *Resp) String() string {
}
type MessageHandler interface {
GetSeq(context context.Context, data Req) ([]byte, error)
SendMessage(context context.Context, data Req) ([]byte, error)
SendSignalMessage(context context.Context, data Req) ([]byte, error)
PullMessageBySeqList(context context.Context, data Req) ([]byte, error)
UserLogout(context context.Context, data Req) ([]byte, error)
SetUserDeviceBackground(context context.Context, data Req) ([]byte, bool, error)
GetSeq(context context.Context, data *Req) ([]byte, error)
SendMessage(context context.Context, data *Req) ([]byte, error)
SendSignalMessage(context context.Context, data *Req) ([]byte, error)
PullMessageBySeqList(context context.Context, data *Req) ([]byte, error)
UserLogout(context context.Context, data *Req) ([]byte, error)
SetUserDeviceBackground(context context.Context, data *Req) ([]byte, bool, error)
}
var _ MessageHandler = (*GrpcHandler)(nil)
@ -94,7 +116,7 @@ func NewGrpcHandler(validate *validator.Validate, client discoveryregistry.SvcDi
}
}
func (g GrpcHandler) GetSeq(context context.Context, data Req) ([]byte, error) {
func (g GrpcHandler) GetSeq(context context.Context, data *Req) ([]byte, error) {
req := sdkws.GetMaxSeqReq{}
if err := proto.Unmarshal(data.Data, &req); err != nil {
return nil, err
@ -113,7 +135,7 @@ func (g GrpcHandler) GetSeq(context context.Context, data Req) ([]byte, error) {
return c, nil
}
func (g GrpcHandler) SendMessage(context context.Context, data Req) ([]byte, error) {
func (g GrpcHandler) SendMessage(context context.Context, data *Req) ([]byte, error) {
msgData := sdkws.MsgData{}
if err := proto.Unmarshal(data.Data, &msgData); err != nil {
return nil, err
@ -133,7 +155,7 @@ func (g GrpcHandler) SendMessage(context context.Context, data Req) ([]byte, err
return c, nil
}
func (g GrpcHandler) SendSignalMessage(context context.Context, data Req) ([]byte, error) {
func (g GrpcHandler) SendSignalMessage(context context.Context, data *Req) ([]byte, error) {
resp, err := g.msgRpcClient.SendMsg(context, nil)
if err != nil {
return nil, err
@ -145,7 +167,7 @@ func (g GrpcHandler) SendSignalMessage(context context.Context, data Req) ([]byt
return c, nil
}
func (g GrpcHandler) PullMessageBySeqList(context context.Context, data Req) ([]byte, error) {
func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) {
req := sdkws.PullMessageBySeqsReq{}
if err := proto.Unmarshal(data.Data, &req); err != nil {
return nil, err
@ -164,7 +186,7 @@ func (g GrpcHandler) PullMessageBySeqList(context context.Context, data Req) ([]
return c, nil
}
func (g GrpcHandler) UserLogout(context context.Context, data Req) ([]byte, error) {
func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, error) {
req := push.DelUserPushTokenReq{}
if err := proto.Unmarshal(data.Data, &req); err != nil {
return nil, err
@ -180,7 +202,7 @@ func (g GrpcHandler) UserLogout(context context.Context, data Req) ([]byte, erro
return c, nil
}
func (g GrpcHandler) SetUserDeviceBackground(_ context.Context, data Req) ([]byte, bool, error) {
func (g GrpcHandler) SetUserDeviceBackground(_ context.Context, data *Req) ([]byte, bool, error) {
req := sdkws.SetAppBackgroundStatusReq{}
if err := proto.Unmarshal(data.Data, &req); err != nil {
return nil, false, err

@ -17,6 +17,7 @@ package msggateway
import (
"context"
"errors"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"net/http"
"strconv"
"sync"
@ -74,8 +75,8 @@ type WsServer struct {
kickHandlerChan chan *kickHandler
clients *UserMap
clientPool sync.Pool
onlineUserNum int64
onlineUserConnNum int64
onlineUserNum atomic.Int64
onlineUserConnNum atomic.Int64
handshakeTimeout time.Duration
hubServer *Server
validate *validator.Validate
@ -220,36 +221,45 @@ func (ws *WsServer) registerClient(client *Client) {
if !userOK {
ws.clients.Set(client.UserID, client)
log.ZDebug(client.ctx, "user not exist", "userID", client.UserID, "platformID", client.PlatformID)
atomic.AddInt64(&ws.onlineUserNum, 1)
atomic.AddInt64(&ws.onlineUserConnNum, 1)
prom_metrics.OnlineUserGauge.Add(1)
ws.onlineUserNum.Add(1)
ws.onlineUserConnNum.Add(1)
} else {
i := &kickHandler{
clientOK: clientOK,
oldClients: oldClients,
newClient: client,
}
ws.kickHandlerChan <- i
ws.multiTerminalLoginChecker(clientOK, oldClients, client)
log.ZDebug(client.ctx, "user exist", "userID", client.UserID, "platformID", client.PlatformID)
if clientOK {
ws.clients.Set(client.UserID, client)
// a connection from the same platform already exists
log.ZInfo(client.ctx, "repeat login", "userID", client.UserID, "platformID", client.PlatformID, "old remote addr", getRemoteAdders(oldClients))
atomic.AddInt64(&ws.onlineUserConnNum, 1)
ws.onlineUserConnNum.Add(1)
} else {
ws.clients.Set(client.UserID, client)
atomic.AddInt64(&ws.onlineUserConnNum, 1)
ws.onlineUserConnNum.Add(1)
}
}
ws.sendUserOnlineInfoToOtherNode(client.ctx, client)
ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
_ = ws.sendUserOnlineInfoToOtherNode(client.ctx, client)
}()
wg.Add(1)
go func() {
defer wg.Done()
ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
}()
wg.Wait()
log.ZInfo(
client.ctx,
"user online",
"online user Num",
ws.onlineUserNum,
ws.onlineUserNum.Load(),
"online user conn Num",
ws.onlineUserConnNum,
ws.onlineUserConnNum.Load(),
)
}
@ -282,7 +292,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
if clientOK {
isDeleteUser := ws.clients.deleteClients(newClient.UserID, oldClients)
if isDeleteUser {
atomic.AddInt64(&ws.onlineUserNum, -1)
ws.onlineUserNum.Add(-1)
}
for _, c := range oldClients {
err := c.KickOnlineMessage()
@ -350,18 +360,19 @@ func (ws *WsServer) unregisterClient(client *Client) {
defer ws.clientPool.Put(client)
isDeleteUser := ws.clients.delete(client.UserID, client.ctx.GetRemoteAddr())
if isDeleteUser {
atomic.AddInt64(&ws.onlineUserNum, -1)
ws.onlineUserNum.Add(-1)
prom_metrics.OnlineUserGauge.Dec()
}
atomic.AddInt64(&ws.onlineUserConnNum, -1)
ws.onlineUserConnNum.Add(-1)
ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num", ws.onlineUserNum, "online user conn Num",
ws.onlineUserConnNum,
ws.onlineUserConnNum.Load(),
)
}
func (ws *WsServer) wsHandler(w http.ResponseWriter, r *http.Request) {
connContext := newContext(w, r)
if ws.onlineUserConnNum >= ws.wsMaxConnNum {
if ws.onlineUserConnNum.Load() >= ws.wsMaxConnNum {
httpError(connContext, errs.ErrConnOverMaxNumLimit)
return
}

@ -15,10 +15,17 @@
package msgtransfer
import (
"errors"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"log"
"net/http"
"sync"
"github.com/OpenIMSDK/tools/mw"
@ -29,7 +36,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/db/relation"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/unrelation"
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
)
@ -79,7 +85,6 @@ func StartTransfer(prometheusPort int) error {
conversationRpcClient := rpcclient.NewConversationRpcClient(client)
groupRpcClient := rpcclient.NewGroupRpcClient(client)
msgTransfer := NewMsgTransfer(chatLogDatabase, msgDatabase, &conversationRpcClient, &groupRpcClient)
msgTransfer.initPrometheus()
return msgTransfer.Start(prometheusPort)
}
@ -93,21 +98,13 @@ func NewMsgTransfer(chatLogDatabase controller.ChatLogDatabase,
}
}
func (m *MsgTransfer) initPrometheus() {
prome.NewSeqGetSuccessCounter()
prome.NewSeqGetFailedCounter()
prome.NewSeqSetSuccessCounter()
prome.NewSeqSetFailedCounter()
prome.NewMsgInsertRedisSuccessCounter()
prome.NewMsgInsertRedisFailedCounter()
prome.NewMsgInsertMongoSuccessCounter()
prome.NewMsgInsertMongoFailedCounter()
}
func (m *MsgTransfer) Start(prometheusPort int) error {
var wg sync.WaitGroup
wg.Add(1)
fmt.Println("start msg transfer", "prometheusPort:", prometheusPort)
if prometheusPort <= 0 {
return errors.New("prometheusPort not correct")
}
if config.Config.ChatPersistenceMysql {
// go m.persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(m.persistentCH)
} else {
@ -116,10 +113,21 @@ func (m *MsgTransfer) Start(prometheusPort int) error {
go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyCH)
go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyMongoCH)
// go m.modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(m.modifyCH)
err := prome.StartPrometheusSrv(prometheusPort)
/*err := prome.StartPrometheusSrv(prometheusPort)
if err != nil {
return err
}*/
////////////////////////////
if config.Config.Prometheus.Enable {
reg := prometheus.NewRegistry()
reg.MustRegister(
collectors.NewGoCollector(),
)
reg.MustRegister(prom_metrics.GetGrpcCusMetrics("Transfer")...)
http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil))
}
////////////////////////////////////////
wg.Wait()
return nil
}
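One caveat with the block above: http.ListenAndServe blocks and log.Fatal exits the process when it returns, so wg.Wait() is only reached when Prometheus is disabled. A hedged alternative sketch, serving the same registry from a goroutine so the consumer goroutines keep running (same calls as above; only the goroutine and log.Println are new):

	if config.Config.Prometheus.Enable {
		reg := prometheus.NewRegistry()
		reg.MustRegister(collectors.NewGoCollector())
		reg.MustRegister(prom_metrics.GetGrpcCusMetrics("Transfer")...)
		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
		go func() {
			// Background listener keeps Start from blocking before wg.Wait().
			log.Println(http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil))
		}()
	}
	wg.Wait()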

@ -16,6 +16,7 @@ package msgtransfer
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"github.com/IBM/sarama"
"google.golang.org/protobuf/proto"
@ -74,6 +75,9 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(
"conversationID",
msgFromMQ.ConversationID,
)
prom_metrics.MsgInsertMongoFailedCounter.Inc()
} else {
prom_metrics.MsgInsertMongoSuccessCounter.Inc()
}
var seqs []int64
for _, msg := range msgFromMQ.MsgData {

@ -14,10 +14,6 @@
package push
import (
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
)
type Consumer struct {
pushCh ConsumerHandler
successCount uint64
@ -29,11 +25,6 @@ func NewConsumer(pusher *Pusher) *Consumer {
}
}
func (c *Consumer) initPrometheus() {
prome.NewMsgOfflinePushSuccessCounter()
prome.NewMsgOfflinePushFailedCounter()
}
func (c *Consumer) Start() {
// statistics.NewStatistics(&c.successCount, config.Config.ModuleName.PushName, fmt.Sprintf("%d second push to
// msg_gateway count", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)

@ -0,0 +1,17 @@
package dummy
import (
"context"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
)
func NewClient() *Dummy {
return &Dummy{}
}
type Dummy struct {
}
func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *offlinepush.Opts) error {
return nil
}

@ -67,7 +67,6 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
go func() {
defer wg.Done()
consumer := NewConsumer(pusher)
consumer.initPrometheus()
consumer.Start()
}()
wg.Wait()

@ -19,11 +19,8 @@ import (
"encoding/json"
"errors"
"github.com/OpenIMSDK/protocol/conversation"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/OpenIMSDK/protocol/constant"
"github.com/OpenIMSDK/protocol/conversation"
"github.com/OpenIMSDK/protocol/msggateway"
"github.com/OpenIMSDK/protocol/sdkws"
"github.com/OpenIMSDK/tools/discoveryregistry"
@ -32,6 +29,7 @@ import (
"github.com/OpenIMSDK/tools/utils"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/dummy"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/fcm"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/getui"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/jpush"
@ -39,7 +37,8 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
)
@ -52,7 +51,6 @@ type Pusher struct {
msgRpcClient *rpcclient.MessageRpcClient
conversationRpcClient *rpcclient.ConversationRpcClient
groupRpcClient *rpcclient.GroupRpcClient
successCount int
}
var errNoOfflinePusher = errors.New("no offlinePusher is configured")
@ -82,6 +80,8 @@ func NewOfflinePusher(cache cache.MsgModel) offlinepush.OfflinePusher {
offlinePusher = fcm.NewClient(cache)
case "jpush":
offlinePusher = jpush.NewClient()
default:
offlinePusher = dummy.NewClient()
}
return offlinePusher
}
@ -101,24 +101,29 @@ func (p *Pusher) Push2User(ctx context.Context, userIDs []string, msg *sdkws.Msg
if err := callbackOnlinePush(ctx, userIDs, msg); err != nil {
return err
}
// push
wsResults, err := p.GetConnsAndOnlinePush(ctx, msg, userIDs)
if err != nil {
return err
}
isOfflinePush := utils.GetSwitchFromOptions(msg.Options, constant.IsOfflinePush)
log.ZDebug(ctx, "push_result", "ws push result", wsResults, "sendData", msg, "isOfflinePush", isOfflinePush, "push_to_userID", userIDs)
p.successCount++
if isOfflinePush {
for _, v := range wsResults {
if msg.SendID != v.UserID && (!v.OnlinePush) {
if err := callbackOfflinePush(ctx, userIDs, msg, &[]string{}); err != nil {
return err
}
err = p.offlinePushMsg(ctx, msg.SendID, msg, []string{v.UserID})
if err != nil {
return err
}
if !isOfflinePush {
return nil
}
for _, v := range wsResults {
if msg.SendID != v.UserID && (!v.OnlinePush) {
if err = callbackOfflinePush(ctx, userIDs, msg, &[]string{}); err != nil {
return err
}
err = p.offlinePushMsg(ctx, msg.SendID, msg, []string{v.UserID})
if err != nil {
return err
}
}
}
@ -137,14 +142,16 @@ func (p *Pusher) UnmarshalNotificationElem(bytes []byte, t interface{}) error {
func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get super group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
var pushToUserIDs []string
if err := callbackBeforeSuperGroupOnlinePush(ctx, groupID, msg, &pushToUserIDs); err != nil {
if err = callbackBeforeSuperGroupOnlinePush(ctx, groupID, msg, &pushToUserIDs); err != nil {
return err
}
if len(pushToUserIDs) == 0 {
pushToUserIDs, err = p.groupLocalCache.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return err
}
switch msg.ContentType {
case constant.MemberQuitNotification:
var tips sdkws.MemberQuitTips
@ -152,7 +159,7 @@ func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws
return err
}
defer func(groupID string, userIDs []string) {
if err := p.DeleteMemberAndSetConversationSeq(ctx, groupID, userIDs); err != nil {
if err = p.DeleteMemberAndSetConversationSeq(ctx, groupID, userIDs); err != nil {
log.ZError(ctx, "MemberQuitNotification DeleteMemberAndSetConversationSeq", err, "groupID", groupID, "userIDs", userIDs)
}
}(groupID, []string{tips.QuitUser.UserID})
@ -164,7 +171,7 @@ func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws
}
kickedUsers := utils.Slice(tips.KickedUserList, func(e *sdkws.GroupMemberFullInfo) string { return e.UserID })
defer func(groupID string, userIDs []string) {
if err := p.DeleteMemberAndSetConversationSeq(ctx, groupID, userIDs); err != nil {
if err = p.DeleteMemberAndSetConversationSeq(ctx, groupID, userIDs); err != nil {
log.ZError(ctx, "MemberKickedNotification DeleteMemberAndSetConversationSeq", err, "groupID", groupID, "userIDs", userIDs)
}
}(groupID, kickedUsers)
@ -180,48 +187,61 @@ func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws
ctx = mcontext.WithOpUserIDContext(ctx, config.Config.Manager.UserID[0])
}
defer func(groupID string) {
if err := p.groupRpcClient.DismissGroup(ctx, groupID); err != nil {
if err = p.groupRpcClient.DismissGroup(ctx, groupID); err != nil {
log.ZError(ctx, "DismissGroup Notification clear members", err, "groupID", groupID)
}
}(groupID)
}
}
}
wsResults, err := p.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil {
return err
}
log.ZDebug(ctx, "get conn and online push success", "result", wsResults, "msg", msg)
p.successCount++
isOfflinePush := utils.GetSwitchFromOptions(msg.Options, constant.IsOfflinePush)
if isOfflinePush {
var onlineSuccessUserIDs []string
var WebAndPcBackgroundUserIDs []string
onlineSuccessUserIDs = append(onlineSuccessUserIDs, msg.SendID)
var (
onlineSuccessUserIDs = []string{msg.SendID}
webAndPcBackgroundUserIDs []string
)
for _, v := range wsResults {
if v.OnlinePush && v.UserID != msg.SendID {
onlineSuccessUserIDs = append(onlineSuccessUserIDs, v.UserID)
}
if !v.OnlinePush {
if len(v.Resp) != 0 {
for _, singleResult := range v.Resp {
if singleResult.ResultCode == -2 {
if constant.PlatformIDToName(int(singleResult.RecvPlatFormID)) == constant.TerminalPC ||
singleResult.RecvPlatFormID == constant.WebPlatformID {
WebAndPcBackgroundUserIDs = append(WebAndPcBackgroundUserIDs, v.UserID)
}
}
}
if v.OnlinePush {
continue
}
if len(v.Resp) == 0 {
continue
}
for _, singleResult := range v.Resp {
if singleResult.ResultCode != -2 {
continue
}
isPC := constant.PlatformIDToName(int(singleResult.RecvPlatFormID)) == constant.TerminalPC
isWebID := singleResult.RecvPlatFormID == constant.WebPlatformID
if isPC || isWebID {
webAndPcBackgroundUserIDs = append(webAndPcBackgroundUserIDs, v.UserID)
}
}
}
needOfflinePushUserIDs := utils.DifferenceString(onlineSuccessUserIDs, pushToUserIDs)
if msg.ContentType != constant.SignalingNotification {
notNotificationUserIDs, err := p.conversationLocalCache.GetRecvMsgNotNotifyUserIDs(ctx, groupID)
if err != nil {
// log.ZError(ctx, "GetRecvMsgNotNotifyUserIDs failed", err, "groupID", groupID)
return err
}
needOfflinePushUserIDs = utils.SliceSub(needOfflinePushUserIDs, notNotificationUserIDs)
}
// Use offline push messaging
@ -231,6 +251,7 @@ func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws
if err != nil {
return err
}
if len(offlinePushUserIDs) > 0 {
needOfflinePushUserIDs = offlinePushUserIDs
}
@ -247,8 +268,8 @@ func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws
log.ZError(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg)
return err
}
if _, err := p.GetConnsAndOnlinePush(ctx, msg, utils.IntersectString(resp.UserIDs, WebAndPcBackgroundUserIDs)); err != nil {
log.ZError(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg, "userIDs", utils.IntersectString(needOfflinePushUserIDs, WebAndPcBackgroundUserIDs))
if _, err := p.GetConnsAndOnlinePush(ctx, msg, utils.IntersectString(resp.UserIDs, webAndPcBackgroundUserIDs)); err != nil {
log.ZError(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg, "userIDs", utils.IntersectString(needOfflinePushUserIDs, webAndPcBackgroundUserIDs))
return err
}
}
@ -285,10 +306,9 @@ func (p *Pusher) offlinePushMsg(ctx context.Context, conversationID string, msg
}
err = p.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
if err != nil {
prome.Inc(prome.MsgOfflinePushFailedCounter)
prom_metrics.MsgOfflinePushFailedCounter.Inc()
return err
}
prome.Inc(prome.MsgOfflinePushSuccessCounter)
return nil
}
@ -317,15 +337,18 @@ func (p *Pusher) getOfflinePushInfos(conversationID string, msg *sdkws.MsgData)
err = errNoOfflinePusher
return
}
type AtContent struct {
type atContent struct {
Text string `json:"text"`
AtUserList []string `json:"atUserList"`
IsAtSelf bool `json:"isAtSelf"`
}
opts, err = p.GetOfflinePushOpts(msg)
if err != nil {
return
}
if msg.OfflinePushInfo != nil {
title = msg.OfflinePushInfo.Title
content = msg.OfflinePushInfo.Desc
@ -343,9 +366,9 @@ func (p *Pusher) getOfflinePushInfos(conversationID string, msg *sdkws.MsgData)
case constant.File:
title = constant.ContentType2PushContent[int64(msg.ContentType)]
case constant.AtText:
a := AtContent{}
_ = utils.JsonStringToStruct(string(msg.Content), &a)
if utils.IsContain(conversationID, a.AtUserList) {
ac := atContent{}
_ = utils.JsonStringToStruct(string(msg.Content), &ac)
if utils.IsContain(conversationID, ac.AtUserList) {
title = constant.ContentType2PushContent[constant.AtText] + constant.ContentType2PushContent[constant.Common]
} else {
title = constant.ContentType2PushContent[constant.GroupMsg]

@ -16,6 +16,7 @@ package auth
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
@ -73,6 +74,7 @@ func (s *authServer) UserToken(ctx context.Context, req *pbauth.UserTokenReq) (*
if err != nil {
return nil, err
}
prom_metrics.UserLoginCounter.Inc()
resp.Token = token
resp.ExpireTimeSeconds = config.Config.TokenPolicy.Expire * 24 * 60 * 60
return &resp, nil

@ -112,7 +112,7 @@ func (c *conversationServer) SetConversation(ctx context.Context, req *pbconvers
return resp, nil
}
//nolint
// nolint
func (c *conversationServer) SetConversations(ctx context.Context,
req *pbconversation.SetConversationsReq,
) (*pbconversation.SetConversationsResp, error) {

@ -475,11 +475,13 @@ func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGro
return nil, err
}
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" {
e.Nickname = publicUserInfoMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
if e.Nickname == "" {
e.Nickname = userInfo.Nickname
}
if e.FaceURL == "" {
e.FaceURL = userInfo.FaceURL
}
}
return convert.Db2PbGroupMember(e)
})
@ -626,11 +628,13 @@ func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetG
return nil, err
}
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" {
e.Nickname = publicUserInfoMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
if e.Nickname == "" {
e.Nickname = userInfo.Nickname
}
if e.FaceURL == "" {
e.FaceURL = userInfo.FaceURL
}
}
return convert.Db2PbGroupMember(e)
})
@ -686,7 +690,11 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup.
return e.GroupID
})
resp.GroupRequests = utils.Slice(groupRequests, func(e *relationtb.GroupRequestModel) *sdkws.GroupRequest {
return convert.Db2PbGroupRequest(e, userMap[e.UserID], convert.Db2PbGroupInfo(groupMap[e.GroupID], ownerMap[e.GroupID].UserID, groupMemberNumMap[e.GroupID]))
var ownerUserID string
if owner, ok := ownerMap[e.GroupID]; ok {
ownerUserID = owner.UserID
}
return convert.Db2PbGroupRequest(e, userMap[e.UserID], convert.Db2PbGroupInfo(groupMap[e.GroupID], ownerUserID, groupMemberNumMap[e.GroupID]))
})
return resp, nil
}
@ -1052,16 +1060,20 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq)
ownerMemberMap := utils.SliceToMap(ownerMembers, func(e *relationtb.GroupMemberModel) string {
return e.GroupID
})
if ids := utils.Single(groupIDs, utils.Keys(ownerMemberMap)); len(ids) > 0 {
return nil, errs.ErrDatabase.Wrap("group not owner " + strings.Join(ids, ","))
}
groupMemberNumMap, err := s.GroupDatabase.MapGroupMemberNum(ctx, groupIDs)
if err != nil {
return nil, err
}
resp.Groups = utils.Slice(groups, func(group *relationtb.GroupModel) *pbgroup.CMSGroup {
member := ownerMemberMap[group.GroupID]
return convert.Db2PbCMSGroup(group, member.UserID, member.Nickname, uint32(groupMemberNumMap[group.GroupID]))
var (
userID string
username string
)
if member, ok := ownerMemberMap[group.GroupID]; ok {
userID = member.UserID
username = member.Nickname
}
return convert.Db2PbCMSGroup(group, userID, username, groupMemberNumMap[group.GroupID])
})
return resp, nil
}
@ -1073,18 +1085,20 @@ func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGr
return nil, err
}
resp.Total = total
nameMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
return e.UserID, e.Nickname == "" || e.FaceURL == ""
}), true)
if err != nil {
return nil, err
}
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" {
e.Nickname = nameMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = nameMap[e.UserID].FaceURL
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
if e.Nickname == "" {
e.Nickname = userInfo.Nickname
}
if e.FaceURL == "" {
e.FaceURL = userInfo.FaceURL
}
}
return convert.Db2PbGroupMember(e)
})
@ -1113,16 +1127,13 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou
groupIDs := utils.Distinct(utils.Slice(requests, func(e *relationtb.GroupRequestModel) string {
return e.GroupID
}))
groups, err := s.GroupDatabase.FindNotDismissedGroup(ctx, groupIDs)
groups, err := s.GroupDatabase.FindGroup(ctx, groupIDs)
if err != nil {
return nil, err
}
groupMap := utils.SliceToMap(groups, func(e *relationtb.GroupModel) string {
return e.GroupID
})
if ids := utils.Single(groupIDs, utils.Keys(groupMap)); len(ids) > 0 {
return nil, errs.ErrGroupIDNotFound.Wrap(strings.Join(ids, ","))
}
owners, err := s.FindGroupMember(ctx, groupIDs, nil, []int32{constant.GroupOwner})
if err != nil {
return nil, err
@ -1130,15 +1141,16 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou
ownerMap := utils.SliceToMap(owners, func(e *relationtb.GroupMemberModel) string {
return e.GroupID
})
if ids := utils.Single(groupIDs, utils.Keys(ownerMap)); len(ids) > 0 {
return nil, errs.ErrData.Wrap("group no owner", strings.Join(ids, ","))
}
groupMemberNum, err := s.GroupDatabase.MapGroupMemberNum(ctx, groupIDs)
if err != nil {
return nil, err
}
resp.GroupRequests = utils.Slice(requests, func(e *relationtb.GroupRequestModel) *sdkws.GroupRequest {
return convert.Db2PbGroupRequest(e, user, convert.Db2PbGroupInfo(groupMap[e.GroupID], ownerMap[e.GroupID].UserID, uint32(groupMemberNum[e.GroupID])))
var ownerUserID string
if owner, ok := ownerMap[e.GroupID]; ok {
ownerUserID = owner.UserID
}
return convert.Db2PbGroupRequest(e, user, convert.Db2PbGroupInfo(groupMap[e.GroupID], ownerUserID, groupMemberNum[e.GroupID]))
})
return resp, nil
}
@ -1471,17 +1483,19 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge
return nil, err
}
publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
return e.UserID, e.Nickname == ""
return e.UserID, e.Nickname == "" || e.FaceURL == ""
}), true)
if err != nil {
return nil, err
}
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" {
e.Nickname = publicUserInfoMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
if e.Nickname == "" {
e.Nickname = userInfo.Nickname
}
if e.FaceURL == "" {
e.FaceURL = userInfo.FaceURL
}
}
return convert.Db2PbGroupMember(e)
})
@ -1513,11 +1527,13 @@ func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.
return nil, err
}
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" {
e.Nickname = publicUserInfoMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
if e.Nickname == "" {
e.Nickname = userInfo.Nickname
}
if e.FaceURL == "" {
e.FaceURL = userInfo.FaceURL
}
}
return convert.Db2PbGroupMember(e)
})
@ -1553,15 +1569,16 @@ func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req *
ownerMap := utils.SliceToMap(owners, func(e *relationtb.GroupMemberModel) string {
return e.GroupID
})
if ids := utils.Single(groupIDs, utils.Keys(ownerMap)); len(ids) > 0 {
return nil, errs.ErrData.Wrap("group no owner", strings.Join(ids, ","))
}
groupMemberNum, err := s.GroupDatabase.MapGroupMemberNum(ctx, groupIDs)
if err != nil {
return nil, err
}
resp.GroupRequests = utils.Slice(requests, func(e *relationtb.GroupRequestModel) *sdkws.GroupRequest {
return convert.Db2PbGroupRequest(e, nil, convert.Db2PbGroupInfo(groupMap[e.GroupID], ownerMap[e.GroupID].UserID, uint32(groupMemberNum[e.GroupID])))
var ownerUserID string
if owner, ok := ownerMap[e.GroupID]; ok {
ownerUserID = owner.UserID
}
return convert.Db2PbGroupRequest(e, nil, convert.Db2PbGroupInfo(groupMap[e.GroupID], ownerUserID, groupMemberNum[e.GroupID]))
})
resp.Total = total
return resp, nil

@ -30,17 +30,15 @@ type MessageInterceptorFunc func(ctx context.Context, req *msg.SendMsgReq) (*sdk
func MessageHasReadEnabled(_ context.Context, req *msg.SendMsgReq) (*sdkws.MsgData, error) {
switch {
case req.MsgData.ContentType == constant.HasReadReceipt && req.MsgData.SessionType == constant.SingleChatType:
if config.Config.SingleMessageHasReadReceiptEnable {
return req.MsgData, nil
} else {
if !config.Config.SingleMessageHasReadReceiptEnable {
return nil, errs.ErrMessageHasReadDisable.Wrap()
}
return req.MsgData, nil
case req.MsgData.ContentType == constant.HasReadReceipt && req.MsgData.SessionType == constant.SuperGroupChatType:
if config.Config.GroupMessageHasReadReceiptEnable {
return req.MsgData, nil
} else {
if !config.Config.GroupMessageHasReadReceiptEnable {
return nil, errs.ErrMessageHasReadDisable.Wrap()
}
return req.MsgData, nil
}
return req.MsgData, nil
}
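The guard-clause rewrite above keeps MessageHasReadEnabled usable as a MessageInterceptorFunc. A sketch of how such interceptors are typically chained before a send (execInterceptors is a hypothetical helper, not this server's actual implementation):

func execInterceptors(ctx context.Context, req *msg.SendMsgReq, fns []MessageInterceptorFunc) (*sdkws.MsgData, error) {
	for _, fn := range fns {
		data, err := fn(ctx, req)
		if err != nil {
			return nil, err // e.g. errs.ErrMessageHasReadDisable aborts the send
		}
		req.MsgData = data // an interceptor may rewrite the message for the next one
	}
	return req.MsgData, nil
}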

@ -16,6 +16,7 @@ package msg
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
@ -28,8 +29,6 @@ import (
"github.com/OpenIMSDK/tools/log"
"github.com/OpenIMSDK/tools/mcontext"
"github.com/OpenIMSDK/tools/utils"
promepkg "github.com/openimsdk/open-im-server/v3/pkg/common/prome"
)
func (m *msgServer) SendMsg(ctx context.Context, req *pbmsg.SendMsgReq) (resp *pbmsg.SendMsgResp, error error) {
@ -59,9 +58,8 @@ func (m *msgServer) sendMsgSuperGroupChat(
ctx context.Context,
req *pbmsg.SendMsgReq,
) (resp *pbmsg.SendMsgResp, err error) {
promepkg.Inc(promepkg.WorkSuperGroupChatMsgRecvSuccessCounter)
if err = m.messageVerification(ctx, req); err != nil {
promepkg.Inc(promepkg.WorkSuperGroupChatMsgProcessFailedCounter)
prom_metrics.GroupChatMsgProcessFailedCounter.Inc()
return nil, err
}
if err = callbackBeforeSendGroupMsg(ctx, req); err != nil {
@ -80,7 +78,7 @@ func (m *msgServer) sendMsgSuperGroupChat(
if err = callbackAfterSendGroupMsg(ctx, req); err != nil {
log.ZWarn(ctx, "CallbackAfterSendGroupMsg", err)
}
promepkg.Inc(promepkg.WorkSuperGroupChatMsgProcessSuccessCounter)
prom_metrics.GroupChatMsgProcessSuccessCounter.Inc()
resp = &pbmsg.SendMsgResp{}
resp.SendTime = req.MsgData.SendTime
resp.ServerMsgID = req.MsgData.ServerMsgID
@ -133,9 +131,7 @@ func (m *msgServer) sendMsgNotification(
ctx context.Context,
req *pbmsg.SendMsgReq,
) (resp *pbmsg.SendMsgResp, err error) {
promepkg.Inc(promepkg.SingleChatMsgRecvSuccessCounter)
if err := m.MsgDatabase.MsgToMQ(ctx, utils.GenConversationUniqueKeyForSingle(req.MsgData.SendID, req.MsgData.RecvID), req.MsgData); err != nil {
promepkg.Inc(promepkg.SingleChatMsgProcessFailedCounter)
return nil, err
}
resp = &pbmsg.SendMsgResp{
@ -147,7 +143,6 @@ func (m *msgServer) sendMsgNotification(
}
func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq) (resp *pbmsg.SendMsgResp, err error) {
promepkg.Inc(promepkg.SingleChatMsgRecvSuccessCounter)
if err := m.messageVerification(ctx, req); err != nil {
return nil, err
}
@ -166,7 +161,7 @@ func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq
}
}
if !isSend {
promepkg.Inc(promepkg.SingleChatMsgProcessFailedCounter)
prom_metrics.SingleChatMsgProcessFailedCounter.Inc()
return nil, nil
} else {
if err = callbackBeforeSendSingleMsg(ctx, req); err != nil {
@ -176,7 +171,7 @@ func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq
return nil, err
}
if err := m.MsgDatabase.MsgToMQ(ctx, utils.GenConversationUniqueKeyForSingle(req.MsgData.SendID, req.MsgData.RecvID), req.MsgData); err != nil {
promepkg.Inc(promepkg.SingleChatMsgProcessFailedCounter)
prom_metrics.SingleChatMsgProcessFailedCounter.Inc()
return nil, err
}
err = callbackAfterSendSingleMsg(ctx, req)
@ -188,7 +183,7 @@ func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq
ClientMsgID: req.MsgData.ClientMsgID,
SendTime: req.MsgData.SendTime,
}
promepkg.Inc(promepkg.SingleChatMsgProcessSuccessCounter)
prom_metrics.SingleChatMsgProcessSuccessCounter.Inc()
return resp, nil
}
}

@ -28,7 +28,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/unrelation"
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
)
@ -94,27 +93,10 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
}
s.notificationSender = rpcclient.NewNotificationSender(rpcclient.WithLocalSendMsg(s.SendMsg))
s.addInterceptorHandler(MessageHasReadEnabled)
s.initPrometheus()
msg.RegisterMsgServer(server, s)
return nil
}
func (m *msgServer) initPrometheus() {
prome.NewMsgPullFromRedisSuccessCounter()
prome.NewMsgPullFromRedisFailedCounter()
prome.NewMsgPullFromMongoSuccessCounter()
prome.NewMsgPullFromMongoFailedCounter()
prome.NewSingleChatMsgRecvSuccessCounter()
prome.NewGroupChatMsgRecvSuccessCounter()
prome.NewWorkSuperGroupChatMsgRecvSuccessCounter()
prome.NewSingleChatMsgProcessSuccessCounter()
prome.NewSingleChatMsgProcessFailedCounter()
prome.NewGroupChatMsgProcessSuccessCounter()
prome.NewGroupChatMsgProcessFailedCounter()
prome.NewWorkSuperGroupChatMsgProcessSuccessCounter()
prome.NewWorkSuperGroupChatMsgProcessFailedCounter()
}
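The initPrometheus registration removed above is replaced by counters that live in the prom_metrics package and are incremented directly at the call sites. A minimal sketch, assuming the counters are declared with promauto (the metric name and help text below are assumptions, not the package's actual values):

package prom_metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Registered once at import time, so callers only need
// prom_metrics.SingleChatMsgProcessSuccessCounter.Inc().
var SingleChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
	Name: "single_chat_msg_process_success_total", // assumed name
	Help: "Number of single chat messages processed successfully.",
})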
func (m *msgServer) conversationAndGetRecvID(conversation *conversation.Conversation, userID string) (recvID string) {
if conversation.ConversationType == constant.SingleChatType ||
conversation.ConversationType == constant.NotificationChatType {

@ -67,7 +67,7 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
var o s3.Interface
switch config.Config.Object.Enable {
case "minio":
o, err = minio.NewMinio()
o, err = minio.NewMinio(cache.NewMinioCache(rdb))
case "cos":
o, err = cos.NewCos()
case "oss":
@ -78,11 +78,17 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
if err != nil {
return err
}
//specialerror.AddErrHandler(func(err error) errs.CodeError {
// if o.IsNotFound(err) {
// return errs.ErrRecordNotFound
// }
// return nil
//})
third.RegisterThirdServer(server, &thirdServer{
apiURL: apiURL,
thirdDatabase: controller.NewThirdDatabase(cache.NewMsgCacheModel(rdb), db),
userRpcClient: rpcclient.NewUserRpcClient(client),
s3dataBase: controller.NewS3Database(o, relation.NewObjectInfo(db)),
s3dataBase: controller.NewS3Database(rdb, o, relation.NewObjectInfo(db)),
defaultExpire: time.Hour * 24 * 7,
})
return nil

@ -16,49 +16,126 @@ package tools
import (
"context"
"time"
"github.com/OpenIMSDK/tools/log"
"github.com/OpenIMSDK/tools/mcontext"
"github.com/OpenIMSDK/tools/utils"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"math/rand"
"time"
)
//func (c *MsgTool) ConversationsDestructMsgs() {
// log.ZInfo(context.Background(), "start msg destruct cron task")
// ctx := mcontext.NewCtx(utils.GetSelfFuncName())
// conversations, err := c.conversationDatabase.GetConversationIDsNeedDestruct(ctx)
// if err != nil {
// log.ZError(ctx, "get conversation id need destruct failed", err)
// return
// }
// log.ZDebug(context.Background(), "nums conversations need destruct", "nums", len(conversations))
// for _, conversation := range conversations {
// ctx = mcontext.NewCtx(utils.GetSelfFuncName() + "-" + utils.OperationIDGenerator() + "-" + conversation.ConversationID + "-" + conversation.OwnerUserID)
// log.ZDebug(
// ctx,
// "UserMsgsDestruct",
// "conversationID",
// conversation.ConversationID,
// "ownerUserID",
// conversation.OwnerUserID,
// "msgDestructTime",
// conversation.MsgDestructTime,
// "lastMsgDestructTime",
// conversation.LatestMsgDestructTime,
// )
// now := time.Now()
// seqs, err := c.msgDatabase.UserMsgsDestruct(ctx, conversation.OwnerUserID, conversation.ConversationID, conversation.MsgDestructTime, conversation.LatestMsgDestructTime)
// if err != nil {
// log.ZError(ctx, "user msg destruct failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
// continue
// }
// if len(seqs) > 0 {
// if err := c.conversationDatabase.UpdateUsersConversationFiled(ctx, []string{conversation.OwnerUserID}, conversation.ConversationID, map[string]interface{}{"latest_msg_destruct_time": now}); err != nil {
// log.ZError(ctx, "updateUsersConversationFiled failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
// continue
// }
// if err := c.msgNotificationSender.UserDeleteMsgsNotification(ctx, conversation.OwnerUserID, conversation.ConversationID, seqs); err != nil {
// log.ZError(ctx, "userDeleteMsgsNotification failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
// }
// }
// }
//}
func (c *MsgTool) ConversationsDestructMsgs() {
log.ZInfo(context.Background(), "start msg destruct cron task")
ctx := mcontext.NewCtx(utils.GetSelfFuncName())
conversations, err := c.conversationDatabase.GetConversationIDsNeedDestruct(ctx)
num, err := c.conversationDatabase.GetAllConversationIDsNumber(ctx)
if err != nil {
log.ZError(ctx, "get conversation id need destruct failed", err)
log.ZError(ctx, "GetAllConversationIDsNumber failed", err)
return
}
log.ZDebug(context.Background(), "nums conversations need destruct", "nums", len(conversations))
for _, conversation := range conversations {
ctx = mcontext.NewCtx(utils.GetSelfFuncName() + "-" + utils.OperationIDGenerator() + "-" + conversation.ConversationID + "-" + conversation.OwnerUserID)
log.ZDebug(
ctx,
"UserMsgsDestruct",
"conversationID",
conversation.ConversationID,
"ownerUserID",
conversation.OwnerUserID,
"msgDestructTime",
conversation.MsgDestructTime,
"lastMsgDestructTime",
conversation.LatestMsgDestructTime,
)
now := time.Now()
seqs, err := c.msgDatabase.UserMsgsDestruct(ctx, conversation.OwnerUserID, conversation.ConversationID, conversation.MsgDestructTime, conversation.LatestMsgDestructTime)
const batchNum = 50
log.ZDebug(ctx, "GetAllConversationIDsNumber", "num", num)
if num == 0 {
return
}
count := int(num/batchNum + num/batchNum/2)
if count < 1 {
count = 1
}
maxPage := 1 + num/batchNum
if num%batchNum != 0 {
maxPage++
}
for i := 0; i < count; i++ {
pageNumber := rand.Int63() % maxPage
conversationIDs, err := c.conversationDatabase.PageConversationIDs(ctx, int32(pageNumber), batchNum)
if err != nil {
log.ZError(ctx, "user msg destruct failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber)
continue
}
if len(seqs) > 0 {
if err := c.conversationDatabase.UpdateUsersConversationFiled(ctx, []string{conversation.OwnerUserID}, conversation.ConversationID, map[string]interface{}{"latest_msg_destruct_time": now}); err != nil {
log.ZError(ctx, "updateUsersConversationFiled failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber, "conversationIDsNum", len(conversationIDs), "conversationIDs", conversationIDs)
if len(conversationIDs) == 0 {
continue
}
conversations, err := c.conversationDatabase.GetConversationsByConversationID(ctx, conversationIDs)
if err != nil {
log.ZError(ctx, "GetConversationsByConversationID failed", err, "conversationIDs", conversationIDs)
continue
}
temp := make([]*relation.ConversationModel, 0, len(conversations))
for i, conversation := range conversations {
if conversation.IsMsgDestruct && conversation.MsgDestructTime != 0 && (time.Now().Unix() > (conversation.MsgDestructTime+conversation.LatestMsgDestructTime.Unix()+8*60*60) || conversation.LatestMsgDestructTime.IsZero()) {
temp = append(temp, conversations[i])
}
}
for _, conversation := range temp {
ctx = mcontext.NewCtx(utils.GetSelfFuncName() + "-" + utils.OperationIDGenerator() + "-" + conversation.ConversationID + "-" + conversation.OwnerUserID)
log.ZDebug(
ctx,
"UserMsgsDestruct",
"conversationID",
conversation.ConversationID,
"ownerUserID",
conversation.OwnerUserID,
"msgDestructTime",
conversation.MsgDestructTime,
"lastMsgDestructTime",
conversation.LatestMsgDestructTime,
)
now := time.Now()
seqs, err := c.msgDatabase.UserMsgsDestruct(ctx, conversation.OwnerUserID, conversation.ConversationID, conversation.MsgDestructTime, conversation.LatestMsgDestructTime)
if err != nil {
log.ZError(ctx, "user msg destruct failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
continue
}
if err := c.msgNotificationSender.UserDeleteMsgsNotification(ctx, conversation.OwnerUserID, conversation.ConversationID, seqs); err != nil {
log.ZError(ctx, "userDeleteMsgsNotification failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
if len(seqs) > 0 {
if err := c.conversationDatabase.UpdateUsersConversationFiled(ctx, []string{conversation.OwnerUserID}, conversation.ConversationID, map[string]interface{}{"latest_msg_destruct_time": now}); err != nil {
log.ZError(ctx, "updateUsersConversationFiled failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
continue
}
if err := c.msgNotificationSender.UserDeleteMsgsNotification(ctx, conversation.OwnerUserID, conversation.ConversationID, seqs); err != nil {
log.ZError(ctx, "userDeleteMsgsNotification failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID)
}
}
}
}
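A worked example of the sampling parameters above, using the same integer arithmetic (the totals are illustrative): with num = 120 conversations and batchNum = 50, count = 120/50 + (120/50)/2 = 2 + 1 = 3 random pages are scanned per run, and maxPage = 1 + 120/50 = 3, incremented to 4 because 120%50 != 0, so each pageNumber is drawn from [0, 4).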

@ -39,13 +39,13 @@ func StartTask() error {
log.ZInfo(context.Background(), "start chatRecordsClearTime cron task", "cron config", config.Config.ChatRecordsClearTime)
_, err = c.AddFunc(config.Config.ChatRecordsClearTime, msgTool.AllConversationClearMsgAndFixSeq)
if err != nil {
fmt.Println("start allConversationClearMsgAndFixSeq cron failed", err.Error(), config.Config.ChatRecordsClearTime)
log.ZError(context.Background(), "start allConversationClearMsgAndFixSeq cron failed", err)
panic(err)
}
log.ZInfo(context.Background(), "start msgDestruct cron task", "cron config", config.Config.MsgDestructTime)
_, err = c.AddFunc(config.Config.MsgDestructTime, msgTool.ConversationsDestructMsgs)
if err != nil {
fmt.Println("start conversationsDestructMsgs cron failed", err.Error(), config.Config.ChatRecordsClearTime)
log.ZError(context.Background(), "start conversationsDestructMsgs cron failed", err)
panic(err)
}
c.Start()

@ -29,6 +29,7 @@ import (
"github.com/OpenIMSDK/tools/mw"
"github.com/OpenIMSDK/tools/tx"
"github.com/OpenIMSDK/tools/utils"
"math/rand"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
@ -102,18 +103,55 @@ func InitMsgTool() (*MsgTool, error) {
return msgTool, nil
}
//func (c *MsgTool) AllConversationClearMsgAndFixSeq() {
// ctx := mcontext.NewCtx(utils.GetSelfFuncName())
// log.ZInfo(ctx, "============================ start del cron task ============================")
// conversationIDs, err := c.conversationDatabase.GetAllConversationIDs(ctx)
// if err != nil {
// log.ZError(ctx, "GetAllConversationIDs failed", err)
// return
// }
// for _, conversationID := range conversationIDs {
// conversationIDs = append(conversationIDs, utils.GetNotificationConversationIDByConversationID(conversationID))
// }
// c.ClearConversationsMsg(ctx, conversationIDs)
// log.ZInfo(ctx, "============================ start del cron finished ============================")
//}
func (c *MsgTool) AllConversationClearMsgAndFixSeq() {
ctx := mcontext.NewCtx(utils.GetSelfFuncName())
log.ZInfo(ctx, "============================ start del cron task ============================")
conversationIDs, err := c.conversationDatabase.GetAllConversationIDs(ctx)
num, err := c.conversationDatabase.GetAllConversationIDsNumber(ctx)
if err != nil {
log.ZError(ctx, "GetAllConversationIDs failed", err)
log.ZError(ctx, "GetAllConversationIDsNumber failed", err)
return
}
for _, conversationID := range conversationIDs {
conversationIDs = append(conversationIDs, utils.GetNotificationConversationIDByConversationID(conversationID))
const batchNum = 50
log.ZDebug(ctx, "GetAllConversationIDsNumber", "num", num)
if num == 0 {
return
}
count := int(num/batchNum + num/batchNum/2)
if count < 1 {
count = 1
}
maxPage := 1 + num/batchNum
if num%batchNum != 0 {
maxPage++
}
for i := 0; i < count; i++ {
pageNumber := rand.Int63() % maxPage
conversationIDs, err := c.conversationDatabase.PageConversationIDs(ctx, int32(pageNumber), batchNum)
if err != nil {
log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber)
continue
}
log.ZDebug(ctx, "PageConversationIDs failed", "pageNumber", pageNumber, "conversationIDsNum", len(conversationIDs), "conversationIDs", conversationIDs)
if len(conversationIDs) == 0 {
continue
}
c.ClearConversationsMsg(ctx, conversationIDs)
}
c.ClearConversationsMsg(ctx, conversationIDs)
log.ZInfo(ctx, "============================ start del cron finished ============================")
}

@ -41,7 +41,7 @@ type SendMsgReq struct {
type BatchSendMsgReq struct {
SendMsg
IsSendAll bool `json:"isSendAll"`
RecvIDs []string `json:"recvIDs" binding:"required"`
RecvIDs []string `json:"recvIDs" binding:"required"`
}
type BatchSendMsgResp struct {

@ -16,9 +16,11 @@ package cmd
import (
"fmt"
"github.com/OpenIMSDK/protocol/constant"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/spf13/cobra"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
)
type ApiCmd struct {
@ -32,9 +34,9 @@ func NewApiCmd() *ApiCmd {
return ret
}
func (a *ApiCmd) AddApi(f func(port int) error) {
func (a *ApiCmd) AddApi(f func(port int, promPort int) error) {
a.Command.RunE = func(cmd *cobra.Command, args []string) error {
return f(a.getPortFlag(cmd))
return f(a.getPortFlag(cmd), a.getPrometheusPortFlag(cmd))
}
}
@ -42,8 +44,8 @@ func (a *ApiCmd) GetPortFromConfig(portType string) int {
fmt.Println("GetPortFromConfig:", portType)
if portType == constant.FlagPort {
return config2.Config.Api.OpenImApiPort[0]
} else {
return 0
} else if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.ApiPrometheusPort[0]
}
return 0
}
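A minimal usage sketch for the new two-port callback (runAPI is a hypothetical starter; this assumes ApiCmd exposes the root command's Execute):

apiCmd := cmd.NewApiCmd()
apiCmd.AddApi(func(port int, promPort int) error {
	// port serves the HTTP API; promPort exposes the Prometheus metrics endpoint
	return runAPI(port, promPort)
})
if err := apiCmd.Execute(); err != nil {
	panic(err)
}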

@ -66,7 +66,7 @@ func (m *MsgGatewayCmd) GetPortFromConfig(portType string) int {
} else if portType == constant.FlagPort {
return v3config.Config.LongConnSvr.OpenImMessageGatewayPort[0]
} else if portType == constant.FlagPrometheusPort {
return 0
return v3config.Config.Prometheus.MessageGatewayPrometheusPort[0]
} else {
return 0
}

@ -15,6 +15,9 @@
package cmd
import (
"fmt"
"github.com/OpenIMSDK/protocol/constant"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/spf13/cobra"
"github.com/openimsdk/open-im-server/v3/internal/msgtransfer"
@ -40,3 +43,25 @@ func (m *MsgTransferCmd) Exec() error {
m.addRunE()
return m.Execute()
}
func (m *MsgTransferCmd) GetPortFromConfig(portType string) int {
fmt.Println("GetPortFromConfig:", portType)
if portType == constant.FlagPort {
return 0
} else if portType == constant.FlagPrometheusPort {
n := m.getTransferProgressFlagValue()
return config2.Config.Prometheus.MessageTransferPrometheusPort[n]
}
return 0
}
func (m *MsgTransferCmd) AddTransferProgressFlag() {
m.Command.Flags().IntP(constant.FlagTransferProgressIndex, "n", 0, "transfer progress index")
}
func (m *MsgTransferCmd) getTransferProgressFlagValue() int {
nindex, err := m.Command.Flags().GetInt(constant.FlagTransferProgressIndex)
if err != nil {
fmt.Println("get transfercmd error,make sure it is k8s env or not")
return 0
}
return nindex
}

@ -16,11 +16,13 @@ package cmd
import (
"errors"
"github.com/OpenIMSDK/protocol/constant"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/spf13/cobra"
"google.golang.org/grpc"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/OpenIMSDK/tools/discoveryregistry"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
@ -59,34 +61,58 @@ func (a *RpcCmd) GetPortFromConfig(portType string) int {
if portType == constant.FlagPort {
return config2.Config.RpcPort.OpenImPushPort[0]
}
if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.PushPrometheusPort[0]
}
case RpcAuthServer:
if portType == constant.FlagPort {
return config2.Config.RpcPort.OpenImAuthPort[0]
}
if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.AuthPrometheusPort[0]
}
case RpcConversationServer:
if portType == constant.FlagPort {
return config2.Config.RpcPort.OpenImConversationPort[0]
}
if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.ConversationPrometheusPort[0]
}
case RpcFriendServer:
if portType == constant.FlagPort {
return config2.Config.RpcPort.OpenImFriendPort[0]
}
if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.FriendPrometheusPort[0]
}
case RpcGroupServer:
if portType == constant.FlagPort {
return config2.Config.RpcPort.OpenImGroupPort[0]
}
if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.GroupPrometheusPort[0]
}
case RpcMsgServer:
if portType == constant.FlagPort {
return config2.Config.RpcPort.OpenImMessagePort[0]
}
if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.MessagePrometheusPort[0]
}
case RpcThirdServer:
if portType == constant.FlagPort {
return config2.Config.RpcPort.OpenImThirdPort[0]
}
if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.ThirdPrometheusPort[0]
}
case RpcUserServer:
if portType == constant.FlagPort {
return config2.Config.RpcPort.OpenImUserPort[0]
}
if portType == constant.FlagPrometheusPort {
return config2.Config.Prometheus.UserPrometheusPort[0]
}
}
return 0
}

@ -262,18 +262,20 @@ type configStruct struct {
} `yaml:"callback"`
Prometheus struct {
Enable bool `yaml:"enable"`
UserPrometheusPort []int `yaml:"userPrometheusPort"`
FriendPrometheusPort []int `yaml:"friendPrometheusPort"`
MessagePrometheusPort []int `yaml:"messagePrometheusPort"`
MessageGatewayPrometheusPort []int `yaml:"messageGatewayPrometheusPort"`
GroupPrometheusPort []int `yaml:"groupPrometheusPort"`
AuthPrometheusPort []int `yaml:"authPrometheusPort"`
PushPrometheusPort []int `yaml:"pushPrometheusPort"`
ConversationPrometheusPort []int `yaml:"conversationPrometheusPort"`
RtcPrometheusPort []int `yaml:"rtcPrometheusPort"`
MessageTransferPrometheusPort []int `yaml:"messageTransferPrometheusPort"`
ThirdPrometheusPort []int `yaml:"thirdPrometheusPort"`
Enable bool `yaml:"enable"`
PrometheusUrl string `yaml:"prometheusUrl"`
ApiPrometheusPort []int `yaml:"apiPrometheusPort"`
UserPrometheusPort []int `yaml:"userPrometheusPort"`
FriendPrometheusPort []int `yaml:"friendPrometheusPort"`
MessagePrometheusPort []int `yaml:"messagePrometheusPort"`
MessageGatewayPrometheusPort []int `yaml:"messageGatewayPrometheusPort"`
GroupPrometheusPort []int `yaml:"groupPrometheusPort"`
AuthPrometheusPort []int `yaml:"authPrometheusPort"`
PushPrometheusPort []int `yaml:"pushPrometheusPort"`
ConversationPrometheusPort []int `yaml:"conversationPrometheusPort"`
RtcPrometheusPort []int `yaml:"rtcPrometheusPort"`
MessageTransferPrometheusPort []int `yaml:"messageTransferPrometheusPort"`
ThirdPrometheusPort []int `yaml:"thirdPrometheusPort"`
} `yaml:"prometheus"`
Notification notification `yaml:"notification"`
}

@ -21,8 +21,9 @@ import (
"path/filepath"
"github.com/OpenIMSDK/protocol/constant"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"gopkg.in/yaml.v3"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
)
//go:embed version
@ -34,6 +35,16 @@ const (
DefaultFolderPath = "../config/"
)
// GetDefaultConfigPath returns the absolute path joined with ../config/; this is the k8s container config path.
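// For example, an executable at /openim/bin/open-im-api resolves to /openim/config (paths are illustrative).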
func GetDefaultConfigPath() string {
b, err := filepath.Abs(os.Args[0])
if err != nil {
fmt.Println("filepath.Abs error,err=", err)
return ""
}
return filepath.Join(filepath.Dir(b), "../config/")
}
// GetProjectRoot returns the absolute path of the project root directory.
func GetProjectRoot() string {
b, _ := filepath.Abs(os.Args[0])
@ -64,9 +75,11 @@ func initConfig(config interface{}, configName, configFolderPath string) error {
_, err := os.Stat(configFolderPath)
if err != nil {
if !os.IsNotExist(err) {
fmt.Println("stat config path error:", err.Error())
return fmt.Errorf("stat config path error: %w", err)
}
configFolderPath = filepath.Join(GetProjectRoot(), "config", configName)
fmt.Println("flag's path,enviment's path,default path all is not exist,using project path:", configFolderPath)
}
data, err := os.ReadFile(configFolderPath)
if err != nil {
@ -85,7 +98,7 @@ func InitConfig(configFolderPath string) error {
if envConfigPath != "" {
configFolderPath = envConfigPath
} else {
configFolderPath = DefaultFolderPath
configFolderPath = GetDefaultConfigPath()
}
}

@ -18,6 +18,7 @@ import (
"context"
"encoding/json"
"errors"
"github.com/OpenIMSDK/tools/mw/specialerror"
"time"
"github.com/dtm-labs/rockscache"
@ -209,6 +210,9 @@ func batchGetCache2[T any, K comparable](
return fns(ctx, key)
})
if err != nil {
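// A missing record is not a batch-level failure: skip this key and keep the values that were found.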
if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
continue
}
return nil, err
}
res = append(res, val)

@ -51,7 +51,7 @@ const (
getuiTaskID = "GETUI_TASK_ID"
signalCache = "SIGNAL_CACHE:"
signalListCache = "SIGNAL_LIST_CACHE:"
fcmToken = "FCM_TOKEN:"
FCM_TOKEN = "FCM_TOKEN:"
messageCache = "MESSAGE_CACHE:"
messageDelUserList = "MESSAGE_DEL_USER_LIST:"
@ -650,15 +650,15 @@ func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, erro
}
func (c *msgCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
return errs.Wrap(c.rdb.Set(ctx, fcmToken+account+":"+strconv.Itoa(platformID), fcmToken, time.Duration(expireTime)*time.Second).Err())
return errs.Wrap(c.rdb.Set(ctx, FCM_TOKEN+account+":"+strconv.Itoa(platformID), fcmToken, time.Duration(expireTime)*time.Second).Err())
}
func (c *msgCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
return utils.Wrap2(c.rdb.Get(ctx, fcmToken+account+":"+strconv.Itoa(platformID)).Result())
return utils.Wrap2(c.rdb.Get(ctx, FCM_TOKEN+account+":"+strconv.Itoa(platformID)).Result())
}
func (c *msgCache) DelFcmToken(ctx context.Context, account string, platformID int) error {
return errs.Wrap(c.rdb.Del(ctx, fcmToken+account+":"+strconv.Itoa(platformID)).Err())
return errs.Wrap(c.rdb.Del(ctx, FCM_TOKEN+account+":"+strconv.Itoa(platformID)).Err())
}
func (c *msgCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {

@ -0,0 +1,190 @@
package cache
import (
"context"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/redis/go-redis/v9"
"strconv"
"time"
)
type ObjectCache interface {
metaCache
GetName(ctx context.Context, name string) (*relationtb.ObjectModel, error)
DelObjectName(names ...string) ObjectCache
}
func NewObjectCacheRedis(rdb redis.UniversalClient, objDB relationtb.ObjectInfoModelInterface) ObjectCache {
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
return &objectCacheRedis{
rcClient: rcClient,
expireTime: time.Hour * 12,
objDB: objDB,
metaCache: NewMetaCacheRedis(rcClient),
}
}
type objectCacheRedis struct {
metaCache
objDB relationtb.ObjectInfoModelInterface
rcClient *rockscache.Client
expireTime time.Duration
}
func (g *objectCacheRedis) NewCache() ObjectCache {
return &objectCacheRedis{
rcClient: g.rcClient,
expireTime: g.expireTime,
objDB: g.objDB,
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
}
}
func (g *objectCacheRedis) DelObjectName(names ...string) ObjectCache {
objectCache := g.NewCache()
keys := make([]string, 0, len(names))
for _, name := range names {
keys = append(keys, g.getObjectKey(name))
}
objectCache.AddKeys(keys...)
return objectCache
}
func (g *objectCacheRedis) getObjectKey(name string) string {
return "OBJECT:" + name
}
func (g *objectCacheRedis) GetName(ctx context.Context, name string) (*relationtb.ObjectModel, error) {
return getCache(ctx, g.rcClient, g.getObjectKey(name), g.expireTime, func(ctx context.Context) (*relationtb.ObjectModel, error) {
return g.objDB.Take(ctx, name)
})
}
type S3Cache interface {
metaCache
GetKey(ctx context.Context, engine string, key string) (*s3.ObjectInfo, error)
DelS3Key(engine string, keys ...string) S3Cache
}
func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) S3Cache {
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
return &s3CacheRedis{
rcClient: rcClient,
expireTime: time.Hour * 12,
s3: s3,
metaCache: NewMetaCacheRedis(rcClient),
}
}
type s3CacheRedis struct {
metaCache
s3 s3.Interface
rcClient *rockscache.Client
expireTime time.Duration
}
func (g *s3CacheRedis) NewCache() S3Cache {
return &s3CacheRedis{
rcClient: g.rcClient,
expireTime: g.expireTime,
s3: g.s3,
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
}
}
func (g *s3CacheRedis) DelS3Key(engine string, keys ...string) S3Cache {
s3cache := g.NewCache()
ks := make([]string, 0, len(keys))
for _, key := range keys {
ks = append(ks, g.getS3Key(engine, key))
}
s3cache.AddKeys(ks...)
return s3cache
}
func (g *s3CacheRedis) getS3Key(engine string, name string) string {
return "S3:" + engine + ":" + name
}
func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (*s3.ObjectInfo, error) {
return getCache(ctx, g.rcClient, g.getS3Key(engine, name), g.expireTime, func(ctx context.Context) (*s3.ObjectInfo, error) {
return g.s3.StatObject(ctx, name)
})
}
type MinioCache interface {
metaCache
GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*MinioImageInfo, error)) (*MinioImageInfo, error)
GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error)
DelObjectImageInfoKey(keys ...string) MinioCache
DelImageThumbnailKey(key string, format string, width int, height int) MinioCache
}
func NewMinioCache(rdb redis.UniversalClient) MinioCache {
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
return &minioCacheRedis{
rcClient: rcClient,
expireTime: time.Hour * 24 * 7,
metaCache: NewMetaCacheRedis(rcClient),
}
}
type minioCacheRedis struct {
metaCache
rcClient *rockscache.Client
expireTime time.Duration
}
func (g *minioCacheRedis) NewCache() MinioCache {
return &minioCacheRedis{
rcClient: g.rcClient,
expireTime: g.expireTime,
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
}
}
func (g *minioCacheRedis) DelObjectImageInfoKey(keys ...string) MinioCache {
s3cache := g.NewCache()
ks := make([]string, 0, len(keys))
for _, key := range keys {
ks = append(ks, g.getObjectImageInfoKey(key))
}
s3cache.AddKeys(ks...)
return s3cache
}
func (g *minioCacheRedis) DelImageThumbnailKey(key string, format string, width int, height int) MinioCache {
s3cache := g.NewCache()
s3cache.AddKeys(g.getMinioImageThumbnailKey(key, format, width, height))
return s3cache
}
func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
return "MINIO:IMAGE:" + key
}
func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
return "MINIO:THUMBNAIL:" + format + ":w" + strconv.Itoa(width) + ":h" + strconv.Itoa(height) + ":" + key
}
func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*MinioImageInfo, error)) (*MinioImageInfo, error) {
info, err := getCache(ctx, g.rcClient, g.getObjectImageInfoKey(key), g.expireTime, fn)
if err != nil {
return nil, err
}
return info, nil
}
func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
}
type MinioImageInfo struct {
IsImg bool `json:"isImg"`
Width int `json:"width"`
Height int `json:"height"`
Format string `json:"format"`
Etag string `json:"etag"`
}
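A usage sketch for the thumbnail cache (buildThumbnail is a hypothetical callback; in this diff the real caller is the minio AccessURL path):

thumbKey, err := minioCache.GetThumbnailKey(ctx, objectKey, "jpeg", 256, 256,
	func(ctx context.Context) (string, error) {
		// runs only on a cache miss: create the thumbnail object and return its key,
		// which is then cached for expireTime (7 days above)
		return buildThumbnail(ctx, objectKey, "jpeg", 256, 256)
	})
if err != nil {
	return "", err
}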

@ -50,6 +50,8 @@ type ConversationDatabase interface {
GetConversationIDs(ctx context.Context, userID string) ([]string, error)
GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error)
GetAllConversationIDs(ctx context.Context) ([]string, error)
GetAllConversationIDsNumber(ctx context.Context) (int64, error)
PageConversationIDs(ctx context.Context, pageNumber, showNumber int32) (conversationIDs []string, err error)
//GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error)
GetConversationIDsNeedDestruct(ctx context.Context) ([]*relationtb.ConversationModel, error)
@ -72,6 +74,9 @@ type conversationDatabase struct {
func (c *conversationDatabase) SetUsersConversationFiledTx(ctx context.Context, userIDs []string, conversation *relationtb.ConversationModel, filedMap map[string]interface{}) (err error) {
cache := c.cache.NewCache()
if conversation.GroupID != "" {
cache = cache.DelSuperGroupRecvMsgNotNotifyUserIDs(conversation.GroupID).DelSuperGroupRecvMsgNotNotifyUserIDsHash(conversation.GroupID)
}
if err := c.tx.Transaction(func(tx any) error {
conversationTx := c.conversationDB.NewTx(tx)
haveUserIDs, err := conversationTx.FindUserID(ctx, userIDs, []string{conversation.ConversationID})
@ -199,6 +204,13 @@ func (c *conversationDatabase) GetUserAllConversation(ctx context.Context, owner
func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.ConversationModel) error {
cache := c.cache.NewCache()
groupIDs := utils.Distinct(utils.Filter(conversations, func(e *relationtb.ConversationModel) (string, bool) {
return e.GroupID, e.GroupID != ""
}))
for _, groupID := range groupIDs {
cache = cache.DelSuperGroupRecvMsgNotNotifyUserIDs(groupID).DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID)
}
if err := c.tx.Transaction(func(tx any) error {
var conversationIDs []string
for _, conversation := range conversations {
@ -295,6 +307,14 @@ func (c *conversationDatabase) GetAllConversationIDs(ctx context.Context) ([]str
return c.conversationDB.GetAllConversationIDs(ctx)
}
func (c *conversationDatabase) GetAllConversationIDsNumber(ctx context.Context) (int64, error) {
return c.conversationDB.GetAllConversationIDsNumber(ctx)
}
func (c *conversationDatabase) PageConversationIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) {
return c.conversationDB.PageConversationIDs(ctx, pageNumber, showNumber)
}
//func (c *conversationDatabase) GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) {
// return c.cache.GetUserAllHasReadSeqs(ctx, ownerUserID)
//}

@ -17,6 +17,7 @@ package controller
import (
"context"
"errors"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"time"
"github.com/redis/go-redis/v9"
@ -30,8 +31,6 @@ import (
unrelationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/unrelation"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/unrelation"
"github.com/openimsdk/open-im-server/v3/pkg/common/kafka"
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
"go.mongodb.org/mongo-driver/mongo"
pbmsg "github.com/OpenIMSDK/protocol/msg"
@ -355,10 +354,9 @@ func (db *commonMsgDatabase) DelUserDeleteMsgsList(ctx context.Context, conversa
func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
currentMaxSeq, err := db.cache.GetMaxSeq(ctx, conversationID)
if err != nil && errs.Unwrap(err) != redis.Nil {
prome.Inc(prome.SeqGetFailedCounter)
log.ZError(ctx, "db.cache.GetMaxSeq", err)
return 0, false, err
}
prome.Inc(prome.SeqGetSuccessCounter)
lenList := len(msgs)
if int64(lenList) > db.msg.GetSingleGocMsgNum() {
return 0, false, errors.New("too large")
@ -378,23 +376,20 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
}
failedNum, err := db.cache.SetMessageToCache(ctx, conversationID, msgs)
if err != nil {
prome.Add(prome.MsgInsertRedisFailedCounter, failedNum)
prom_metrics.MsgInsertRedisFailedCounter.Add(float64(failedNum))
log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID)
} else {
prome.Inc(prome.MsgInsertRedisSuccessCounter)
prom_metrics.MsgInsertRedisSuccessCounter.Inc()
}
err = db.cache.SetMaxSeq(ctx, conversationID, currentMaxSeq)
if err != nil {
prome.Inc(prome.SeqSetFailedCounter)
} else {
prome.Inc(prome.SeqSetSuccessCounter)
log.ZError(ctx, "db.cache.SetMaxSeq error", err, "conversationID", conversationID)
prom_metrics.SeqSetFailedCounter.Inc()
}
err2 := db.cache.SetHasReadSeqs(ctx, conversationID, userSeqMap)
if err != nil {
log.ZError(ctx, "SetHasReadSeqs error", err2, "userSeqMap", userSeqMap, "conversationID", conversationID)
prome.Inc(prome.SeqSetFailedCounter)
} else {
prome.Inc(prome.SeqSetSuccessCounter)
prom_metrics.SeqSetFailedCounter.Inc()
}
return lastMaxSeq, isNew, utils.Wrap(err, "")
}
@ -441,6 +436,25 @@ func (db *commonMsgDatabase) getMsgBySeqsRange(ctx context.Context, userID strin
return seqMsgs, nil
}
// GetMsgBySeqsRange In the context of group chat, we have the following parameters:
//
// "maxSeq" of a conversation: It represents the maximum value of messages in the group conversation.
// "minSeq" of a conversation (default: 1): It represents the minimum value of messages in the group conversation.
//
// For a user's perspective regarding the group conversation, we have the following parameters:
//
// "userMaxSeq": It represents the user's upper limit for message retrieval in the group. If not set (default: 0),
// it means the upper limit is the same as the conversation's "maxSeq".
// "userMinSeq": It represents the user's starting point for message retrieval in the group. If not set (default: 0),
// it means the starting point is the same as the conversation's "minSeq".
//
// The scenarios for these parameters are as follows:
//
// For users who have been kicked out of the group, "userMaxSeq" can be set as the maximum value they had before
// being kicked out. This limits their ability to retrieve messages up to a certain point.
// For new users joining the group, if they don't need to receive old messages,
// "userMinSeq" can be set as the same value as the conversation's "maxSeq" at the moment they join the group.
// This ensures that their message retrieval starts from the point they joined.
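// Example (sketch): with a conversation maxSeq of 500 and minSeq of 1, a user kicked out at
// seq 300 keeps userMaxSeq=300 and can only pull seqs in [1, 300], while a user who joins when
// the conversation is already at seq 500 gets userMinSeq=500 and starts pulling from seq 500.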
func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (int64, int64, []*sdkws.MsgData, error) {
userMinSeq, err := db.cache.GetConversationUserMinSeq(ctx, conversationID, userID)
if err != nil && errs.Unwrap(err) != redis.Nil {
@ -453,6 +467,7 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
if userMinSeq > minSeq {
minSeq = userMinSeq
}
//"minSeq" represents the startSeq value that the user can retrieve.
if minSeq > end {
log.ZInfo(ctx, "minSeq > end", "minSeq", minSeq, "end", end)
return 0, 0, nil, nil
@ -467,23 +482,41 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
maxSeq = userMaxSeq
}
}
//"maxSeq" represents the endSeq value that the user can retrieve.
if begin < minSeq {
begin = minSeq
}
if end > maxSeq {
end = maxSeq
}
//"begin" and "end" represent the actual startSeq and endSeq values that the user can retrieve.
if end < begin {
return 0, 0, nil, errs.ErrArgs.Wrap("seq end < begin")
}
var seqs []int64
for i := end; i > end-num; i-- {
if i >= begin {
seqs = append([]int64{i}, seqs...)
} else {
break
if end-begin+1 <= num {
for i := begin; i <= end; i++ {
seqs = append(seqs, i)
}
}
} else {
for i := end - num + 1; i <= end; i++ {
seqs = append(seqs, i)
}
}
// e.g. begin=167, end=178, num=10: end-begin+1 = 12 > num, so the else branch returns the latest num seqs [169..178].
//var seqs []int64
//for i := end; i > end-num; i-- {
// if i >= begin {
// seqs = append([]int64{i}, seqs...)
// } else {
// break
// }
//}
if len(seqs) == 0 {
return 0, 0, nil, nil
}
@ -493,7 +526,7 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
cachedMsgs, failedSeqs, err := db.cache.GetMessagesBySeq(ctx, conversationID, seqs)
if err != nil {
if err != redis.Nil {
prome.Add(prome.MsgPullFromRedisFailedCounter, len(failedSeqs))
log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
}
}
@ -530,7 +563,7 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
cachedMsgs, failedSeqs2, err := db.cache.GetMessagesBySeq(ctx, conversationID, reGetSeqsCache)
if err != nil {
if err != redis.Nil {
prome.Add(prome.MsgPullFromRedisFailedCounter, len(failedSeqs2))
log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", reGetSeqsCache)
}
}
@ -543,15 +576,14 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
log.ZDebug(ctx, "msgs not exist in redis", "seqs", failedSeqs)
}
// get from cache or db
prome.Add(prome.MsgPullFromRedisSuccessCounter, len(successMsgs))
if len(failedSeqs) > 0 {
mongoMsgs, err := db.getMsgBySeqsRange(ctx, userID, conversationID, failedSeqs, begin, end)
if err != nil {
prome.Add(prome.MsgPullFromMongoFailedCounter, len(failedSeqs))
return 0, 0, nil, err
}
prome.Add(prome.MsgPullFromMongoSuccessCounter, len(mongoMsgs))
successMsgs = append(successMsgs, mongoMsgs...)
successMsgs = append(mongoMsgs, successMsgs...)
}
return minSeq, maxSeq, successMsgs, nil
@ -582,7 +614,6 @@ func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, co
successMsgs, failedSeqs, err := db.cache.GetMessagesBySeq(ctx, conversationID, newSeqs)
if err != nil {
if err != redis.Nil {
prome.Add(prome.MsgPullFromRedisFailedCounter, len(failedSeqs))
log.ZError(ctx, "get message from redis exception", err, "failedSeqs", failedSeqs, "conversationID", conversationID)
}
}
@ -602,14 +633,14 @@ func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, co
"conversationID",
conversationID,
)
prome.Add(prome.MsgPullFromRedisSuccessCounter, len(successMsgs))
if len(failedSeqs) > 0 {
mongoMsgs, err := db.getMsgBySeqs(ctx, userID, conversationID, failedSeqs)
if err != nil {
prome.Add(prome.MsgPullFromMongoFailedCounter, len(failedSeqs))
return 0, 0, nil, err
}
prome.Add(prome.MsgPullFromMongoSuccessCounter, len(mongoMsgs))
successMsgs = append(successMsgs, mongoMsgs...)
}
return minSeq, maxSeq, successMsgs, nil

@ -16,12 +16,13 @@ package controller
import (
"context"
"path/filepath"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3/cont"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/redis/go-redis/v9"
"path/filepath"
"time"
)
type S3Database interface {
@ -34,16 +35,18 @@ type S3Database interface {
SetObject(ctx context.Context, info *relation.ObjectModel) error
}
func NewS3Database(s3 s3.Interface, obj relation.ObjectInfoModelInterface) S3Database {
func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj relation.ObjectInfoModelInterface) S3Database {
return &s3Database{
s3: cont.New(s3),
obj: obj,
s3: cont.New(cache.NewS3Cache(rdb, s3), s3),
cache: cache.NewObjectCacheRedis(rdb, obj),
db: obj,
}
}
type s3Database struct {
s3 *cont.Controller
obj relation.ObjectInfoModelInterface
s3 *cont.Controller
cache cache.ObjectCache
db relation.ObjectInfoModelInterface
}
func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
@ -67,11 +70,14 @@ func (s *s3Database) CompleteMultipartUpload(ctx context.Context, uploadID strin
}
func (s *s3Database) SetObject(ctx context.Context, info *relation.ObjectModel) error {
return s.obj.SetObject(ctx, info)
if err := s.db.SetObject(ctx, info); err != nil {
return err
}
return s.cache.DelObjectName(info.Name).ExecDel(ctx)
}
func (s *s3Database) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (time.Time, string, error) {
obj, err := s.obj.Take(ctx, name)
obj, err := s.cache.GetName(ctx, name)
if err != nil {
return time.Time{}, "", err
}

@ -16,9 +16,7 @@ package relation
import (
"context"
"github.com/OpenIMSDK/tools/errs"
"gorm.io/gorm"
"github.com/OpenIMSDK/protocol/constant"
@ -145,7 +143,7 @@ func (c *ConversationGorm) FindRecvMsgNotNotifyUserIDs(
return userIDs, utils.Wrap(
c.db(ctx).
Where("group_id = ? and recv_msg_opt = ?", groupID, constant.ReceiveNotNotifyMessage).
Pluck("user_id", &userIDs).
Pluck("owner_user_id", &userIDs).
Error,
"",
)
@ -158,7 +156,7 @@ func (c *ConversationGorm) FindSuperGroupRecvMsgNotNotifyUserIDs(
return userIDs, utils.Wrap(
c.db(ctx).
Where("group_id = ? and recv_msg_opt = ? and conversation_type = ?", groupID, constant.ReceiveNotNotifyMessage, constant.SuperGroupChatType).
Pluck("user_id", &userIDs).
Pluck("owner_user_id", &userIDs).
Error,
"",
)
@ -188,6 +186,18 @@ func (c *ConversationGorm) GetAllConversationIDs(ctx context.Context) (conversat
)
}
func (c *ConversationGorm) GetAllConversationIDsNumber(ctx context.Context) (int64, error) {
var num int64
err := c.db(ctx).Select("COUNT(DISTINCT conversation_id)").Model(&relation.ConversationModel{}).Count(&num).Error
return num, errs.Wrap(err)
}
func (c *ConversationGorm) PageConversationIDs(ctx context.Context, pageNumber, showNumber int32) (conversationIDs []string, err error) {
err = c.db(ctx).Distinct("conversation_id").Limit(int(showNumber)).Offset(int((pageNumber-1)*showNumber)).Pluck("conversation_id", &conversationIDs).Error
err = errs.Wrap(err)
return
}
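// For pageNumber=2 and showNumber=50 this roughly issues
//   SELECT DISTINCT conversation_id FROM <conversation table> LIMIT 50 OFFSET 50
// (the table name is whatever ConversationModel maps to; pages are 1-based because the
// offset is computed as (pageNumber-1)*showNumber).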
func (c *ConversationGorm) GetUserAllHasReadSeqs(
ctx context.Context,
ownerUserID string,

@ -22,10 +22,11 @@ import (
"github.com/OpenIMSDK/tools/log"
"github.com/OpenIMSDK/tools/mw/specialerror"
mysqldriver "github.com/go-sql-driver/mysql"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
)
const (

@ -20,6 +20,7 @@ import (
"encoding/hex"
"errors"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"path"
"strings"
"time"
@ -32,12 +33,16 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
)
func New(impl s3.Interface) *Controller {
return &Controller{impl: impl}
func New(cache cache.S3Cache, impl s3.Interface) *Controller {
return &Controller{
cache: cache,
impl: impl,
}
}
type Controller struct {
impl s3.Interface
cache cache.S3Cache
impl s3.Interface
}
func (c *Controller) HashPath(md5 string) string {
@ -69,8 +74,12 @@ func (c *Controller) PartLimit() *s3.PartLimit {
return c.impl.PartLimit()
}
func (c *Controller) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
return c.cache.GetKey(ctx, c.impl.Engine(), name)
}
func (c *Controller) GetHashObject(ctx context.Context, hash string) (*s3.ObjectInfo, error) {
return c.impl.StatObject(ctx, c.HashPath(hash))
return c.StatObject(ctx, c.HashPath(hash))
}
func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*InitiateUploadResult, error) {
@ -94,7 +103,7 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
if maxParts > 0 && partNumber > 0 && partNumber < maxParts {
return nil, fmt.Errorf("too many parts: %d", partNumber)
}
if info, err := c.impl.StatObject(ctx, c.HashPath(hash)); err == nil {
if info, err := c.StatObject(ctx, c.HashPath(hash)); err == nil {
return nil, &HashAlreadyExistsError{Object: info}
} else if !c.impl.IsNotFound(err) {
return nil, err
@ -135,7 +144,7 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
}
var authSign *s3.AuthSignResult
if maxParts > 0 {
partNumbers := make([]int, partNumber)
partNumbers := make([]int, maxParts)
for i := 0; i < maxParts; i++ {
partNumbers[i] = i + 1
}
@ -168,13 +177,13 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
fmt.Println("CompleteUpload sum:", hex.EncodeToString(md5Sum[:]), "upload hash:", upload.Hash)
return nil, errors.New("md5 mismatching")
}
if info, err := c.impl.StatObject(ctx, c.HashPath(upload.Hash)); err == nil {
if info, err := c.StatObject(ctx, c.HashPath(upload.Hash)); err == nil {
return &UploadResult{
Key: info.Key,
Size: info.Size,
Hash: info.ETag,
}, nil
} else if !c.impl.IsNotFound(err) {
} else if !c.IsNotFound(err) {
return nil, err
}
cleanObject := make(map[string]struct{})
@ -200,7 +209,7 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
}
targetKey = result.Key
case UploadTypePresigned:
uploadInfo, err := c.impl.StatObject(ctx, upload.Key)
uploadInfo, err := c.StatObject(ctx, upload.Key)
if err != nil {
return nil, err
}
@ -230,6 +239,9 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
default:
return nil, errors.New("invalid upload id type")
}
if err := c.cache.DelS3Key(c.impl.Engine(), targetKey).ExecDel(ctx); err != nil {
return nil, err
}
return &UploadResult{
Key: targetKey,
Size: upload.Size,
@ -253,7 +265,7 @@ func (c *Controller) AuthSign(ctx context.Context, uploadID string, partNumbers
}
func (c *Controller) IsNotFound(err error) bool {
return c.impl.IsNotFound(err)
return c.impl.IsNotFound(err) || errs.ErrRecordNotFound.Is(err)
}
func (c *Controller) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {

@ -15,20 +15,14 @@
package minio
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"image"
"image/gif"
"image/jpeg"
"image/png"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"io"
"net/http"
"net/url"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
@ -56,13 +50,13 @@ const (
)
const (
maxImageWidth = 1024
maxImageHeight = 1024
maxImageSize = 1024 * 1024 * 50
pathInfo = "openim/thumbnail"
maxImageWidth = 1024
maxImageHeight = 1024
maxImageSize = 1024 * 1024 * 50
imageThumbnailPath = "openim/thumbnail"
)
func NewMinio() (s3.Interface, error) {
func NewMinio(cache cache.MinioCache) (s3.Interface, error) {
u, err := url.Parse(config.Config.Object.Minio.Endpoint)
if err != nil {
return nil, err
@ -80,6 +74,7 @@ func NewMinio() (s3.Interface, error) {
core: &minio.Core{Client: client},
lock: &sync.Mutex{},
init: false,
cache: cache,
}
if config.Config.Object.Minio.SignEndpoint == "" || config.Config.Object.Minio.SignEndpoint == config.Config.Object.Minio.Endpoint {
m.opts = opts
@ -124,6 +119,7 @@ type Minio struct {
lock sync.Locker
init bool
prefix string
cache cache.MinioCache
}
func (m *Minio) initMinio(ctx context.Context) error {
@ -227,6 +223,7 @@ func (m *Minio) CompleteMultipartUpload(ctx context.Context, uploadID string, na
if err != nil {
return nil, err
}
m.delObjectImageInfoKey(ctx, name, upload.Size)
return &s3.CompleteMultipartUploadResult{
Location: upload.Location,
Bucket: upload.Bucket,
@ -389,7 +386,7 @@ func (m *Minio) ListUploadedParts(ctx context.Context, uploadID string, name str
return res, nil
}
func (m *Minio) presignedGetObject(ctx context.Context, name string, expire time.Duration, query url.Values) (string, error) {
func (m *Minio) PresignedGetObject(ctx context.Context, name string, expire time.Duration, query url.Values) (string, error) {
if expire <= 0 {
expire = time.Hour * 24 * 365 * 99 // 99 years
} else if expire < time.Second {
@ -427,109 +424,9 @@ func (m *Minio) AccessURL(ctx context.Context, name string, expire time.Duration
}
}
if opt.Image == nil || (opt.Image.Width < 0 && opt.Image.Height < 0 && opt.Image.Format == "") || (opt.Image.Width > maxImageWidth || opt.Image.Height > maxImageHeight) {
return m.presignedGetObject(ctx, name, expire, reqParams)
return m.PresignedGetObject(ctx, name, expire, reqParams)
}
fileInfo, err := m.StatObject(ctx, name)
if err != nil {
return "", err
}
if fileInfo.Size > maxImageSize {
return "", errors.New("file size too large")
}
objectInfoPath := path.Join(pathInfo, fileInfo.ETag, "image.json")
var (
img image.Image
info minioImageInfo
)
data, err := m.getObjectData(ctx, objectInfoPath, 1024)
if err == nil {
if err := json.Unmarshal(data, &info); err != nil {
return "", fmt.Errorf("unmarshal minio image info.json error: %w", err)
}
if info.NotImage {
return "", errors.New("not image")
}
} else if m.IsNotFound(err) {
reader, err := m.core.Client.GetObject(ctx, m.bucket, name, minio.GetObjectOptions{})
if err != nil {
return "", err
}
defer reader.Close()
imageInfo, format, err := ImageStat(reader)
if err == nil {
info.NotImage = false
info.Format = format
info.Width, info.Height = ImageWidthHeight(imageInfo)
img = imageInfo
} else {
info.NotImage = true
}
data, err := json.Marshal(&info)
if err != nil {
return "", err
}
if _, err := m.core.Client.PutObject(ctx, m.bucket, objectInfoPath, bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{}); err != nil {
return "", err
}
} else {
return "", err
}
if opt.Image.Width > info.Width || opt.Image.Width <= 0 {
opt.Image.Width = info.Width
}
if opt.Image.Height > info.Height || opt.Image.Height <= 0 {
opt.Image.Height = info.Height
}
opt.Image.Format = strings.ToLower(opt.Image.Format)
if opt.Image.Format == formatJpg {
opt.Image.Format = formatJpeg
}
switch opt.Image.Format {
case formatPng:
case formatJpeg:
case formatGif:
default:
if info.Format == formatGif {
opt.Image.Format = formatGif
} else {
opt.Image.Format = formatJpeg
}
}
reqParams.Set("response-content-type", "image/"+opt.Image.Format)
if opt.Image.Width == info.Width && opt.Image.Height == info.Height && opt.Image.Format == info.Format {
return m.presignedGetObject(ctx, name, expire, reqParams)
}
cacheKey := filepath.Join(pathInfo, fileInfo.ETag, fmt.Sprintf("image_w%d_h%d.%s", opt.Image.Width, opt.Image.Height, opt.Image.Format))
if _, err := m.core.Client.StatObject(ctx, m.bucket, cacheKey, minio.StatObjectOptions{}); err == nil {
return m.presignedGetObject(ctx, cacheKey, expire, reqParams)
} else if !m.IsNotFound(err) {
return "", err
}
if img == nil {
reader, err := m.core.Client.GetObject(ctx, m.bucket, name, minio.GetObjectOptions{})
if err != nil {
return "", err
}
defer reader.Close()
img, _, err = ImageStat(reader)
if err != nil {
return "", err
}
}
thumbnail := resizeImage(img, opt.Image.Width, opt.Image.Height)
buf := bytes.NewBuffer(nil)
switch opt.Image.Format {
case formatPng:
err = png.Encode(buf, thumbnail)
case formatJpeg:
err = jpeg.Encode(buf, thumbnail, nil)
case formatGif:
err = gif.Encode(buf, thumbnail, nil)
}
if _, err := m.core.Client.PutObject(ctx, m.bucket, cacheKey, buf, int64(buf.Len()), minio.PutObjectOptions{}); err != nil {
return "", err
}
return m.presignedGetObject(ctx, cacheKey, expire, reqParams)
return m.getImageThumbnailURL(ctx, name, expire, opt.Image)
}
func (m *Minio) getObjectData(ctx context.Context, name string, limit int64) ([]byte, error) {
@ -541,5 +438,5 @@ func (m *Minio) getObjectData(ctx context.Context, name string, limit int64) ([]
if limit < 0 {
return io.ReadAll(object)
}
return io.ReadAll(io.LimitReader(object, 1024))
return io.ReadAll(io.LimitReader(object, limit))
}

@ -1,22 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package minio
type minioImageInfo struct {
NotImage bool `json:"notImage,omitempty"`
Width int `json:"width,omitempty"`
Height int `json:"height,omitempty"`
Format string `json:"format,omitempty"`
}

@ -0,0 +1,134 @@
package minio
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/OpenIMSDK/tools/errs"
"github.com/OpenIMSDK/tools/log"
"github.com/minio/minio-go/v7"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
"image"
"image/gif"
"image/jpeg"
"image/png"
"net/url"
"path/filepath"
"strings"
"time"
)
func (m *Minio) getImageThumbnailURL(ctx context.Context, name string, expire time.Duration, opt *s3.Image) (string, error) {
var img image.Image
info, err := m.cache.GetImageObjectKeyInfo(ctx, name, func(ctx context.Context) (info *cache.MinioImageInfo, err error) {
info, img, err = m.getObjectImageInfo(ctx, name)
return
})
if err != nil {
return "", err
}
if !info.IsImg {
return "", errs.ErrData.Wrap("object not image")
}
if opt.Width > info.Width || opt.Width <= 0 {
opt.Width = info.Width
}
if opt.Height > info.Height || opt.Height <= 0 {
opt.Height = info.Height
}
opt.Format = strings.ToLower(opt.Format)
if opt.Format == formatJpg {
opt.Format = formatJpeg
}
switch opt.Format {
case formatPng, formatJpeg, formatGif:
default:
opt.Format = ""
}
reqParams := make(url.Values)
if opt.Width == info.Width && opt.Height == info.Height && (opt.Format == info.Format || opt.Format == "") {
reqParams.Set("response-content-type", "image/"+info.Format)
return m.PresignedGetObject(ctx, name, expire, reqParams)
}
if opt.Format == "" {
switch opt.Format {
case formatGif:
opt.Format = formatGif
case formatJpeg:
opt.Format = formatJpeg
case formatPng:
opt.Format = formatPng
default:
opt.Format = formatPng
}
}
key, err := m.cache.GetThumbnailKey(ctx, name, opt.Format, opt.Width, opt.Height, func(ctx context.Context) (string, error) {
if img == nil {
reader, err := m.core.Client.GetObject(ctx, m.bucket, name, minio.GetObjectOptions{})
if err != nil {
return "", err
}
defer reader.Close()
img, _, err = ImageStat(reader)
if err != nil {
return "", err
}
}
thumbnail := resizeImage(img, opt.Width, opt.Height)
buf := bytes.NewBuffer(nil)
switch opt.Format {
case formatPng:
err = png.Encode(buf, thumbnail)
case formatJpeg:
err = jpeg.Encode(buf, thumbnail, nil)
case formatGif:
err = gif.Encode(buf, thumbnail, nil)
}
if err != nil {
return "", err
}
cacheKey := filepath.Join(imageThumbnailPath, info.Etag, fmt.Sprintf("image_w%d_h%d.%s", opt.Width, opt.Height, opt.Format))
if _, err := m.core.Client.PutObject(ctx, m.bucket, cacheKey, buf, int64(buf.Len()), minio.PutObjectOptions{}); err != nil {
return "", err
}
return cacheKey, nil
})
if err != nil {
return "", err
}
reqParams.Set("response-content-type", "image/"+opt.Format)
return m.PresignedGetObject(ctx, key, expire, reqParams)
}
func (m *Minio) getObjectImageInfo(ctx context.Context, name string) (*cache.MinioImageInfo, image.Image, error) {
fileInfo, err := m.StatObject(ctx, name)
if err != nil {
return nil, nil, err
}
if fileInfo.Size > maxImageSize {
return nil, nil, errors.New("file size too large")
}
imageData, err := m.getObjectData(ctx, name, fileInfo.Size)
if err != nil {
return nil, nil, err
}
var info cache.MinioImageInfo
imageInfo, format, err := ImageStat(bytes.NewReader(imageData))
if err == nil {
info.IsImg = true
info.Format = format
info.Width, info.Height = ImageWidthHeight(imageInfo)
} else {
info.IsImg = false
}
info.Etag = fileInfo.ETag
return &info, imageInfo, nil
}
func (m *Minio) delObjectImageInfoKey(ctx context.Context, key string, size int64) {
if size > 0 && size > maxImageSize {
return
}
if err := m.cache.DelObjectImageInfoKey(key).ExecDel(ctx); err != nil {
log.ZError(ctx, "DelObjectImageInfoKey failed", err, "key", key)
}
}
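
Taken together, the thumbnail flow is driven entirely through AccessURL: when an s3.Image option is supplied and the object is within the size limits, a cached or freshly generated thumbnail is presigned instead of the original. A minimal caller sketch, assuming AccessURL is part of s3.Interface as the Minio implementation suggests; the object name, dimensions, and helper name are illustrative only:

package example

import (
	"context"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
)

// getThumbnailURL is a hypothetical helper; store is any s3.Interface
// implementation (for example the object returned by NewMinio).
func getThumbnailURL(ctx context.Context, store s3.Interface, name string) (string, error) {
	opt := &s3.AccessURLOption{
		Image: &s3.Image{Width: 256, Height: 256, Format: "jpeg"}, // jpeg, png and gif are supported
	}
	// returns a presigned URL for the resized object (or the original if no resize is needed)
	return store.AccessURL(ctx, name, time.Hour, opt)
}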

@ -63,6 +63,8 @@ type ConversationModelInterface interface {
GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error)
FindSuperGroupRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) ([]string, error)
GetAllConversationIDs(ctx context.Context) ([]string, error)
GetAllConversationIDsNumber(ctx context.Context) (int64, error)
PageConversationIDs(ctx context.Context, pageNumber, showNumber int32) (conversationIDs []string, err error)
GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (hashReadSeqs map[string]int64, err error)
GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*ConversationModel, error)
GetConversationIDsNeedDestruct(ctx context.Context) ([]*ConversationModel, error)
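
The two new methods make it possible to walk all conversation IDs in fixed-size pages rather than loading them in one call. A hedged sketch of such a loop, assumed to sit in the same package as the interface (package and import boilerplate omitted); the page size and 0-based paging are assumptions:

// pageAllConversationIDs is a hypothetical helper; db is any ConversationModelInterface.
func pageAllConversationIDs(ctx context.Context, db ConversationModelInterface) ([]string, error) {
	const showNumber int32 = 500 // illustrative page size
	total, err := db.GetAllConversationIDsNumber(ctx)
	if err != nil {
		return nil, err
	}
	all := make([]string, 0, total)
	for page := int32(0); int64(page)*int64(showNumber) < total; page++ {
		ids, err := db.PageConversationIDs(ctx, page, showNumber)
		if err != nil {
			return nil, err
		}
		all = append(all, ids...)
	}
	return all, nil
}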

@ -4,12 +4,14 @@ import (
"context"
"errors"
"fmt"
"time"
"github.com/OpenIMSDK/tools/discoveryregistry"
openkeeper "github.com/OpenIMSDK/tools/discoveryregistry/zookeeper"
"github.com/OpenIMSDK/tools/log"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"google.golang.org/grpc"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
)
func NewDiscoveryRegister(envType string) (discoveryregistry.SvcDiscoveryRegistry, error) {

@ -0,0 +1,417 @@
package ginPrometheus
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var defaultMetricPath = "/metrics"
// counter, counter_vec, gauge, gauge_vec,
// histogram, histogram_vec, summary, summary_vec
var reqCnt = &Metric{
ID: "reqCnt",
Name: "requests_total",
Description: "How many HTTP requests processed, partitioned by status code and HTTP method.",
Type: "counter_vec",
Args: []string{"code", "method", "handler", "host", "url"}}
var reqDur = &Metric{
ID: "reqDur",
Name: "request_duration_seconds",
Description: "The HTTP request latencies in seconds.",
Type: "histogram_vec",
Args: []string{"code", "method", "url"},
}
var resSz = &Metric{
ID: "resSz",
Name: "response_size_bytes",
Description: "The HTTP response sizes in bytes.",
Type: "summary"}
var reqSz = &Metric{
ID: "reqSz",
Name: "request_size_bytes",
Description: "The HTTP request sizes in bytes.",
Type: "summary"}
var standardMetrics = []*Metric{
reqCnt,
reqDur,
resSz,
reqSz,
}
/*
RequestCounterURLLabelMappingFn is a function which can be supplied to the middleware to control
the cardinality of the request counter's "url" label, which might be required in some contexts.
For instance, if for a "/customer/:name" route you don't want to generate a time series for every
possible customer name, you could use this function:
func(c *gin.Context) string {
url := c.Request.URL.Path
for _, p := range c.Params {
if p.Key == "name" {
url = strings.Replace(url, p.Value, ":name", 1)
break
}
}
return url
}
which would map "/customer/alice" and "/customer/bob" to their template "/customer/:name".
*/
type RequestCounterURLLabelMappingFn func(c *gin.Context) string
// Metric is a definition for the name, description, type, ID, and
// prometheus.Collector type (i.e. CounterVec, Summary, etc) of each metric
type Metric struct {
MetricCollector prometheus.Collector
ID string
Name string
Description string
Type string
Args []string
}
// Prometheus contains the metrics gathered by the instance and its path
type Prometheus struct {
reqCnt *prometheus.CounterVec
reqDur *prometheus.HistogramVec
reqSz, resSz prometheus.Summary
router *gin.Engine
listenAddress string
Ppg PrometheusPushGateway
MetricsList []*Metric
MetricsPath string
ReqCntURLLabelMappingFn RequestCounterURLLabelMappingFn
// gin.Context string to use as a prometheus URL label
URLLabelFromContext string
}
// PrometheusPushGateway contains the configuration for pushing to a Prometheus pushgateway (optional)
type PrometheusPushGateway struct {
// Push interval in seconds
PushIntervalSeconds time.Duration
// Push Gateway URL in format http://domain:port
// (the job name is appended to the push path as /metrics/job/<job>/instance/<hostname>)
PushGatewayURL string
// Local metrics URL where metrics are fetched from; this could be omitted in the future
// if implemented using prometheus common/expfmt instead
MetricsURL string
// pushgateway job name, defaults to "gin"
Job string
}
// NewPrometheus generates a new set of metrics with a certain subsystem name
func NewPrometheus(subsystem string, customMetricsList ...[]*Metric) *Prometheus {
subsystem = "app"
var metricsList []*Metric
if len(customMetricsList) > 1 {
panic("Too many args. NewPrometheus( string, <optional []*Metric> ).")
} else if len(customMetricsList) == 1 {
metricsList = customMetricsList[0]
}
metricsList = append(metricsList, standardMetrics...)
p := &Prometheus{
MetricsList: metricsList,
MetricsPath: defaultMetricPath,
ReqCntURLLabelMappingFn: func(c *gin.Context) string {
return c.Request.URL.Path
},
}
p.registerMetrics(subsystem)
return p
}
// SetPushGateway sends metrics to a remote pushgateway exposed on pushGatewayURL
// every pushIntervalSeconds. Metrics are fetched from metricsURL
func (p *Prometheus) SetPushGateway(pushGatewayURL, metricsURL string, pushIntervalSeconds time.Duration) {
p.Ppg.PushGatewayURL = pushGatewayURL
p.Ppg.MetricsURL = metricsURL
p.Ppg.PushIntervalSeconds = pushIntervalSeconds
p.startPushTicker()
}
// SetPushGatewayJob job name, defaults to "gin"
func (p *Prometheus) SetPushGatewayJob(j string) {
p.Ppg.Job = j
}
// SetListenAddress sets the address for exposing metrics. If not set, metrics are exposed
// on the same address as the gin engine being instrumented.
func (p *Prometheus) SetListenAddress(address string) {
p.listenAddress = address
if p.listenAddress != "" {
p.router = gin.Default()
}
}
// SetListenAddressWithRouter for using a separate router to expose metrics. (this keeps things like GET /metrics out of
// your content's access log).
func (p *Prometheus) SetListenAddressWithRouter(listenAddress string, r *gin.Engine) {
p.listenAddress = listenAddress
if len(p.listenAddress) > 0 {
p.router = r
}
}
// SetMetricsPath sets the metrics path and registers the handler
func (p *Prometheus) SetMetricsPath(e *gin.Engine) {
if p.listenAddress != "" {
p.router.GET(p.MetricsPath, prometheusHandler())
p.runServer()
} else {
e.GET(p.MetricsPath, prometheusHandler())
}
}
// SetMetricsPathWithAuth sets the metrics path with basic authentication
func (p *Prometheus) SetMetricsPathWithAuth(e *gin.Engine, accounts gin.Accounts) {
if p.listenAddress != "" {
p.router.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
p.runServer()
} else {
e.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
}
}
func (p *Prometheus) runServer() {
if p.listenAddress != "" {
go p.router.Run(p.listenAddress)
}
}
func (p *Prometheus) getMetrics() []byte {
response, err := http.Get(p.Ppg.MetricsURL)
if err != nil {
fmt.Println("Error fetching metrics from local metrics URL error:", err.Error())
return nil
}
defer response.Body.Close()
body, _ := ioutil.ReadAll(response.Body)
return body
}
func (p *Prometheus) getPushGatewayURL() string {
h, _ := os.Hostname()
if p.Ppg.Job == "" {
p.Ppg.Job = "gin"
}
return p.Ppg.PushGatewayURL + "/metrics/job/" + p.Ppg.Job + "/instance/" + h
}
func (p *Prometheus) sendMetricsToPushGateway(metrics []byte) {
req, err := http.NewRequest("POST", p.getPushGatewayURL(), bytes.NewBuffer(metrics))
client := &http.Client{}
if _, err = client.Do(req); err != nil {
fmt.Println("Error sending to push gateway error:", err.Error())
}
}
func (p *Prometheus) startPushTicker() {
ticker := time.NewTicker(time.Second * p.Ppg.PushIntervalSeconds)
go func() {
for range ticker.C {
p.sendMetricsToPushGateway(p.getMetrics())
}
}()
}
// NewMetric creates the prometheus.Collector that corresponds to Metric.Type
func NewMetric(m *Metric, subsystem string) prometheus.Collector {
var metric prometheus.Collector
switch m.Type {
case "counter_vec":
metric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "counter":
metric = prometheus.NewCounter(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "gauge_vec":
metric = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "gauge":
metric = prometheus.NewGauge(
prometheus.GaugeOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "histogram_vec":
metric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "histogram":
metric = prometheus.NewHistogram(
prometheus.HistogramOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "summary_vec":
metric = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "summary":
metric = prometheus.NewSummary(
prometheus.SummaryOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
}
return metric
}
func (p *Prometheus) registerMetrics(subsystem string) {
for _, metricDef := range p.MetricsList {
metric := NewMetric(metricDef, subsystem)
if err := prometheus.Register(metric); err != nil {
fmt.Println("could not be registered in Prometheus,metricDef.Name:", metricDef.Name, " error:", err.Error())
}
switch metricDef {
case reqCnt:
p.reqCnt = metric.(*prometheus.CounterVec)
case reqDur:
p.reqDur = metric.(*prometheus.HistogramVec)
case resSz:
p.resSz = metric.(prometheus.Summary)
case reqSz:
p.reqSz = metric.(prometheus.Summary)
}
metricDef.MetricCollector = metric
}
}
// Use adds the middleware to a gin engine.
func (p *Prometheus) Use(e *gin.Engine) {
e.Use(p.HandlerFunc())
p.SetMetricsPath(e)
}
// UseWithAuth adds the middleware to a gin engine with BasicAuth.
func (p *Prometheus) UseWithAuth(e *gin.Engine, accounts gin.Accounts) {
e.Use(p.HandlerFunc())
p.SetMetricsPathWithAuth(e, accounts)
}
// HandlerFunc defines handler function for middleware
func (p *Prometheus) HandlerFunc() gin.HandlerFunc {
return func(c *gin.Context) {
if c.Request.URL.Path == p.MetricsPath {
c.Next()
return
}
start := time.Now()
reqSz := computeApproximateRequestSize(c.Request)
c.Next()
status := strconv.Itoa(c.Writer.Status())
elapsed := float64(time.Since(start)) / float64(time.Second)
resSz := float64(c.Writer.Size())
url := p.ReqCntURLLabelMappingFn(c)
if len(p.URLLabelFromContext) > 0 {
u, found := c.Get(p.URLLabelFromContext)
if !found {
u = "unknown"
}
url = u.(string)
}
p.reqDur.WithLabelValues(status, c.Request.Method, url).Observe(elapsed)
p.reqCnt.WithLabelValues(status, c.Request.Method, c.HandlerName(), c.Request.Host, url).Inc()
p.reqSz.Observe(float64(reqSz))
p.resSz.Observe(resSz)
}
}
func prometheusHandler() gin.HandlerFunc {
h := promhttp.Handler()
return func(c *gin.Context) {
h.ServeHTTP(c.Writer, c.Request)
}
}
func computeApproximateRequestSize(r *http.Request) int {
s := 0
if r.URL != nil {
s = len(r.URL.Path)
}
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
// r.Form and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
return s
}
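
For reference, a minimal sketch of wiring this middleware into a gin engine; the route and listen address are placeholders, and note that the subsystem argument is currently forced to "app" internally:

package main

import (
	"github.com/gin-gonic/gin"

	ginProm "github.com/openimsdk/open-im-server/v3/pkg/common/ginPrometheus"
)

func main() {
	r := gin.New()
	p := ginProm.NewPrometheus("app") // registers the standard request count/duration/size metrics
	p.Use(r)                          // installs the middleware and exposes GET /metrics on r
	r.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })
	_ = r.Run(":8080") // placeholder listen address
}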

@ -31,14 +31,25 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
)
var client http.Client
var (
// client is the shared HTTP client used by Post and the callback helpers.
client = &http.Client{
Timeout: 15 * time.Second, // overall request timeout
}
)
func init() {
// allow more concurrent connections per host on the shared default transport
http.DefaultTransport.(*http.Transport).MaxConnsPerHost = 100
}
func Get(url string) (response []byte, err error) {
client := http.Client{Timeout: 5 * time.Second}
resp, err := client.Get(url)
hclient := http.Client{Timeout: 5 * time.Second}
resp, err := hclient.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
@ -47,26 +58,23 @@ func Get(url string) (response []byte, err error) {
return body, nil
}
func Post(
ctx context.Context,
url string,
header map[string]string,
data interface{},
timeout int,
) (content []byte, err error) {
func Post(ctx context.Context, url string, header map[string]string, data interface{}, timeout int) (content []byte, err error) {
if timeout > 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, time.Second*time.Duration(timeout))
defer cancel()
}
jsonStr, err := json.Marshal(data)
if err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(jsonStr))
if err != nil {
return nil, err
}
if operationID, _ := ctx.Value(constant.OperationID).(string); operationID != "" {
req.Header.Set(constant.OperationID, operationID)
}
@ -74,25 +82,22 @@ func Post(
req.Header.Set(k, v)
}
req.Header.Add("content-type", "application/json; charset=utf-8")
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
result, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return result, nil
}
func PostReturn(
ctx context.Context,
url string,
header map[string]string,
input, output interface{},
timeOutSecond int,
) error {
func PostReturn(ctx context.Context, url string, header map[string]string, input, output interface{}, timeOutSecond int) error {
b, err := Post(ctx, url, header, input, timeOutSecond)
if err != nil {
return err
@ -101,17 +106,13 @@ func PostReturn(
return err
}
func callBackPostReturn(
ctx context.Context,
url, command string,
input interface{},
output callbackstruct.CallbackResp,
callbackConfig config.CallBackConfig,
) error {
func callBackPostReturn(ctx context.Context, url, command string, input interface{}, output callbackstruct.CallbackResp, callbackConfig config.CallBackConfig) error {
defer log.ZDebug(ctx, "callback", "url", url, "command", command, "input", input, "callbackConfig", callbackConfig)
v := urllib.Values{}
v.Set(constant.CallbackCommand, command)
url = url + "?" + v.Encode()
b, err := Post(ctx, url, nil, input, callbackConfig.CallbackTimeOut)
if err != nil {
if callbackConfig.CallbackFailedContinue != nil && *callbackConfig.CallbackFailedContinue {
@ -120,6 +121,7 @@ func callBackPostReturn(
}
return errs.ErrNetwork.Wrap(err.Error())
}
if err = json.Unmarshal(b, output); err != nil {
if callbackConfig.CallbackFailedContinue != nil && *callbackConfig.CallbackFailedContinue {
log.ZWarn(ctx, "callback failed but continue", err, "url", url)
@ -127,15 +129,10 @@ func callBackPostReturn(
}
return errs.ErrData.Wrap(err.Error())
}
return output.Parse()
}
func CallBackPostReturn(
ctx context.Context,
url string,
req callbackstruct.CallbackReq,
resp callbackstruct.CallbackResp,
callbackConfig config.CallBackConfig,
) error {
func CallBackPostReturn(ctx context.Context, url string, req callbackstruct.CallbackReq, resp callbackstruct.CallbackResp, callbackConfig config.CallBackConfig) error {
return callBackPostReturn(ctx, url, req.GetCallbackCommand(), req, resp, callbackConfig)
}
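
A hedged sketch of calling PostReturn directly, assumed to live in the same package as these helpers; the URL, header, and payload shapes are illustrative only:

// callPing posts a JSON body and decodes the JSON response, giving up after 5 seconds.
func callPing(ctx context.Context, url string) (map[string]any, error) {
	req := map[string]string{"msg": "ping"} // illustrative payload
	var out map[string]any
	if err := PostReturn(ctx, url, nil, req, &out, 5); err != nil {
		return nil, err
	}
	return out, nil
}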

@ -28,8 +28,6 @@ import (
"github.com/IBM/sarama"
"google.golang.org/protobuf/proto"
prome "github.com/openimsdk/open-im-server/v3/pkg/common/prome"
)
const (
@ -131,8 +129,8 @@ func (p *Producer) SendMessage(ctx context.Context, key string, msg proto.Messag
kMsg.Headers = header
partition, offset, err := p.producer.SendMessage(kMsg)
log.ZDebug(ctx, "ByteEncoder SendMessage end", "key ", kMsg.Key, "key length", kMsg.Value.Length())
if err == nil {
prome.Inc(prome.SendMsgCounter)
if err != nil {
log.ZWarn(ctx, "p.producer.SendMessage error", err)
}
return partition, offset, utils.Wrap(err, "")
}

@ -0,0 +1,45 @@
package prom_metrics
import (
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/ginPrometheus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
)
func NewGrpcPromObj(cusMetrics []prometheus.Collector) (*prometheus.Registry, *grpc_prometheus.ServerMetrics, error) {
////////////////////////////////////////////////////////
reg := prometheus.NewRegistry()
grpcMetrics := grpc_prometheus.NewServerMetrics()
grpcMetrics.EnableHandlingTimeHistogram()
cusMetrics = append(cusMetrics, grpcMetrics, collectors.NewGoCollector())
reg.MustRegister(cusMetrics...)
return reg, grpcMetrics, nil
}
func GetGrpcCusMetrics(registerName string) []prometheus.Collector {
switch registerName {
case config2.Config.RpcRegisterName.OpenImMessageGatewayName:
return []prometheus.Collector{OnlineUserGauge}
case config2.Config.RpcRegisterName.OpenImMsgName:
return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
case "Transfer":
return []prometheus.Collector{MsgInsertRedisSuccessCounter, MsgInsertRedisFailedCounter, MsgInsertMongoSuccessCounter, MsgInsertMongoFailedCounter, SeqSetFailedCounter}
case config2.Config.RpcRegisterName.OpenImPushName:
return []prometheus.Collector{MsgOfflinePushFailedCounter}
case config2.Config.RpcRegisterName.OpenImAuthName:
return []prometheus.Collector{UserLoginCounter}
default:
return nil
}
}
func GetGinCusMetrics(name string) []*ginPrometheus.Metric {
switch name {
case "Api":
return []*ginPrometheus.Metric{ApiCustomCnt}
default:
return []*ginPrometheus.Metric{ApiCustomCnt}
}
}
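
A hedged sketch of how a service could expose the registry built by NewGrpcPromObj over HTTP; the helper name and listen address are placeholders, and the real wiring for gRPC servers lives in startrpc further down:

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
)

// serveMetrics registers the custom collectors for a service and serves them on addr.
func serveMetrics(registerName, addr string) error {
	cus := prom_metrics.GetGrpcCusMetrics(registerName)
	reg, grpcMetrics, err := prom_metrics.NewGrpcPromObj(cus)
	if err != nil {
		return err
	}
	_ = grpcMetrics // would normally be attached to the gRPC server via its interceptors
	return http.ListenAndServe(addr, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
}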

@ -0,0 +1,16 @@
package prom_metrics
import ginProm "github.com/openimsdk/open-im-server/v3/pkg/common/ginPrometheus"
/*
labels := prometheus.Labels{"label_one": "any", "label_two": "value"}
ApiCustomCnt.MetricCollector.(*prometheus.CounterVec).With(labels).Inc()
*/
var (
ApiCustomCnt = &ginProm.Metric{
Name: "custom_total",
Description: "Custom counter events.",
Type: "counter_vec",
Args: []string{"label_one", "label_two"},
}
)

@ -0,0 +1,12 @@
package prom_metrics
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
UserLoginCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "user_login_total",
Help: "The number of user login",
})
)

@ -0,0 +1,24 @@
package prom_metrics
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
SingleChatMsgProcessSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "single_chat_msg_process_success_total",
Help: "The number of single chat messages processed successfully",
})
SingleChatMsgProcessFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "single_chat_msg_process_failed_total",
Help: "The number of single chat messages that failed to be processed",
})
GroupChatMsgProcessSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "group_chat_msg_process_success_total",
Help: "The number of group chat messages processed successfully",
})
GroupChatMsgProcessFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "group_chat_msg_process_failed_total",
Help: "The number of group chat messages that failed to be processed",
})
)

@ -0,0 +1,12 @@
package prom_metrics
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
OnlineUserGauge = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "online_user_num",
Help: "The number of online user num",
})
)

@ -0,0 +1,12 @@
package prom_metrics
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
MsgOfflinePushFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "msg_offline_push_failed_total",
Help: "The number of msg failed offline pushed",
})
)

@ -0,0 +1,28 @@
package prom_metrics
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
MsgInsertRedisSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_redis_success_total",
Help: "The number of messages successfully inserted into redis",
})
MsgInsertRedisFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_redis_failed_total",
Help: "The number of messages that failed to be inserted into redis",
})
MsgInsertMongoSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_mongo_success_total",
Help: "The number of messages successfully inserted into mongo",
})
MsgInsertMongoFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_mongo_failed_total",
Help: "The number of messages that failed to be inserted into mongo",
})
SeqSetFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "seq_set_failed_total",
Help: "The number of failed seq set operations",
})
)

@ -1,15 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prome // import "github.com/openimsdk/open-im-server/v3/pkg/common/prome"

@ -1,470 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prome
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
// auth rpc.
UserLoginCounter prometheus.Counter
UserRegisterCounter prometheus.Counter
// seg.
SeqGetSuccessCounter prometheus.Counter
SeqGetFailedCounter prometheus.Counter
SeqSetSuccessCounter prometheus.Counter
SeqSetFailedCounter prometheus.Counter
// msg-db.
MsgInsertRedisSuccessCounter prometheus.Counter
MsgInsertRedisFailedCounter prometheus.Counter
MsgInsertMongoSuccessCounter prometheus.Counter
MsgInsertMongoFailedCounter prometheus.Counter
MsgPullFromRedisSuccessCounter prometheus.Counter
MsgPullFromRedisFailedCounter prometheus.Counter
MsgPullFromMongoSuccessCounter prometheus.Counter
MsgPullFromMongoFailedCounter prometheus.Counter
// msg-ws.
MsgRecvTotalCounter prometheus.Counter
GetNewestSeqTotalCounter prometheus.Counter
PullMsgBySeqListTotalCounter prometheus.Counter
SingleChatMsgRecvSuccessCounter prometheus.Counter
GroupChatMsgRecvSuccessCounter prometheus.Counter
WorkSuperGroupChatMsgRecvSuccessCounter prometheus.Counter
OnlineUserGauge prometheus.Gauge
// msg-msg.
SingleChatMsgProcessSuccessCounter prometheus.Counter
SingleChatMsgProcessFailedCounter prometheus.Counter
GroupChatMsgProcessSuccessCounter prometheus.Counter
GroupChatMsgProcessFailedCounter prometheus.Counter
WorkSuperGroupChatMsgProcessSuccessCounter prometheus.Counter
WorkSuperGroupChatMsgProcessFailedCounter prometheus.Counter
// msg-push.
MsgOnlinePushSuccessCounter prometheus.Counter
MsgOfflinePushSuccessCounter prometheus.Counter
MsgOfflinePushFailedCounter prometheus.Counter
// api.
ApiRequestCounter prometheus.Counter
ApiRequestSuccessCounter prometheus.Counter
ApiRequestFailedCounter prometheus.Counter
// grpc.
GrpcRequestCounter prometheus.Counter
GrpcRequestSuccessCounter prometheus.Counter
GrpcRequestFailedCounter prometheus.Counter
SendMsgCounter prometheus.Counter
// conversation.
ConversationCreateSuccessCounter prometheus.Counter
ConversationCreateFailedCounter prometheus.Counter
)
func NewUserLoginCounter() {
if UserLoginCounter != nil {
return
}
UserLoginCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "user_login",
Help: "The number of user login",
})
}
func NewUserRegisterCounter() {
if UserRegisterCounter != nil {
return
}
UserRegisterCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "user_register",
Help: "The number of user register",
})
}
func NewSeqGetSuccessCounter() {
if SeqGetSuccessCounter != nil {
return
}
SeqGetSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "seq_get_success",
Help: "The number of successful get seq",
})
}
func NewSeqGetFailedCounter() {
if SeqGetFailedCounter != nil {
return
}
SeqGetFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "seq_get_failed",
Help: "The number of failed get seq",
})
}
func NewSeqSetSuccessCounter() {
if SeqSetSuccessCounter != nil {
return
}
SeqSetSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "seq_set_success",
Help: "The number of successful set seq",
})
}
func NewSeqSetFailedCounter() {
if SeqSetFailedCounter != nil {
return
}
SeqSetFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "seq_set_failed",
Help: "The number of failed set seq",
})
}
func NewApiRequestCounter() {
if ApiRequestCounter != nil {
return
}
ApiRequestCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "api_request",
Help: "The number of api request",
})
}
func NewApiRequestSuccessCounter() {
if ApiRequestSuccessCounter != nil {
return
}
ApiRequestSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "api_request_success",
Help: "The number of api request success",
})
}
func NewApiRequestFailedCounter() {
if ApiRequestFailedCounter != nil {
return
}
ApiRequestFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "api_request_failed",
Help: "The number of api request failed",
})
}
func NewGrpcRequestCounter() {
if GrpcRequestCounter != nil {
return
}
GrpcRequestCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "grpc_request",
Help: "The number of api request",
})
}
func NewGrpcRequestSuccessCounter() {
if GrpcRequestSuccessCounter != nil {
return
}
GrpcRequestSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "grpc_request_success",
Help: "The number of grpc request success",
})
}
func NewGrpcRequestFailedCounter() {
if GrpcRequestFailedCounter != nil {
return
}
GrpcRequestFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "grpc_request_failed",
Help: "The number of grpc request failed",
})
}
func NewSendMsgCount() {
if SendMsgCounter != nil {
return
}
SendMsgCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "send_msg",
Help: "The number of send msg",
})
}
func NewMsgInsertRedisSuccessCounter() {
if MsgInsertRedisSuccessCounter != nil {
return
}
MsgInsertRedisSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_redis_success",
Help: "The number of successful insert msg to redis",
})
}
func NewMsgInsertRedisFailedCounter() {
if MsgInsertRedisFailedCounter != nil {
return
}
MsgInsertRedisFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_redis_failed",
Help: "The number of failed insert msg to redis",
})
}
func NewMsgInsertMongoSuccessCounter() {
if MsgInsertMongoSuccessCounter != nil {
return
}
MsgInsertMongoSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_mongo_success",
Help: "The number of successful insert msg to mongo",
})
}
func NewMsgInsertMongoFailedCounter() {
if MsgInsertMongoFailedCounter != nil {
return
}
MsgInsertMongoFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_mongo_failed",
Help: "The number of failed insert msg to mongo",
})
}
func NewMsgPullFromRedisSuccessCounter() {
if MsgPullFromRedisSuccessCounter != nil {
return
}
MsgPullFromRedisSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_pull_from_redis_success",
Help: "The number of successful pull msg from redis",
})
}
func NewMsgPullFromRedisFailedCounter() {
if MsgPullFromRedisFailedCounter != nil {
return
}
MsgPullFromRedisFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_pull_from_redis_failed",
Help: "The number of failed pull msg from redis",
})
}
func NewMsgPullFromMongoSuccessCounter() {
if MsgPullFromMongoSuccessCounter != nil {
return
}
MsgPullFromMongoSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_pull_from_mongo_success",
Help: "The number of successful pull msg from mongo",
})
}
func NewMsgPullFromMongoFailedCounter() {
if MsgPullFromMongoFailedCounter != nil {
return
}
MsgPullFromMongoFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_pull_from_mongo_failed",
Help: "The number of failed pull msg from mongo",
})
}
func NewMsgRecvTotalCounter() {
if MsgRecvTotalCounter != nil {
return
}
MsgRecvTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_recv_total",
Help: "The number of msg received",
})
}
func NewGetNewestSeqTotalCounter() {
if GetNewestSeqTotalCounter != nil {
return
}
GetNewestSeqTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "get_newest_seq_total",
Help: "the number of get newest seq",
})
}
func NewPullMsgBySeqListTotalCounter() {
if PullMsgBySeqListTotalCounter != nil {
return
}
PullMsgBySeqListTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "pull_msg_by_seq_list_total",
Help: "The number of pull msg by seq list",
})
}
func NewSingleChatMsgRecvSuccessCounter() {
if SingleChatMsgRecvSuccessCounter != nil {
return
}
SingleChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "single_chat_msg_recv_success",
Help: "The number of single chat msg successful received ",
})
}
func NewGroupChatMsgRecvSuccessCounter() {
if GroupChatMsgRecvSuccessCounter != nil {
return
}
GroupChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "group_chat_msg_recv_success",
Help: "The number of group chat msg successful received",
})
}
func NewWorkSuperGroupChatMsgRecvSuccessCounter() {
if WorkSuperGroupChatMsgRecvSuccessCounter != nil {
return
}
WorkSuperGroupChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "work_super_group_chat_msg_recv_success",
Help: "The number of work/super group chat msg successful received",
})
}
func NewOnlineUserGauges() {
if OnlineUserGauge != nil {
return
}
OnlineUserGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "online_user_num",
Help: "The number of online user num",
})
}
func NewSingleChatMsgProcessSuccessCounter() {
if SingleChatMsgProcessSuccessCounter != nil {
return
}
SingleChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "single_chat_msg_process_success",
Help: "The number of single chat msg successful processed",
})
}
func NewSingleChatMsgProcessFailedCounter() {
if SingleChatMsgProcessFailedCounter != nil {
return
}
SingleChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "single_chat_msg_process_failed",
Help: "The number of single chat msg failed processed",
})
}
func NewGroupChatMsgProcessSuccessCounter() {
if GroupChatMsgProcessSuccessCounter != nil {
return
}
GroupChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "group_chat_msg_process_success",
Help: "The number of group chat msg successful processed",
})
}
func NewGroupChatMsgProcessFailedCounter() {
if GroupChatMsgProcessFailedCounter != nil {
return
}
GroupChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "group_chat_msg_process_failed",
Help: "The number of group chat msg failed processed",
})
}
func NewWorkSuperGroupChatMsgProcessSuccessCounter() {
if WorkSuperGroupChatMsgProcessSuccessCounter != nil {
return
}
WorkSuperGroupChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "work_super_group_chat_msg_process_success",
Help: "The number of work/super group chat msg successful processed",
})
}
func NewWorkSuperGroupChatMsgProcessFailedCounter() {
if WorkSuperGroupChatMsgProcessFailedCounter != nil {
return
}
WorkSuperGroupChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "work_super_group_chat_msg_process_failed",
Help: "The number of work/super group chat msg failed processed",
})
}
func NewMsgOnlinePushSuccessCounter() {
if MsgOnlinePushSuccessCounter != nil {
return
}
MsgOnlinePushSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_online_push_success",
Help: "The number of msg successful online pushed",
})
}
func NewMsgOfflinePushSuccessCounter() {
if MsgOfflinePushSuccessCounter != nil {
return
}
MsgOfflinePushSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_offline_push_success",
Help: "The number of msg successful offline pushed",
})
}
func NewMsgOfflinePushFailedCounter() {
if MsgOfflinePushFailedCounter != nil {
return
}
MsgOfflinePushFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_offline_push_failed",
Help: "The number of msg failed offline pushed",
})
}
func NewConversationCreateSuccessCounter() {
if ConversationCreateSuccessCounter != nil {
return
}
ConversationCreateSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "conversation_push_success",
Help: "The number of conversation successful pushed",
})
}
func NewConversationCreateFailedCounter() {
if ConversationCreateFailedCounter != nil {
return
}
ConversationCreateFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "conversation_push_failed",
Help: "The number of conversation failed pushed",
})
}

@ -1,97 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prome
import (
"bytes"
"net/http"
"strconv"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func StartPrometheusSrv(prometheusPort int) error {
if config.Config.Prometheus.Enable {
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(":"+strconv.Itoa(prometheusPort), nil)
return err
}
return nil
}
func PrometheusHandler() gin.HandlerFunc {
h := promhttp.Handler()
return func(c *gin.Context) {
h.ServeHTTP(c.Writer, c.Request)
}
}
type responseBodyWriter struct {
gin.ResponseWriter
body *bytes.Buffer
}
func (r responseBodyWriter) Write(b []byte) (int, error) {
r.body.Write(b)
return r.ResponseWriter.Write(b)
}
func PrometheusMiddleware(c *gin.Context) {
Inc(ApiRequestCounter)
w := &responseBodyWriter{body: &bytes.Buffer{}, ResponseWriter: c.Writer}
c.Writer = w
c.Next()
if c.Writer.Status() == http.StatusOK {
Inc(ApiRequestSuccessCounter)
} else {
Inc(ApiRequestFailedCounter)
}
}
func Inc(counter prometheus.Counter) {
if config.Config.Prometheus.Enable {
if counter != nil {
counter.Inc()
}
}
}
func Add(counter prometheus.Counter, add int) {
if config.Config.Prometheus.Enable {
if counter != nil {
counter.Add(float64(add))
}
}
}
func GaugeInc(gauges prometheus.Gauge) {
if config.Config.Prometheus.Enable {
if gauges != nil {
gauges.Inc()
}
}
}
func GaugeDec(gauges prometheus.Gauge) {
if config.Config.Prometheus.Enable {
if gauges != nil {
gauges.Dec()
}
}
}

@ -16,7 +16,12 @@ package startrpc
import (
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"log"
"net"
"net/http"
"strconv"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
@ -29,7 +34,6 @@ import (
"github.com/OpenIMSDK/tools/discoveryregistry"
"github.com/OpenIMSDK/tools/mw"
"github.com/OpenIMSDK/tools/network"
"github.com/OpenIMSDK/tools/prome"
"github.com/OpenIMSDK/tools/utils"
)
@ -41,7 +45,7 @@ func Start(
rpcFn func(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error,
options ...grpc.ServerOption,
) error {
fmt.Printf("start %s server, port: %d, prometheusPort: %d, OpenIM version: %s",
fmt.Printf("start %s server, port: %d, prometheusPort: %d, OpenIM version: %s\n",
rpcRegisterName, rpcPort, prometheusPort, config.Version)
listener, err := net.Listen(
"tcp",
@ -61,16 +65,15 @@ func Start(
if err != nil {
return err
}
var reg *prometheus.Registry
var metric *grpcprometheus.ServerMetrics
// ctx middleware
if config.Config.Prometheus.Enable {
prome.NewGrpcRequestCounter()
prome.NewGrpcRequestFailedCounter()
prome.NewGrpcRequestSuccessCounter()
unaryInterceptor := mw.InterceptChain(grpcprometheus.UnaryServerInterceptor, mw.RpcServerInterceptor)
options = append(options, []grpc.ServerOption{
grpc.StreamInterceptor(grpcprometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(unaryInterceptor),
}...)
//////////////////////////
cusMetrics := prom_metrics.GetGrpcCusMetrics(rpcRegisterName)
reg, metric, err = prom_metrics.NewGrpcPromObj(cusMetrics)
options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
} else {
options = append(options, mw.GrpcServer())
}
@ -91,8 +94,11 @@ func Start(
}
go func() {
if config.Config.Prometheus.Enable && prometheusPort != 0 {
if err := prome.StartPrometheusSrv(prometheusPort); err != nil {
panic(err.Error())
metric.InitializeMetrics(srv)
// Create a HTTP server for prometheus.
httpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
if err := httpServer.ListenAndServe(); err != nil {
log.Fatal("Unable to start a http server.")
}
}
}()

@ -40,8 +40,8 @@ var (
// companion .gitattributes file containing 'export-subst' in this same
// directory. See also https://git-scm.com/docs/gitattributes
gitVersion string = "latest"
gitCommit string = "" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "" // state of git tree, either "clean" or "dirty"
gitCommit string = "" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "" // state of git tree, either "clean" or "dirty"
buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)

@ -4,15 +4,15 @@ package version
// TODO: Add []string of api versions supported? It's still unclear
// how we'll want to distribute that information.
type Info struct {
Major string `json:"major,omitempty"`
Minor string `json:"minor,omitempty"`
GitVersion string `json:"gitVersion"`
Major string `json:"major,omitempty"`
Minor string `json:"minor,omitempty"`
GitVersion string `json:"gitVersion"`
GitTreeState string `json:"gitTreeState,omitempty"`
GitCommit string `json:"gitCommit,omitempty"`
BuildDate string `json:"buildDate"`
GoVersion string `json:"goVersion"`
Compiler string `json:"compiler"`
Platform string `json:"platform"`
GitCommit string `json:"gitCommit,omitempty"`
BuildDate string `json:"buildDate"`
GoVersion string `json:"goVersion"`
Compiler string `json:"compiler"`
Platform string `json:"platform"`
}
type Output struct {
@ -21,7 +21,7 @@ type Output struct {
}
type OpenIMClientVersion struct {
ClientVersion string `json:"clientVersion,omitempty" yaml:"clientVersion,omitempty"` //sdk core version
ClientVersion string `json:"clientVersion,omitempty" yaml:"clientVersion,omitempty"` //sdk core version
}
// String returns info as a human-friendly version string.

@ -13,15 +13,15 @@ func Get() Info {
// These variables typically come from -ldflags settings and in
// their absence fallback to the settings in ./base.go
return Info{
Major: gitMajor,
Minor: gitMinor,
GitVersion: gitVersion,
Major: gitMajor,
Minor: gitMinor,
GitVersion: gitVersion,
GitTreeState: gitTreeState,
GitCommit: gitCommit,
BuildDate: buildDate,
GoVersion: runtime.Version(),
Compiler: runtime.Compiler,
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
GitCommit: gitCommit,
BuildDate: buildDate,
GoVersion: runtime.Version(),
Compiler: runtime.Compiler,
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
}
}

@ -118,10 +118,9 @@ func GetNotificationConversationIDByConversationID(conversationID string) string
l := strings.Split(conversationID, "_")
if len(l) > 1 {
l[0] = "n"
return strings.Join(l, "_")
} else {
return ""
return conversationID
}
return ""
}
func GetNotificationConversationID(sessionType int, ids ...string) string {

@ -24,11 +24,9 @@ OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${OPENIM_ROOT}/scripts/lib/init.sh"
# 定义一个配置文件数组,其中包含需要生成的配置文件的名称路径
# (en: Define a profile array that contains the name path of the profile to be generated.)
readonly ENV_FILE=${ENV_FILE:-"${OPENIM_ROOT}/scripts/install/environment.sh"}
# 定义关联数组,其中键是模板文件,值是对应的输出文件
# (en: Defines an associative array where the keys are the template files and the values are the corresponding output files.)
declare -A TEMPLATES=(
["${OPENIM_ROOT}/deployments/templates/env_template.yaml"]="${OPENIM_ROOT}/.env"

@ -25,10 +25,10 @@ OPENIM_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
LOCAL_OUTPUT_ROOT=""${OPENIM_ROOT}"/${OUT_DIR:-_output}"
source "${OPENIM_ROOT}/scripts/lib/init.sh"
#TODO: Access to the IP networks outside, or you want to use the IP network
# IP=127.0.0.1
if [ -z "${IP}" ]; then
IP=$(openim::util::get_server_ip)
#TODO: Access to the OPENIM_IP networks outside, or you want to use the OPENIM_IP network
# OPENIM_IP=127.0.0.1
if [ -z "${OPENIM_IP}" ]; then
OPENIM_IP=$(openim::util::get_server_ip)
fi
# config.gateway custom bridge modes
@ -49,14 +49,14 @@ def "DATA_DIR" "${OPENIM_ROOT}"
def "USER" "root"
# Set a single unified password for easy recall
def "PASSWORD" "openIM123"
readonly PASSWORD=${PASSWORD:-'openIM123'}
# Set a unified database name for easy management
def "DATABASE_NAME" "openIM_v3"
# openim user on the Linux system
def "LINUX_USERNAME" "openim"
def "LINUX_PASSWORD" "${PASSWORD}"
readonly LINUX_PASSWORD=${LINUX_PASSWORD:-"${PASSWORD}"}
# Set the installation directory
def "INSTALL_DIR" "${LOCAL_OUTPUT_ROOT}/installs"
@ -167,7 +167,8 @@ def "ZOOKEEPER_PASSWORD" "" # Zookeeper的密码
def "MYSQL_PORT" "13306" # MySQL的端口
def "MYSQL_ADDRESS" "${DOCKER_BRIDGE_GATEWAY}" # MySQL的地址
def "MYSQL_USERNAME" "${USER}" # MySQL的用户名
def "MYSQL_PASSWORD" "${PASSWORD}" # MySQL的密码
# MySQL的密码
readonly MYSQL_PASSWORD=${MYSQL_PASSWORD:-"${PASSWORD}"}
def "MYSQL_DATABASE" "${DATABASE_NAME}" # MySQL的数据库名
def "MYSQL_MAX_OPEN_CONN" "1000" # 最大打开的连接数
def "MYSQL_MAX_IDLE_CONN" "100" # 最大空闲连接数
@ -181,12 +182,13 @@ def "MONGO_PORT" "37017" # MongoDB的端口
def "MONGO_ADDRESS" "${DOCKER_BRIDGE_GATEWAY}" # MongoDB的地址
def "MONGO_DATABASE" "${DATABASE_NAME}" # MongoDB的数据库名
def "MONGO_USERNAME" "${USER}" # MongoDB的用户名
def "MONGO_PASSWORD" "${PASSWORD}" # MongoDB的密码
# MongoDB的密码
readonly MONGO_PASSWORD=${MONGO_PASSWORD:-"${PASSWORD}"}
def "MONGO_MAX_POOL_SIZE" "100" # 最大连接池大小
###################### Object 配置信息 ######################
# app要能访问到此ip和端口或域名
readonly API_URL=${API_URL:-"http://${IP}:${API_OPENIM_PORT}"}
readonly API_URL=${API_URL:-"http://${OPENIM_IP}:${API_OPENIM_PORT}"}
def "OBJECT_ENABLE" "minio" # 对象是否启用
# 对象的API地址
@ -197,9 +199,9 @@ def "MINIO_PORT" "10005" # MinIO的端口
def MINIO_ADDRESS "${DOCKER_BRIDGE_GATEWAY}"
readonly MINIO_ENDPOINT=${MINIO_ENDPOINT:-"http://${MINIO_ADDRESS}:${MINIO_PORT}"}
def "MINIO_ACCESS_KEY" "${USER}" # MinIO的访问密钥ID
def "MINIO_SECRET_KEY" "${PASSWORD}" # MinIO的密钥
readonly MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-"${PASSWORD}"}
def "MINIO_SESSION_TOKEN" # MinIO的会话令牌
readonly MINIO_SIGN_ENDPOINT=${MINIO_SIGN_ENDPOINT:-"http://${IP}:${MINIO_PORT}"} # signEndpoint为minio公网地址
readonly MINIO_SIGN_ENDPOINT=${MINIO_SIGN_ENDPOINT:-"http://${OPENIM_IP}:${MINIO_PORT}"} # signEndpoint为minio公网地址
def "MINIO_PUBLIC_READ" "false" # 公有读
# 腾讯云COS的存储桶URL
@ -220,7 +222,7 @@ def "OSS_PUBLIC_READ" "false" # 公有
def "REDIS_PORT" "16379" # Redis的端口
def "REDIS_ADDRESS" "${DOCKER_BRIDGE_GATEWAY}" # Redis的地址
def "REDIS_USERNAME" # Redis的用户名
def "REDIS_PASSWORD" "${PASSWORD}" # Redis的密码
readonly REDIS_PASSWORD=${REDIS_PASSWORD:-"${PASSWORD}"}
###################### Kafka 配置信息 ######################
def "KAFKA_USERNAME" # `Kafka` 的用户名
@ -329,7 +331,8 @@ def "RETAIN_CHAT_RECORDS" "365" # 保留聊天记录
readonly CHAT_RECORDS_CLEAR_TIME=${CHAT_RECORDS_CLEAR_TIME:-'0 2 * * 3'}
# Message destruct time
readonly MSG_DESTRUCT_TIME=${MSG_DESTRUCT_TIME:-'0 2 * * *'}
def "SECRET" "${PASSWORD}" # Secret key
# Secret key
readonly SECRET=${SECRET:-"${PASSWORD}"}
def "TOKEN_EXPIRE" "90" # Token expiry time
def "FRIEND_VERIFY" "false" # Friend verification
def "IOS_PUSH_SOUND" "xxx" # iOS push sound
@ -338,6 +341,9 @@ def "IOS_PRODUCTION" "false" # IOS生产
###################### Prometheus configuration ######################
def "PROMETHEUS_ENABLE" "false" # Whether to enable Prometheus
def "PROMETHEUS_URL" "/prometheus"
# Prometheus port for the Api service
readonly API_PROM_PORT=${API_PROM_PORT:-'20100'}
# Prometheus port for the User service
readonly USER_PROM_PORT=${USER_PROM_PORT:-'20110'}
# Prometheus port for the Friend service

@ -49,13 +49,13 @@ function openim::msgtransfer::start()
openim::log::error_exit "OPENIM_MSGGATEWAY_NUM must be equal to the number of MSG_TRANSFER_PROM_PORTS"
fi
for (( i=1; i<=$OPENIM_MSGGATEWAY_NUM; i++ )) do
for (( i=0; i<$OPENIM_MSGGATEWAY_NUM; i++ )) do
openim::log::info "prometheus port: ${MSG_TRANSFER_PROM_PORTS[$i]}"
PROMETHEUS_PORT_OPTION=""
if [[ -n "${OPENIM_PROMETHEUS_PORTS[$i]}" ]]; then
PROMETHEUS_PORT_OPTION="--prometheus_port ${OPENIM_PROMETHEUS_PORTS[$i]}"
fi
nohup ${OPENIM_MSGTRANSFER_BINARY} ${PROMETHEUS_PORT_OPTION} -c ${OPENIM_MSGTRANSFER_CONFIG} >> ${LOG_FILE} 2>&1 &
nohup ${OPENIM_MSGTRANSFER_BINARY} ${PROMETHEUS_PORT_OPTION} -c ${OPENIM_MSGTRANSFER_CONFIG} -n ${i} >> ${LOG_FILE} 2>&1 &
done
openim::util::check_process_names "${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME}"

@ -1,561 +1,332 @@
#!/usr/bin/env bash
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A set of helpers for tests
openim::test::clear_all() {
if openim::test::if_supports_resource "rc" ; then
# shellcheck disable=SC2154
# Disabling because "kube_flags" is set in a parent script
kubectl delete "${kube_flags[@]}" rc --all --grace-period=0 --force
fi
if openim::test::if_supports_resource "pods" ; then
kubectl delete "${kube_flags[@]}" pods --all --grace-period=0 --force
fi
# The root of the build/dist directory
IAM_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
[[ -z ${COMMON_SOURCED} ]] && source ${IAM_ROOT}/scripts/install/common.sh
# API Server API Address:Port
INSECURE_OPENIMAPI=${IAM_APISERVER_HOST}:${API_OPENIM_PORT}
INSECURE_OPENIMAUTO=${OPENIM_RPC_AUTH_HOST}:${OPENIM_AUTH_PORT}
Header="-HContent-Type: application/json"
CCURL="curl -f -s -XPOST" # Create
UCURL="curl -f -s -XPUT" # Update
RCURL="curl -f -s -XGET" # Retrieve
DCURL="curl -f -s -XDELETE" # Delete
openim::test::user()
{
token="-HAuthorization: Bearer $(openim::test::login)"
# 1. If colin, mark, john users exist, clear them first
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/colin; echo
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/mark; echo
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/john; echo
# 2. Create colin, mark, john users
${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/users \
-d'{"password":"User@2021","metadata":{"name":"colin"},"nickname":"colin","email":"colin@foxmail.com","phone":"1812884xxxx"}'; echo
# 3. List all users
${RCURL} "${token}" "http://${INSECURE_OPENIMAPI}/v1/users?offset=0&limit=10"; echo
# 4. Get detailed information of colin user
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/colin; echo
# 5. Modify colin user
${UCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/users/colin \
-d'{"nickname":"colin","email":"colin_modified@foxmail.com","phone":"1812884xxxx"}'; echo
# 6. Delete colin user
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/colin; echo
# 7. Batch delete users
${DCURL} "${token}" "http://${INSECURE_OPENIMAPI}/v1/users?name=mark&name=john"; echo
openim::log::info "$(echo -e '\033[32mcongratulations, /v1/user test passed!\033[0m')"
}
# Prints the calling file and line number $1 levels deep
# Defaults to 2 levels so you can call this to find your own caller
openim::test::get_caller() {
local levels=${1:-2}
local caller_file="${BASH_SOURCE[${levels}]}"
local caller_line="${BASH_LINENO[${levels}-1]}"
echo "$(basename "${caller_file}"):${caller_line}"
# userRouterGroup := r.Group("/user")
# {
# userRouterGroup.POST("/user_register", u.UserRegister)
# userRouterGroup.POST("/update_user_info", ParseToken, u.UpdateUserInfo)
# userRouterGroup.POST("/set_global_msg_recv_opt", ParseToken, u.SetGlobalRecvMessageOpt)
# userRouterGroup.POST("/get_users_info", ParseToken, u.GetUsersPublicInfo)
# userRouterGroup.POST("/get_all_users_uid", ParseToken, u.GetAllUsersID)
# userRouterGroup.POST("/account_check", ParseToken, u.AccountCheck)
# userRouterGroup.POST("/get_users", ParseToken, u.GetUsers)
# userRouterGroup.POST("/get_users_online_status", ParseToken, u.GetUsersOnlineStatus)
# userRouterGroup.POST("/get_users_online_token_detail", ParseToken, u.GetUsersOnlineTokenDetail)
# userRouterGroup.POST("/subscribe_users_status", ParseToken, u.SubscriberStatus)
# userRouterGroup.POST("/get_users_status", ParseToken, u.GetUserStatus)
# userRouterGroup.POST("/get_subscribe_users_status", ParseToken, u.GetSubscribeUsersStatus)
# }
openim::test::group()
{
token="-HAuthorization: Bearer $(openim::test::login)"
}
# Force exact match of a returned result for an object query. Wrap this with || to support multiple
# valid return types.
# This runs `kubectl get` once and asserts that the result is as expected.
# $1: Object on which get should be run
# $2: The go-template to run on the result
# $3: The expected output
# $4: Additional args to be passed to kubectl
openim::test::get_object_assert() {
openim::test::object_assert 1 "$@"
# Define a function to register a user
openim::register_user()
{
user_register_response=$(${CCURL} "${Header}" http://localhost:10002/user/user_register \
-d'{
"secret": "openIM123",
"users": [{"userID": "11111112","nickname": "yourNickname","faceURL": "yourFaceURL"}]
}')
echo "$user_register_response"
}
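A possible usage sketch (an illustration only; it assumes jq is available and that the response carries an errCode field, as the checks further below expect):
resp=$(openim::register_user)
# Treat a zero errCode as success; print the raw response otherwise for debugging.
if [[ $(echo "$resp" | jq -r '.errCode') == "0" ]]; then
  echo "registration ok"
else
  echo "registration failed: $resp"
fi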
# Asserts that the output of a given get query is as expected.
# Runs the query multiple times before failing it.
# $1: Object on which get should be run
# $2: The go-template to run on the result
# $3: The expected output
# $4: Additional args to be passed to kubectl
openim::test::wait_object_assert() {
openim::test::object_assert 10 "$@"
# Define a function to get a token
openim::get_token()
{
token_response=$(${CCURL} "${Header}" http://localhost:10002/auth/user_token \
-d'{
"secret": "openIM123",
"platformID": 1,
"userID": "11111112"
}')
token=$(echo $token_response | grep -Po 'token[" :]+\K[^"]+')
echo "$token"
}
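The grep -Po extraction works but is sensitive to whitespace in the JSON. A jq-based alternative (a sketch only; the .data.token path is an assumption and may differ between server versions):
# Hypothetical jq variant of the extraction above; .data.token is an assumed field path.
token=$(echo "$token_response" | jq -r '.data.token // empty')
[[ -n "$token" ]] || echo "token not found in response" >&2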
# Asserts that the output of a given get query is as expected.
# Can run the query multiple times before failing it.
# $1: Number of times the query should be run before failing it.
# $2: Object on which get should be run
# $3: The go-template to run on the result
# $4: The expected output
# $5: Additional args to be passed to kubectl
openim::test::object_assert() {
local tries=$1
local object=$2
local request=$3
local expected=$4
local args=${5:-}
for j in $(seq 1 "${tries}"); do
# shellcheck disable=SC2086
# Disabling because to allow for expansion here
res=$(kubectl get "${kube_flags[@]}" ${args} ${object} -o go-template="${request}")
if [[ "${res}" =~ ^$expected$ ]]; then
echo -n "${green}"
echo "$(openim::test::get_caller 3): Successful get ${object} ${request}: ${res}"
echo -n "${reset}"
return 0
fi
echo "Waiting for Get ${object} ${request} ${args}: expected: ${expected}, got: ${res}"
sleep $((j-1))
done
echo "${bold}${red}"
echo "$(openim::test::get_caller 3): FAIL!"
echo "Get ${object} ${request}"
echo " Expected: ${expected}"
echo " Got: ${res}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
# Define a function to check the account
openim::check_account()
{
local token=$1
account_check_response=$(${CCURL} "${Header}" -H"operationID: 1646445464564" -H"token: ${token}" http://localhost:10002/user/account_check \
-d'{
"checkUserIDs": ["11111111","11111112"]
}')
echo "$account_check_response"
}
openim::test::get_object_jsonpath_assert() {
local object=$1
local request=$2
local expected=$3
# Define a function to register, get a token and check the account
openim::register_and_check()
{
# Register a user
user_register_response=$(openim::register_user)
# shellcheck disable=SC2086
# Disabling to allow for expansion here
res=$(kubectl get "${kube_flags[@]}" ${object} -o jsonpath=${request})
if [[ $user_register_response == *'"errCode": 0'* ]]; then
echo "User registration successful."
if [[ "${res}" =~ ^$expected$ ]]; then
echo -n "${green}"
echo "$(openim::test::get_caller): Successful get ${object} ${request}: ${res}"
echo -n "${reset}"
return 0
else
echo "${bold}${red}"
echo "$(openim::test::get_caller): FAIL!"
echo "Get ${object} ${request}"
echo " Expected: ${expected}"
echo " Got: ${res}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
}
# Get token
token=$(openim::get_token)
openim::test::describe_object_assert() {
local resource=$1
local object=$2
local matches=( "${@:3}" )
if [[ -n $token ]]; then
echo "Token acquired: $token"
# shellcheck disable=SC2086
# Disabling to allow for expansion here
result=$(kubectl describe "${kube_flags[@]}" ${resource} ${object})
# Check account
account_check_response=$(openim::check_account $token)
for match in "${matches[@]}"; do
if grep -q "${match}" <<< "${result}"; then
echo "matched ${match}"
if [[ $account_check_response == *'"errCode": 0'* ]]; then
echo "Account check successful."
else
echo "Account check failed."
fi
else
echo "${bold}${red}"
echo "$(openim::test::get_caller): FAIL!"
echo "Describe ${resource} ${object}"
echo " Expected Match: ${match}"
echo " Not found in:"
echo "${result}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
echo "Failed to acquire token."
fi
done
echo -n "${green}"
echo "$(openim::test::get_caller): Successful describe ${resource} ${object}:"
echo "${result}"
echo -n "${reset}"
return 0
else
echo "User registration failed."
fi
}
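Glob-matching the raw response against '"errCode": 0' is brittle because it depends on the exact spacing the server emits. A more tolerant check, as a sketch (openim::is_ok is a hypothetical helper name and assumes jq is installed):
# Hypothetical helper: succeed when the response JSON reports errCode == 0, regardless of formatting.
openim::is_ok() {
  echo "$1" | jq -e '.errCode == 0' >/dev/null 2>&1
}
# Example: openim::is_ok "$user_register_response" && echo "User registration successful."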
openim::test::describe_object_events_assert() {
local resource=$1
local object=$2
local showevents=${3:-"true"}
# shellcheck disable=SC2086
# Disabling to allow for expansion here
if [[ -z "${3:-}" ]]; then
result=$(kubectl describe "${kube_flags[@]}" ${resource} ${object})
else
result=$(kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" ${resource} ${object})
fi
openim::test::secret()
{
token="-HAuthorization: Bearer $(openim::test::login)"
if grep -q "No events.\|Events:" <<< "${result}"; then
local has_events="true"
else
local has_events="false"
fi
if [[ "${showevents}" == "${has_events}" ]]; then
echo -n "${green}"
echo "$(openim::test::get_caller): Successful describe"
echo "${result}"
echo "${reset}"
return 0
else
echo "${bold}${red}"
echo "$(openim::test::get_caller): FAIL"
if [[ "${showevents}" == "false" ]]; then
echo " Events information should not be described in:"
else
echo " Events information not found in:"
fi
echo "${result}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
}
# 1. Delete the secret0 secret first if it exists
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/secret0; echo
openim::test::describe_resource_assert() {
local resource=$1
local matches=( "${@:2}" )
# 2. Create the secret0 secret
${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets \
-d'{"metadata":{"name":"secret0"},"expires":0,"description":"admin secret"}'; echo
# shellcheck disable=SC2086
# Disabling to allow for expansion here
result=$(kubectl describe "${kube_flags[@]}" ${resource})
# 3. List all secrets
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets; echo
for match in "${matches[@]}"; do
if grep -q "${match}" <<< "${result}"; then
echo "matched ${match}"
else
echo "${bold}${red}"
echo "FAIL!"
echo "Describe ${resource}"
echo " Expected Match: ${match}"
echo " Not found in:"
echo "${result}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
done
# 4. Get the details of the secret0 secret
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/secret0; echo
# 5. Update the secret0 secret
${UCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/secret0 \
-d'{"expires":0,"description":"admin secret(modified)"}'; echo
echo -n "${green}"
echo "Successful describe ${resource}:"
echo "${result}"
echo -n "${reset}"
return 0
# 6. Delete the secret0 secret
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/secret0; echo
openim::log::info "$(echo -e '\033[32mcongratulations, /v1/secret test passed!\033[0m')"
}
openim::test::describe_resource_events_assert() {
local resource=$1
local showevents=${2:-"true"}
openim::test::policy()
{
token="-HAuthorization: Bearer $(openim::test::login)"
# shellcheck disable=SC2086
# Disabling to allow for expansion here
result=$(kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" ${resource})
# 1. Delete the policy0 policy first if it exists
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/policy0; echo
if grep -q "No events.\|Events:" <<< "${result}"; then
local has_events="true"
else
local has_events="false"
fi
if [[ "${showevents}" == "${has_events}" ]]; then
echo -n "${green}"
echo "Successful describe"
echo "${result}"
echo -n "${reset}"
return 0
else
echo "${bold}${red}"
echo "FAIL"
if [[ "${showevents}" == "false" ]]; then
echo " Events information should not be described in:"
else
echo " Events information not found in:"
fi
echo "${result}"
caller
echo "${reset}"
return 1
fi
}
# 2. Create the policy0 policy
${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/policies \
-d'{"metadata":{"name":"policy0"},"policy":{"description":"One policy to rule them all.","subjects":["users:<peter|ken>","users:maria","groups:admins"],"actions":["delete","<create|update>"],"effect":"allow","resources":["resources:articles:<.*>","resources:printer"],"conditions":{"remoteIPAddress":{"type":"CIDRCondition","options":{"cidr":"192.168.0.1/16"}}}}}'; echo
openim::test::describe_resource_chunk_size_assert() {
# $1: the target resource
local resource=$1
# $2: comma-separated list of additional resources that will be listed
local additionalResources=${2:-}
# Remaining args are flags to pass to kubectl
local args=${3:-}
# Expect list requests for the target resource and the additional resources
local expectLists
IFS="," read -r -a expectLists <<< "${resource},${additionalResources}"
# shellcheck disable=SC2086
# Disabling to allow for expansion here
defaultResult=$(kubectl describe ${resource} --show-events=true -v=6 ${args} "${kube_flags[@]}" 2>&1 >/dev/null)
for r in "${expectLists[@]}"; do
if grep -q "${r}?.*limit=500" <<< "${defaultResult}"; then
echo "query for ${r} had limit param"
else
echo "${bold}${red}"
echo "FAIL!"
echo "Describe ${resource}"
echo " Expected limit param on request for: ${r}"
echo " Not found in:"
echo "${defaultResult}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
done
# shellcheck disable=SC2086
# Disabling to allow for expansion here
# Try a non-default chunk size
customResult=$(kubectl describe ${resource} --show-events=false --chunk-size=10 -v=6 ${args} "${kube_flags[@]}" 2>&1 >/dev/null)
if grep -q "${resource}?limit=10" <<< "${customResult}"; then
echo "query for ${resource} had user-specified limit param"
else
echo "${bold}${red}"
echo "FAIL!"
echo "Describe ${resource}"
echo " Expected limit param on request for: ${r}"
echo " Not found in:"
echo "${customResult}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
# 3. List all policies
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies; echo
echo -n "${green}"
echo "Successful describe ${resource} verbose logs:"
echo "${defaultResult}"
echo -n "${reset}"
# 4. Get the details of the policy0 policy
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/policy0; echo
return 0
}
# 5. Update the policy0 policy
${UCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/policy0 \
-d'{"policy":{"description":"One policy to rule them all(modified).","subjects":["users:<peter|ken>","users:maria","groups:admins"],"actions":["delete","<create|update>"],"effect":"allow","resources":["resources:articles:<.*>","resources:printer"],"conditions":{"remoteIPAddress":{"type":"CIDRCondition","options":{"cidr":"192.168.0.1/16"}}}}}'; echo
# Compare the sort-by resource name output (first column, skipping the first line) with the expected order specified in the last parameter
openim::test::if_sort_by_has_correct_order() {
local var
var="$(echo "$1" | awk '{if(NR!=1) print $1}' | tr '\n' ':')"
openim::test::if_has_string "${var}" "${@:$#}"
# 6. Delete the policy0 policy
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/policy0; echo
openim::log::info "$(echo -e '\033[32mcongratulations, /v1/policy test passed!\033[0m')"
}
openim::test::if_has_string() {
local message=$1
local match=$2
if grep -q "${match}" <<< "${message}"; then
echo -n "${green}"
echo "Successful"
echo -n "${reset}"
echo "message:${message}"
echo "has:${match}"
return 0
else
echo -n "${bold}${red}"
echo "FAIL!"
echo -n "${reset}"
echo "message:${message}"
echo "has not:${match}"
caller
return 1
fi
openim::test::apiserver()
{
openim::test::user
openim::test::secret
openim::test::policy
openim::log::info "$(echo -e '\033[32mcongratulations, openim-apiserver test passed!\033[0m')"
}
openim::test::if_has_not_string() {
local message=$1
local match=$2
if grep -q "${match}" <<< "${message}"; then
echo -n "${bold}${red}"
echo "FAIL!"
echo -n "${reset}"
echo "message:${message}"
echo "has:${match}"
caller
return 1
else
echo -n "${green}"
echo "Successful"
echo -n "${reset}"
echo "message:${message}"
echo "has not:${match}"
return 0
fi
}
openim::test::authz()
{
token="-HAuthorization: Bearer $(openim::test::login)"
# 1. Delete the authzpolicy policy first if it exists
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/authzpolicy; echo
# 2. Create the authzpolicy policy
${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/policies \
-d'{"metadata":{"name":"authzpolicy"},"policy":{"description":"One policy to rule them all.","subjects":["users:<peter|ken>","users:maria","groups:admins"],"actions":["delete","<create|update>"],"effect":"allow","resources":["resources:articles:<.*>","resources:printer"],"conditions":{"remoteIPAddress":{"type":"CIDRCondition","options":{"cidr":"192.168.0.1/16"}}}}}'; echo
# 3. Delete the authzsecret secret first if it exists
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/authzsecret; echo
# 4. Create the authzsecret secret
secret=$(${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets -d'{"metadata":{"name":"authzsecret"},"expires":0,"description":"admin secret"}')
secretID=$(echo ${secret} | grep -Po 'secretID[" :]+\K[^"]+')
secretKey=$(echo ${secret} | grep -Po 'secretKey[" :]+\K[^"]+')
# 5. Generate a token
token=$(iamctl jwt sign ${secretID} ${secretKey})
openim::test::if_empty_string() {
local match=$1
if [ -n "${match}" ]; then
echo -n "${bold}${red}"
echo "FAIL!"
echo "${match} is not empty"
echo -n "${reset}"
caller
# 6. Call /v1/authz to perform resource authorization.
# Note: sleep 3s here to wait for openim-authz-server to sync the newly created secret into its memory
echo "wait 3s to allow openim-authz-server to sync information into its memory ..."
sleep 3
ret=`$CCURL "${Header}" -H"Authorization: Bearer ${token}" http://${INSECURE_OPENIMAUTO}/v1/authz \
-d'{"subject":"users:maria","action":"delete","resource":"resources:articles:ladon-introduction","context":{"remoteIPAddress":"192.168.0.5"}}' | grep -Po 'allowed[" :]+\K\w+'`
if [ "$ret" != "true" ];then
return 1
else
echo -n "${green}"
echo "Successful"
echo -n "${reset}"
return 0
fi
}
# Returns true if the required resource is part of supported resources.
# Expects env vars:
# SUPPORTED_RESOURCES: Array of all resources supported by the apiserver. "*"
# means it supports all resources. For example: ("*") or ("rc" "*") both mean that
# all resources are supported.
# $1: Name of the resource to be tested.
openim::test::if_supports_resource() {
SUPPORTED_RESOURCES=${SUPPORTED_RESOURCES:-""}
REQUIRED_RESOURCE=${1:-""}
for r in "${SUPPORTED_RESOURCES[@]}"; do
if [[ "${r}" == "*" || "${r}" == "${REQUIRED_RESOURCE}" ]]; then
return 0
fi
done
return 1
openim::log::info "$(echo -e '\033[32mcongratulations, /v1/authz test passed!\033[0m')"
}
openim::test::version::object_to_file() {
name=$1
flags=${2:-""}
file=$3
# shellcheck disable=SC2086
# Disabling because "flags" needs to allow for expansion here
kubectl version ${flags} | grep "${name} Version:" | sed -e s/"${name} Version: "/""/g > "${file}"
openim::test::authzserver()
{
openim::test::authz
openim::log::info "$(echo -e '\033[32mcongratulations, openim-authz-server test passed!\033[0m')"
}
openim::test::version::json_object_to_file() {
flags=$1
file=$2
# shellcheck disable=SC2086
# Disabling because "flags" needs to allow for expansion here
kubectl version ${flags} --output json | sed -e s/' '/''/g -e s/'\"'/''/g -e s/'}'/''/g -e s/'{'/''/g -e s/'clientVersion:'/'clientVersion:,'/ -e s/'serverVersion:'/'serverVersion:,'/ | tr , '\n' > "${file}"
}
openim::test::pump()
{
${RCURL} http://${IAM_PUMP_HOST}:7070/healthz | egrep -q 'status.*ok' || {
openim::log::error "cannot access openim-pump healthz api, openim-pump maybe down"
return 1
}
openim::test::version::json_client_server_object_to_file() {
flags=$1
name=$2
file=$3
# shellcheck disable=SC2086
# Disabling because "flags" needs to allow for expansion here
kubectl version ${flags} --output json | jq -r ".${name}" | sed -e s/'\"'/''/g -e s/'}'/''/g -e s/'{'/''/g -e /^$/d -e s/','/''/g -e s/':'/'='/g > "${file}"
}
openim::test::real_pump_test
openim::test::version::yaml_object_to_file() {
flags=$1
file=$2
# shellcheck disable=SC2086
# Disabling because "flags" needs to allow for expansion here
kubectl version ${flags} --output yaml | sed -e s/' '/''/g -e s/'\"'/''/g -e /^$/d > "${file}"
openim::log::info "$(echo -e '\033[32mcongratulations, openim-pump test passed!\033[0m')"
}
openim::test::version::diff_assert() {
local original=$1
local comparator=${2:-"eq"}
local latest=$3
local diff_msg=${4:-""}
local res=""
if [ ! -f "${original}" ]; then
echo "${bold}${red}"
echo "FAIL! ${diff_msg}"
echo "the file '${original}' does not exit"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
# Use real data to test whether openim-pump works correctly
openim::test::real_pump_test()
{
# 1. Create the secret pair needed to access openim-authz-server
iamctl secret create pumptest &>/dev/null
if [ ! -f "${latest}" ]; then
echo "${bold}${red}"
echo "FAIL! ${diff_msg}"
echo "the file '${latest}' does not exit"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
# 2. Generate a JWT token with the secret pair created in step 1
authzAccessToken=`iamctl jwt sign njcho8gJQArsq7zr5v1YpG5NcvL0aeuZ38Ti if70HgRgp021iq5ex2l7pfy5XvgtZM3q` # iamctl jwt sign $secretID $secretKey
if [ "${comparator}" == "exact" ]; then
# Skip sorting of file content for exact comparison.
cp "${original}" "${original}.sorted"
cp "${latest}" "${latest}.sorted"
else
sort "${original}" > "${original}.sorted"
sort "${latest}" > "${latest}.sorted"
fi
# 3. Create an authorization policy
iamctl policy create pumptest '{"metadata":{"name":"policy0"},"policy":{"description":"One policy to rule them all.","subjects":["users:<peter|ken>","users:maria","groups:admins"],"actions":["delete","<create|update>"],"effect":"allow","resources":["resources:articles:<.*>","resources:printer"],"conditions":{"remoteIPAddress":{"type":"CIDRCondition","options":{"cidr":"192.168.0.1/16"}}}}}' &>/dev/null
if [ "${comparator}" == "eq" ] || [ "${comparator}" == "exact" ]; then
if [ "$(diff -iwB "${original}".sorted "${latest}".sorted)" == "" ] ; then
echo -n "${green}"
echo "Successful: ${diff_msg}"
echo -n "${reset}"
return 0
else
echo "${bold}${red}"
echo "FAIL! ${diff_msg}"
echo " Expected: "
cat "${original}"
echo " Got: "
cat "${latest}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
else
if [ -n "$(diff -iwB "${original}".sorted "${latest}".sorted)" ] ; then
echo -n "${green}"
echo "Successful: ${diff_msg}"
echo -n "${reset}"
return 0
else
echo "${bold}${red}"
echo "FAIL! ${diff_msg}"
echo " Expected: "
cat "${original}"
echo " Got: "
cat "${latest}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
fi
# Note: sleep 3s here to wait for openim-authz-server to sync the newly created secret and authorization policy into its memory
echo "wait 3s to allow openim-authz-server to sync information into its memory ..."
sleep 3
# 4. Call the /v1/authz endpoint to perform resource authorization
$CCURL "${Header}" -H"Authorization: Bearer ${token}" http://${INSECURE_OPENIMAUTO}/v1/authz \
-d'{"subject":"users:maria","action":"delete","resource":"resources:articles:ladon-introduction","context":{"remoteIPAddress":"192.168.0.5"}}' &>/dev/null
# Sleep 10s here to wait for openim-pump to analyze the authorization logs in Redis and dump them into MongoDB
echo "wait 10s to allow openim-pump analyze and dump authorization log into MongoDB ..."
sleep 10
# 5. Check whether MongoDB contains the parsed authorization log.
echo "db.iam_analytics.find()" | mongosh --quiet "${IAM_PUMP_MONGO_URL}" | grep -q "allow access" || {
openim::log::error "cannot find analyzed authorization log in MongoDB"
return 1
}
}
# Force exact match of kubectl stdout, stderr, and return code.
# $1: file with actual stdout
# $2: file with actual stderr
# $3: the actual return code
# $4: file with expected stdout
# $5: file with expected stderr
# $6: expected return code
# $7: additional message describing the invocation
openim::test::results::diff() {
local actualstdout=$1
local actualstderr=$2
local actualcode=$3
local expectedstdout=$4
local expectedstderr=$5
local expectedcode=$6
local message=$7
local result=0
if ! openim::test::version::diff_assert "${expectedstdout}" "exact" "${actualstdout}" "stdout for ${message}"; then
result=1
fi
if ! openim::test::version::diff_assert "${expectedstderr}" "exact" "${actualstderr}" "stderr for ${message}"; then
result=1
fi
if [ "${actualcode}" -ne "${expectedcode}" ]; then
echo "${bold}${red}"
echo "$(openim::test::get_caller): FAIL!"
echo "Return code for ${message}"
echo " Expected: ${expectedcode}"
echo " Got: ${actualcode}"
echo "${reset}${red}"
caller
echo "${reset}"
result=1
fi
openim::test::watcher()
{
${RCURL} http://${IAM_WATCHER_HOST}:5050/healthz | egrep -q 'status.*ok' || {
openim::log::error "cannot access openim-watcher healthz api, openim-watcher maybe down"
return 1
}
openim::log::info "$(echo -e '\033[32mcongratulations, openim-watcher test passed!\033[0m')"
}
if [ "${result}" -eq 0 ]; then
echo -n "${green}"
echo "$(openim::test::get_caller): Successful: ${message}"
echo -n "${reset}"
fi
openim::test::iamctl()
{
iamctl user list | egrep -q admin || {
openim::log::error "iamctl cannot list users from openim-apiserver"
return 1
}
openim::log::info "$(echo -e '\033[32mcongratulations, iamctl test passed!\033[0m')"
}
openim::test::man()
{
man openim-apiserver | grep -q 'OPENIM API Server' || {
openim::log::error "openim man page not installed or may not installed properly"
return 1
}
openim::log::info "$(echo -e '\033[32mcongratulations, man test passed!\033[0m')"
}
# OpenIM Smoke Test
openim::test::smoke()
{
openim::test::apiserver
openim::test::authzserver
openim::test::pump
openim::test::watcher
openim::test::iamctl
openim::log::info "$(echo -e '\033[32mcongratulations, smoke test passed!\033[0m')"
}
# OpenIM Test
openim::test::test()
{
openim::test::smoke
openim::test::man
return "$result"
openim::log::info "$(echo -e '\033[32mcongratulations, all test passed!\033[0m')"
}
if [[ "$*" =~ openim::test:: ]];then
eval $*
fi
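The trailing dispatcher lets individual test functions be invoked straight from the command line. For example (the script path is an assumption; adjust it to wherever this file lives in the repository):
# Run only the smoke test suite.
bash ./test.sh openim::test::smoke
# Run a single API test.
bash ./test.sh openim::test::user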

@ -102,27 +102,24 @@ endif
# The OS can be linux/windows/darwin when building binaries
PLATFORMS ?= linux_s390x linux_mips64 linux_mips64le darwin_amd64 windows_amd64 linux_amd64 linux_arm64 linux_ppc64le # wasip1_wasm
# only support linux
GOOS=linux
# set a specific PLATFORM, defaults to the host platform
ifeq ($(origin PLATFORM), undefined)
ifeq ($(origin GOARCH), undefined)
GOARCH := $(shell go env GOARCH)
endif
ifeq ($(origin GOARCH), undefined)
GOARCH := $(shell go env GOARCH)
endif
# Determine the host OS
GOOS := $(shell go env GOOS)
PLATFORM := $(GOOS)_$(GOARCH)
# Use linux as the default OS when building images
IMAGE_PLAT := linux_$(GOARCH)
# Use the host OS and GOARCH as the default when building images
IMAGE_PLAT := $(PLATFORM)
else
# such as: PLATFORM = linux_amd64
# Parse the PLATFORM variable
GOOS := $(word 1, $(subst _, ,$(PLATFORM)))
GOARCH := $(word 2, $(subst _, ,$(PLATFORM)))
IMAGE_PLAT := $(PLATFORM)
endif
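With this change the build platform defaults to the host's GOOS/GOARCH instead of being pinned to linux, while PLATFORM can still be set explicitly. A usage sketch (the build target name is an assumption based on common conventions in this Makefile):
# Build for the host platform (PLATFORM is derived from `go env`).
make build
# Cross-build for a specific platform by overriding PLATFORM.
make build PLATFORM=linux_arm64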
# Protobuf file storage path
APIROOT=$(ROOT_DIR)/pkg/proto

@ -8,11 +8,11 @@ If you encounter any problems during its usage, please create an issue in the [G
**Here are some ways to get involved with the OpenIM community:**
📢 **Slack Channel**: Join our Slack channels for discussions, communication, and support. Click [here](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) to join the Open-IM-Server Slack team channel.
📢 **Slack Channel**: Join our Slack channels for discussions, communication, and support. Click [here](https://openimsdk.slack.com) to join the Open-IM-Server Slack team channel.
📧 **Gmail Contact**: If you have any questions, suggestions, or feedback for our open-source projects, please feel free to [contact us via email](https://mail.google.com/mail/?view=cm&fs=1&tf=1&to=winxu81@gmail.com).
📧 **Gmail Contact**: If you have any questions, suggestions, or feedback for our open-source projects, please feel free to [contact us via email](https://mail.google.com/mail/?view=cm&fs=1&tf=1&to=info@openim.io).
📖 **Blog**: Stay up-to-date with OpenIM-Server projects and trends by reading our [blog](https://doc.rentsoft.cn/). We share the latest developments, tech trends, and other interesting information related to OpenIM.
📖 **Blog**: Stay up-to-date with OpenIM-Server projects and trends by reading our [blog](https://openim.io/). We share the latest developments, tech trends, and other interesting information related to OpenIM.
📱 **WeChat**: Add us on WeChat (QR Code) and indicate that you are a user or developer of Open-IM-Server. We'll process your request as soon as possible.

@ -21,7 +21,7 @@ import (
)
func main() {
rawJWT := `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJpYW0uYXV0aHoubWFybW90ZWR1LmNvbSIsImV4cCI6MTYwNDEyODQwMywiaWF0IjoxNjA0MTI4NDAyLCJpc3MiOiJpYW1jdGwiLCJraWQiOiJpZDEifQ.Itr5u4C-nTeA01qbjjl7RzuPD-aSQazsJZY_Z25aGnI`
rawJWT := `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJVc2VySUQiOiI4MjkzODEzMTgzIiwiUGxhdGZvcm1JRCI6NSwiZXhwIjoxNzA2NTk0MTU0LCJuYmYiOjE2OTg4MTc4NTQsImlhdCI6MTY5ODgxODE1NH0.QCJHzU07SC6iYBoFO6Zsm61TNDor2D89I4E3zg8HHHU`
// Verify the token
claims := &jwt.MapClaims{}
