Merge remote-tracking branch 'upstream/main' into main

# Conflicts:
#	cmd/openim-api/main.go
#	pkg/common/cmd/api.go
#	pkg/common/cmd/root.go
#	pkg/common/discovery_register/k8s_discovery_register.go
#	pkg/common/startrpc/start.go
pull/1337/head
Author: lin.huang
Commit: da3f4a7142

@@ -1,4 +1,4 @@
-<!-- # Ignore files and directories starting with a dot
+# Ignore files and directories starting with a dot
 # Ignore specific files
 .dockerignore
@@ -29,4 +29,4 @@ assets/
 components/
 # Ignore tools and scripts
-.github/ -->
+.github/

@@ -19,7 +19,8 @@ on:
 jobs:
   cherry-pick:
     name: Cherry Pick
-    if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/cherry-pick') && github.event.comment.user.login=='kubbot'
+    # && github.event.comment.user.login=='kubbot'
+    if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/cherry-pick')
     runs-on: ubuntu-latest
     steps:
       - name: Checkout the latest code

@@ -120,6 +120,15 @@ jobs:
         uses: docker/metadata-action@v5.0.0
         with:
           images: ghcr.io/openimsdk/openim-server
+          # generate Docker tags based on the following events/attributes
+          tags: |
+            type=schedule
+            type=ref,event=branch
+            type=ref,event=pr
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
+            type=sha
       - name: Log in to GitHub Container Registry
         uses: docker/login-action@v3
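For context, `docker/metadata-action` derives image tags from the triggering event. As an illustration only (the version below is an assumed example, not taken from this change), a pushed git tag `v3.4.0` would typically yield references like these:

```bash
# Illustrative tags produced by the strategy above for an assumed "v3.4.0" tag push;
# the short SHA shown is simply this document's commit id truncated to seven characters.
docker pull ghcr.io/openimsdk/openim-server:3.4.0       # type=semver,pattern={{version}}
docker pull ghcr.io/openimsdk/openim-server:3.4         # type=semver,pattern={{major}}.{{minor}}
docker pull ghcr.io/openimsdk/openim-server:3           # type=semver,pattern={{major}}
docker pull ghcr.io/openimsdk/openim-server:sha-da3f4a7 # type=sha
```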

@@ -114,15 +114,12 @@ jobs:
         uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
+      # ghcr.io/openimsdk/openim-web:latest
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta2
+        uses: docker/metadata-action@v5.0.0
         with:
-          install: true
+          images: ghcr.io/openimsdk/openim-web
-      - name: Cache Docker layers
-        uses: actions/cache@v3
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
       - name: Log in to GitHub Container Registry
         uses: docker/login-action@v3
@@ -131,38 +128,12 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Extract metadata (tags, labels) for Docker openim-web
-        id: meta1
-        uses: docker/metadata-action@v5.0.0
-        with:
-          images: ghcr.io/openimsdk/openim-web
-      - name: Build and push Docker image for openim-web
+      - name: Build and push Docker image
        uses: docker/build-push-action@v5
         with:
           context: .
           file: ./build/images/openim-tools/openim-web/Dockerfile
           platforms: linux/amd64,linux/arm64
           push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.meta1.outputs.tags }}
-          labels: ${{ steps.meta1.outputs.labels }}
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
-      - name: Extract metadata (tags, labels) for Docker openim-web
-        id: meta2
-        uses: docker/metadata-action@v5.0.0
-        with:
-          images: ghcr.io/openimsdk/component
-      - name: Build and push Docker image for component
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          file: ./build/images/openim-tools/component/Dockerfile
-          platforms: linux/amd64,linux/arm64
-          push: ${{ github.event_name != 'pull_request' }}
           tags: ${{ steps.meta2.outputs.tags }}
           labels: ${{ steps.meta2.outputs.labels }}
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache

@@ -48,7 +48,7 @@ jobs:
           fi
       - name: Push Changes
-        uses: stefanzweifel/git-auto-commit-action@v4
+        uses: stefanzweifel/git-auto-commit-action@v5
         with:
           commit_message: "Auto Commit CHANGELOG"
           branch: release-v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}
@@ -68,7 +68,7 @@ jobs:
           fi
       - name: Push Changes
-        uses: stefanzweifel/git-auto-commit-action@v4
+        uses: stefanzweifel/git-auto-commit-action@v5
         with:
           commit_message: "Auto Commit CHANGELOG"
           branch: main

@@ -48,7 +48,7 @@ jobs:
     strategy:
       matrix:
-        go_version: ["1.18","1.19","1.20","1.21"]
+        go_version: ["1.19","1.20","1.21"]
         os: [ubuntu-latest]
     steps:
@@ -86,7 +86,7 @@ jobs:
         run: sudo make clean
       - name: Push Changes to Main
-        uses: stefanzweifel/git-auto-commit-action@v4
+        uses: stefanzweifel/git-auto-commit-action@v5
         with:
           commit_message: "cicd: robot automated Change"
           branch: main

@@ -171,7 +171,7 @@ linters-settings:
   # exclude_godoc_examples: false
   funlen:
     lines: 150
-    statements: 50
+    statements: 80
   gci:
     # put imports beginning with prefix after 3rd-party packages;
     # only support one prefix
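If you want to sanity-check the relaxed `funlen` budget (the statement limit rises from 50 to 80) against the current tree, re-running the linter locally is enough; this sketch assumes `golangci-lint` is installed and picks up the repository's golangci configuration automatically:

```bash
# Re-run all configured linters from the repository root (assumed invocation)
golangci-lint run ./...
```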

@@ -98,6 +98,11 @@ Our goal with OpenIM is to build a top-tier open source community. We have a set of stand
 + [Interface Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/interface.md)
++ [OpenIM Configuration and Environment Variable Settings](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md)
+
+> **Note**
+> For users in China, read our [Docker Image Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/images.md) to use the domestic Aliyun registry addresses. OpenIM also maintains a synced repository for China on gitee; you can find it at [gitee.com](https://gitee.com/openimsdk).
+
 ## :link: Links
 + **[Full Documentation](https://doc.rentsoft.cn/)**

@@ -126,7 +126,7 @@ It is recommended to use Docker Compose for deployment, which can easily and qui
 <details> <summary>Compile from Source</summary>
-You need `Go 1.18` or higher, and `make`.
+You need `Go 1.20` or higher, and `make`.
 ```bash
@@ -212,6 +212,8 @@ Before you start, please make sure your changes are in demand. The best for that
 - [Interface Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/api.md)
 - [Log Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/logging.md)
 - [Error Code Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/error_code.md)
+- [OpenIM configuration and environment variable Settings](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md)
 ## :busts_in_silhouette: Community

@@ -0,0 +1,10 @@
FROM BASE_IMAGE
WORKDIR ${SERVER_WORKDIR}
# Set HTTP proxy
ARG BINARY_NAME
COPY BINARY_NAME ./bin/BINARY_NAME
ENTRYPOINT ["./bin/BINARY_NAME"]

@@ -31,20 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-api
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-api /usr/bin/openim-api
 # FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10002
-EXPOSE 10002
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-api /usr/bin/openim-api
-ENTRYPOINT ["bash", "-c", "openim-api -c $OPENIM_SERVER_CONFIG_NAME --port $PORT"]
+COPY --from=builder /usr/bin/openim-api ./bin/openim-api
+ENTRYPOINT ["./bin/openim-api"]

@@ -1,8 +0,0 @@
FROM ghcr.io/openim-sigs/openim-bash-image:latest
COPY openim-api /usr/bin/
# nosemgrep: dockerfile.security.missing-user.missing-user
ENTRYPOINT ["/usr/bin/openim-api"]
# nosemgrep: dockerfile.security.missing-user.missing-user
CMD ["--help"]

@@ -31,19 +31,15 @@ RUN go mod download
 COPY . .
-RUN go mod download
+RUN make clean
 RUN make build BINS=openim-cmdutils
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-cmdutils /usr/bin/openim-cmdutils
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder $OPENIM_SERVER_BINDIR/platforms /openim/openim-server/_output/bin/platforms
-COPY --from=builder ${SERVER_WORKDIR}/config /openim/openim-server/config
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-cmdutils /usr/bin/openim-cmdutils
-ENTRYPOINT ["openim-cmdutils"]
+COPY --from=builder /usr/bin/openim-cmdutils ./bin/openim-cmdutils
+ENTRYPOINT ["./bin/openim-cmdutils"]
 CMD ["--help"]

@@ -31,18 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-crontask
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-crontask /usr/bin/openim-crontask
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV OPENIM_SERVER_CONFIG_NAME=/openim/openim-server/config
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-crontask /usr/bin/openim-crontask
-CMD ["bash", "-c", "openim-crontask -c $OPENIM_SERVER_CONFIG_NAME"]
+COPY --from=builder /usr/bin/openim-crontask ./bin/openim-crontask
+ENTRYPOINT ["./bin/openim-crontask"]

@@ -31,22 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-msggateway
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-msggateway /usr/bin/openim-msggateway
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10140 \
-    WS_PORT 10001
-EXPOSE 10140
-EXPOSE 10001
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-msggateway /usr/bin/openim-msggateway
-CMD ["bash", "-c", "openim-msggateway -c $OPENIM_SERVER_CONFIG_NAME --port $PORT --ws_port $WS_PORT"]
+COPY --from=builder /usr/bin/openim-msggateway ./bin/openim-msggateway
+ENTRYPOINT ["./bin/openim-msggateway"]

@@ -31,16 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-msgtransfer
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-msgtransfer /usr/bin/openim-msgtransfer
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-msgtransfer /usr/bin/openim-msgtransfer
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then openim-msgtransfer -c $OPENIM_SERVER_CONFIG_NAME --prometheus_port $PROMETHEUS_PORT; else openim-msgtransfer -c $OPENIM_SERVER_CONFIG_NAME; fi"]
+COPY --from=builder /usr/bin/openim-msgtransfer ./bin/openim-msgtransfer
+ENTRYPOINT ["./bin/openim-msgtransfer"]

@@ -15,7 +15,6 @@
 # OpenIM base image: https://github.com/openim-sigs/openim-base-image
 # Set go mod installation source and proxy
-# docker run -e "PORT=10003" -e "PROMETHEUSORT=4321" --network host -it 67ef891ad1ff
 FROM golang:1.20 AS builder
@@ -32,21 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-push
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-push /usr/bin/openim-push
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10170 \
-    PROMETHEUS_PORT 20170
-EXPOSE 10170
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-push /usr/bin/openim-push
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then openim-push -c $OPENIM_SERVER_CONFIG_NAME --port $PORT --prometheus_port $PROMETHEUS_PORT; else openim-push -c $OPENIM_SERVER_CONFIG_NAME --port $PORT; fi"]
+COPY --from=builder /usr/bin/openim-push ./bin/openim-push
+ENTRYPOINT ["./bin/openim-push"]

@@ -31,22 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-rpc-auth
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-auth /usr/bin/openim-rpc-auth
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10160 \
-    PROMETHEUS_PORT 20160
-EXPOSE 10160
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-auth /usr/bin/openim-rpc-auth
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-auth --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-auth --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
+COPY --from=builder /usr/bin/openim-rpc-auth ./bin/openim-rpc-auth
+ENTRYPOINT ["./bin/openim-rpc-auth"]

@@ -31,22 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-rpc-conversation
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-conversation /usr/bin/openim-rpc-conversation
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10230 \
-    PROMETHEUS_PORT 20230
-EXPOSE 10230
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-conversation /usr/bin/openim-rpc-conversation
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-conversation --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-conversation --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
+COPY --from=builder /usr/bin/openim-rpc-conversation ./bin/openim-rpc-conversation
+ENTRYPOINT ["./bin/openim-rpc-conversation"]

@@ -31,22 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-rpc-friend
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-friend /usr/bin/openim-rpc-friend
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10120 \
-    PROMETHEUS_PORT 20120
-EXPOSE 10120
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-friend /usr/bin/openim-rpc-friend
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-friend --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-friend --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
+COPY --from=builder /usr/bin/openim-rpc-friend ./bin/openim-rpc-friend
+ENTRYPOINT ["./bin/openim-rpc-friend"]

@@ -31,22 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-rpc-group
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-group /usr/bin/openim-rpc-group
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10150 \
-    PROMETHEUS_PORT 20150
-EXPOSE 10150
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-group /usr/bin/openim-rpc-group
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-group --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-group --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
+COPY --from=builder /usr/bin/openim-rpc-group ./bin/openim-rpc-group
+ENTRYPOINT ["./bin/openim-rpc-group"]

@@ -31,22 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-rpc-msg
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-msg /usr/bin/openim-rpc-msg
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10130 \
-    PROMETHEUS_PORT 20130
-EXPOSE 10130
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-msg /usr/bin/openim-rpc-msg
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-msg --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-msg --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
+COPY --from=builder /usr/bin/openim-rpc-msg ./bin/openim-rpc-msg
+ENTRYPOINT ["./bin/openim-rpc-msg"]

@@ -31,23 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-rpc-third
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-third /usr/bin/openim-rpc-third
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10190 \
-    PROMETHEUS_PORT 21301
-EXPOSE 10190
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-third /usr/bin/openim-rpc-third
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-third --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-third --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
+COPY --from=builder /usr/bin/openim-rpc-third ./bin/openim-rpc-third
+ENTRYPOINT ["./bin/openim-rpc-third"]

@@ -31,22 +31,15 @@ RUN go mod download
 COPY . .
+RUN make clean
 RUN make build BINS=openim-rpc-user
+RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-user /usr/bin/openim-rpc-user
+# FROM ghcr.io/openim-sigs/openim-bash-image:latest
 FROM ghcr.io/openim-sigs/openim-bash-image:latest
 WORKDIR /openim/openim-server
-COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
-COPY --from=builder /openim/openim-server/config /openim/openim-server/config
-ENV PORT 10110 \
-    PROMETHEUS_PORT 20110
-EXPOSE 10110
-RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-user /usr/bin/openim-rpc-user
-ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-user --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-user --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
+COPY --from=builder /usr/bin/openim-rpc-user ./bin/openim-rpc-user
+ENTRYPOINT ["./bin/openim-rpc-user"]

@@ -17,15 +17,15 @@ package main
 import (
     "context"
     "fmt"
-    "github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
     "net"
     _ "net/http/pprof"
     "strconv"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
     "github.com/OpenIMSDK/protocol/constant"
     "github.com/OpenIMSDK/tools/discoveryregistry"
     "github.com/OpenIMSDK/tools/log"
     "github.com/openimsdk/open-im-server/v3/internal/api"
     "github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
@@ -37,49 +37,59 @@ func main() {
     apiCmd.AddPortFlag()
     apiCmd.AddApi(run)
     if err := apiCmd.Execute(); err != nil {
+        log.ZError(context.Background(), "API command execution failed", err)
         panic(err.Error())
     }
 }
 func run(port int) error {
-    fmt.Println("*****openimapi port:", port)
+    log.ZInfo(context.Background(), "Openim api port:", "port", port)
     if port == 0 {
-        return fmt.Errorf("port is empty")
+        err := "port is empty"
+        log.ZError(context.Background(), err, nil)
+        return fmt.Errorf(err)
     }
     rdb, err := cache.NewRedis()
     if err != nil {
+        log.ZError(context.Background(), "Failed to initialize Redis", err)
         return err
     }
-    fmt.Println("api start init discov client")
+    log.ZInfo(context.Background(), "api start init discov client")
     var client discoveryregistry.SvcDiscoveryRegistry
+    // Determine whether zk is passed according to whether it is a clustered deployment
     client, err = discovery_register.NewDiscoveryRegister(config.Config.Envs.Discovery)
+    /*
+        client, err = openkeeper.NewClient(config.Config.Zookeeper.ZkAddr, config.Config.Zookeeper.Schema,
+            openkeeper.WithFreq(time.Hour), openkeeper.WithUserNameAndPassword(
+                config.Config.Zookeeper.Username,
+                config.Config.Zookeeper.Password,
+            ), openkeeper.WithRoundRobin(), openkeeper.WithTimeout(10), openkeeper.WithLogger(log.NewZkLogger()))*/
     if err != nil {
+        log.ZError(context.Background(), "Failed to initialize discovery register", err)
         return err
     }
     if err = client.CreateRpcRootNodes(config.Config.GetServiceNames()); err != nil {
+        log.ZError(context.Background(), "Failed to create RPC root nodes", err)
         return err
     }
-    fmt.Println("api register public config to discov")
+    log.ZInfo(context.Background(), "api register public config to discov")
     if err = client.RegisterConf2Registry(constant.OpenIMCommonConfigKey, config.Config.EncodeConfig()); err != nil {
+        log.ZError(context.Background(), "Failed to register public config to discov", err)
         return err
     }
-    fmt.Println("api register public config to discov success")
+    log.ZInfo(context.Background(), "api register public config to discov success")
     router := api.NewGinRouter(client, rdb)
-    fmt.Println("api init router success")
+    log.ZInfo(context.Background(), "api init router success")
     var address string
     if config.Config.Api.ListenIP != "" {
         address = net.JoinHostPort(config.Config.Api.ListenIP, strconv.Itoa(port))
     } else {
         address = net.JoinHostPort("0.0.0.0", strconv.Itoa(port))
     }
-    fmt.Println("start api server, address: ", address, ", OpenIM version: ", config.Version)
+    log.ZInfo(context.Background(), "start api server", "address", address, "OpenIM version", config.Version)
+    log.ZInfo(context.Background(), "start server success", "address", address, "version", config.Version)
     err = router.Run(address)
     if err != nil {
         log.ZError(context.Background(), "api run failed", err, "address", address)
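Tying this to the CLI surface referenced above (`AddPortFlag`, `AddApi`): once built, the api binary can be started with an explicit config folder and port, following the `-c`/`--config_folder_path` and `-p` flags documented later in this changeset; the paths and port value below are only an example:

```bash
# Example invocation; 10002 is the API port the old Dockerfile used as its default,
# and the flags follow the conventions described in docs/contrib/environment.md below.
_output/bin/platforms/linux/amd64/openim-api -c ./config -p 10002
```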

@@ -80,6 +80,12 @@ $ sudo sealos run labring/kubernetes:v1.25.0 labring/helm:v3.8.2 labring/calico:
   -p "$CLUSTER_PASSWORD"
 ```
+
+> **Note**
+> To uninstall: tearing the cluster down with `kubeadm` does not clean up the related `etcd` and `cni` state, so remove that manually, or uninstall with `sealos`:
+> ```bash
+> sealos reset
+> ```
 ### Install helm
 Helm supports release version management and control through packaging, which greatly simplifies the deployment and management of Kubernetes applications.
@@ -97,11 +103,17 @@ $ curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bas
 $ helm repo add brigade https://openimsdk.github.io/openim-charts
 ```
+### OpenIM image strategy
-### Containerized installation
+Automated builds are published to aliyun, ghcr, and Docker Hub: https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/images.md
-The specific installation steps are as follows:
+**Local test build method:**
+```bash
+$ make image
+```
+### Containerized installation
-### Helm installation
+The specific installation steps are as follows:

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,38 +0,0 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v2
name: openim-chat
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "openim-chat.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "openim-chat.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "openim-chat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "openim-chat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "openim-chat.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "openim-chat.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "openim-chat.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "openim-chat.labels" -}}
helm.sh/chart: {{ include "openim-chat.chart" . }}
{{ include "openim-chat.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "openim-chat.selectorLabels" -}}
app.kubernetes.io/name: {{ include "openim-chat.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "openim-chat.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "openim-chat.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

@@ -1,75 +0,0 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openim-chat.fullname" . }}
labels:
{{- include "openim-chat.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "openim-chat.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "openim-chat.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "openim-chat.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -1,42 +0,0 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openim-chat.fullname" . }}
labels:
{{- include "openim-chat.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openim-chat.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

@@ -1,75 +0,0 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "openim-chat.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "openim-chat.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

@@ -1,29 +0,0 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
name: {{ include "openim-chat.fullname" . }}
labels:
{{- include "openim-chat.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "openim-chat.selectorLabels" . | nindent 4 }}

@@ -1,26 +0,0 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "openim-chat.serviceAccountName" . }}
labels:
{{- include "openim-chat.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

@@ -1,96 +0,0 @@
# Copyright © 2023 OpenIM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for openim-chat.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

@ -1,20 +1,19 @@
# OpenIM enviroment # OpenIM ENVIRONMENT CONFIGURATION
<!-- vscode-markdown-toc --> <!-- vscode-markdown-toc -->
* 1. [OpenIM Deployment Guide](#OpenIMDeploymentGuide) * 1. [OpenIM Deployment Guide](#OpenIMDeploymentGuide)
* 1.1. [Deployment Strategies](#DeploymentStrategies) * 1.1. [Deployment Strategies](#DeploymentStrategies)
* 1.2. [Source Code Deployment](#SourceCodeDeployment) * 1.2. [Source Code Deployment](#SourceCodeDeployment)
* 1.3. [Docker Compose Deployment](#DockerComposeDeployment) * 1.3. [Docker Compose Deployment](#DockerComposeDeployment)
* 1.4. [Environment Variable Configuration](#EnvironmentVariableConfiguration) * 1.4. [Environment Variable Configuration](#EnvironmentVariableConfiguration)
* 1.4.1. [[1. Recommended using environment variables:](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#1-recommended-using-environment-variables)](#1.Recommendedusingenvironmentvariables:https:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.md1-recommended-using-environment-variables) * 1.4.1. [Recommended using environment variables](#Recommendedusingenvironmentvariables)
* 1.4.2. [[Additional Configuration](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#additional-configuration)](#AdditionalConfigurationhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdadditional-configuration) * 1.4.2. [Additional Configuration](#AdditionalConfiguration)
* 1.4.3. [[Security Considerations](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#security-considerations)](#SecurityConsiderationshttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdsecurity-considerations) * 1.4.3. [Security Considerations](#SecurityConsiderations)
* 1.4.4. [[Data Management](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#data-management)](#DataManagementhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mddata-management) * 1.4.4. [Data Management](#DataManagement)
* 1.4.5. [[Monitoring and Logging](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#monitoring-and-logging)](#MonitoringandLogginghttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdmonitoring-and-logging) * 1.4.5. [Monitoring and Logging](#MonitoringandLogging)
* 1.4.6. [[Troubleshooting](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#troubleshooting)](#Troubleshootinghttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdtroubleshooting) * 1.4.6. [Troubleshooting](#Troubleshooting)
* 1.4.7. [[Conclusion](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#conclusion)](#Conclusionhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdconclusion) * 1.4.7. [Conclusion](#Conclusion)
* 1.4.8. [[Additional Resources](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#additional-resources)](#AdditionalResourceshttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdadditional-resources) * 1.4.8. [Additional Resources](#AdditionalResources)
* 2. [Further Configuration](#FurtherConfiguration) * 2. [Further Configuration](#FurtherConfiguration)
* 2.1. [Image Registry Configuration](#ImageRegistryConfiguration) * 2.1. [Image Registry Configuration](#ImageRegistryConfiguration)
* 2.2. [OpenIM Docker Network Configuration](#OpenIMDockerNetworkConfiguration) * 2.2. [OpenIM Docker Network Configuration](#OpenIMDockerNetworkConfiguration)
@ -39,11 +38,38 @@
* 2.20.1. [General Configuration](#GeneralConfiguration) * 2.20.1. [General Configuration](#GeneralConfiguration)
* 2.20.2. [Service-Specific Prometheus Ports](#Service-SpecificPrometheusPorts) * 2.20.2. [Service-Specific Prometheus Ports](#Service-SpecificPrometheusPorts)
<!-- vscode-markdown-toc-config ## 0. <a name='TableofContents'></a>OpenIM Config File
numbering=true
autoSave=true Ensuring that OpenIM operates smoothly requires clear direction on the configuration file's location. Here's a detailed step-by-step guide on how to provide this essential path to OpenIM:
/vscode-markdown-toc-config -->
<!-- /vscode-markdown-toc --> 1. **Using the Command-line Argument**:
+ **For Configuration Path**: When initializing OpenIM, you can specify the path to the configuration folder directly with the `-c` or `--config_folder_path` option.
```bash
_output/bin/platforms/linux/amd64/openim-api --config_folder_path="/your/config/folder/path"
```
+ **For Port Specification**: Similarly, if you wish to designate a particular port, use the `-p` option followed by the desired port number.
```bash
_output/bin/platforms/linux/amd64/openim-api -p 1234
```
Note: If the port is not specified here, OpenIM will fetch it from the configuration file. Setting the port via environment variables isn't supported. We recommend consolidating settings in the configuration file for a more consistent and streamlined setup.
2. **Leveraging the Environment Variable**:
You can also set OpenIM's configuration path through the `OPENIMCONFIG` environment variable, which avoids passing the path on the command line every time.
```bash
export OPENIMCONFIG="/path/to/your/config"
```
3. **Relying on the Default Path**:
If neither a command-line argument nor the environment variable is provided, OpenIM falls back to the `config/` directory to locate its configuration (see the sketch below for the full lookup order).
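
Taken together, the lookup order is: command-line flag first, then the `OPENIMCONFIG` environment variable, then the default `config/` directory. The snippet below is a minimal sketch of that precedence using Go's standard `flag` package; it is not the actual OpenIM startup code (the real binaries wire these options through their own command packages), so treat the helper name and wiring as illustrative only.

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

// resolveConfigFolder mirrors the lookup order described above:
// 1) the -c/--config_folder_path flag, 2) the OPENIMCONFIG environment
// variable, 3) the default "config/" directory.
func resolveConfigFolder(flagValue string) string {
	if flagValue != "" {
		return flagValue
	}
	if env := os.Getenv("OPENIMCONFIG"); env != "" {
		return env
	}
	return "config/"
}

func main() {
	var configFolder string
	// Register both the short and long spellings used in the docs;
	// whichever is passed last on the command line wins.
	flag.StringVar(&configFolder, "c", "", "path to the config folder")
	flag.StringVar(&configFolder, "config_folder_path", "", "path to the config folder")
	flag.Parse()

	fmt.Println("using config folder:", resolveConfigFolder(configFolder))
}
```

Running it with `--config_folder_path=/etc/openim` prints that path; with no flag and `OPENIMCONFIG` set, the environment value wins; with neither, it falls back to `config/`.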
## 1. <a name='OpenIMDeploymentGuide'></a>OpenIM Deployment Guide ## 1. <a name='OpenIMDeploymentGuide'></a>OpenIM Deployment Guide
@ -96,7 +122,7 @@ Setting a variable, e.g., `export CHAT_BRANCH="release-v1.3"`, will prioritize `
For convenience, configuration through modifying environment variables is recommended: For convenience, configuration through modifying environment variables is recommended:
#### 1.4.1. <a name='1.Recommendedusingenvironmentvariables:https:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.md1-recommended-using-environment-variables'></a>[1. Recommended using environment variables:](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#1-recommended-using-environment-variables) #### 1.4.1. <a name='Recommendedusingenvironmentvariables'></a>Recommended using environment variables
+ PASSWORD + PASSWORD
@ -137,9 +163,9 @@ For convenience, configuration through modifying environment variables is recomm
export DATA_DIR="/data/openim" export DATA_DIR="/data/openim"
``` ```
#### 1.4.2. <a name='AdditionalConfigurationhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdadditional-configuration'></a>[Additional Configuration](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#additional-configuration) #### 1.4.2. <a name='AdditionalConfiguration'></a>Additional Configuration
##### [MinIO Access and Secret Key](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#minio-access-and-secret-key) ##### MinIO Access and Secret Key
To secure your MinIO server, you should set up an access key and secret key. These credentials are used to authenticate requests to your MinIO server. To secure your MinIO server, you should set up an access key and secret key. These credentials are used to authenticate requests to your MinIO server.
@ -148,7 +174,7 @@ export MINIO_ACCESS_KEY="YourAccessKey"
export MINIO_SECRET_KEY="YourSecretKey" export MINIO_SECRET_KEY="YourSecretKey"
``` ```
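
As a quick illustration of how these credentials authenticate requests, here is a sketch assuming the community `minio-go` v7 client; OpenIM's own object-storage layer wraps MinIO differently, and the endpoint below is a placeholder, so adapt the address and TLS setting to your deployment.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Read the credentials exported above; both must be present to authenticate.
	accessKey := os.Getenv("MINIO_ACCESS_KEY")
	secretKey := os.Getenv("MINIO_SECRET_KEY")
	if accessKey == "" || secretKey == "" {
		log.Fatal("MINIO_ACCESS_KEY and MINIO_SECRET_KEY must be set")
	}

	// "localhost:9000" is a placeholder endpoint; point it at your MinIO server.
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
		Secure: false, // switch to true once TLS is enabled (see the TLS/SSL section)
	})
	if err != nil {
		log.Fatalf("create MinIO client: %v", err)
	}

	// A simple authenticated call: list the buckets visible to these credentials.
	buckets, err := client.ListBuckets(context.Background())
	if err != nil {
		log.Fatalf("list buckets: %v", err)
	}
	for _, b := range buckets {
		log.Println("bucket:", b.Name)
	}
}
```

If the call fails with an access-denied or signature error, the keys in the environment do not match the ones the MinIO server was started with.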
##### [MinIO Browser](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#minio-browser) ##### MinIO Browser
MinIO comes with an embedded web-based object browser. You can control the availability of the MinIO browser by setting the `MINIO_BROWSER` environment variable. MinIO comes with an embedded web-based object browser. You can control the availability of the MinIO browser by setting the `MINIO_BROWSER` environment variable.
@ -156,9 +182,9 @@ MinIO comes with an embedded web-based object browser. You can control the avail
export MINIO_BROWSER="on" export MINIO_BROWSER="on"
``` ```
#### 1.4.3. <a name='SecurityConsiderationshttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdsecurity-considerations'></a>[Security Considerations](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#security-considerations) #### 1.4.3. <a name='SecurityConsiderations'></a>Security Considerations
##### [TLS/SSL Configuration](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#tls-ssl-configuration) ##### TLS/SSL Configuration
For secure communication, it's recommended to enable TLS/SSL for your MinIO server. You can do this by providing the path to the SSL certificate and key files. For secure communication, it's recommended to enable TLS/SSL for your MinIO server. You can do this by providing the path to the SSL certificate and key files.
@ -166,9 +192,9 @@ For secure communication, it's recommended to enable TLS/SSL for your MinIO serv
export MINIO_CERTS_DIR="/path/to/certs/directory" export MINIO_CERTS_DIR="/path/to/certs/directory"
``` ```
#### 1.4.4. <a name='DataManagementhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mddata-management'></a>[Data Management](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#data-management) #### 1.4.4. <a name='DataManagement'></a>Data Management
##### [Data Retention Policy](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#data-retention-policy) ##### Data Retention Policy
You may want to set up a data retention policy to automatically delete objects after a specified period. You may want to set up a data retention policy to automatically delete objects after a specified period.
@ -176,7 +202,7 @@ You may want to set up a data retention policy to automatically delete objects a
export MINIO_RETENTION_DAYS="30" export MINIO_RETENTION_DAYS="30"
``` ```
#### 1.4.5. <a name='MonitoringandLogginghttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdmonitoring-and-logging'></a>[Monitoring and Logging](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#monitoring-and-logging) #### 1.4.5. <a name='MonitoringandLogging'></a>Monitoring and Logging
##### [Audit Logging](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#audit-logging) ##### [Audit Logging](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#audit-logging)
@ -186,9 +212,9 @@ Enable audit logging to keep track of access and changes to your data.
export MINIO_AUDIT="on" export MINIO_AUDIT="on"
``` ```
#### 1.4.6. <a name='Troubleshootinghttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdtroubleshooting'></a>[Troubleshooting](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#troubleshooting) #### 1.4.6. <a name='Troubleshooting'></a>Troubleshooting
##### [Debug Mode](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#debug-mode) ##### Debug Mode
In case of issues, you may enable debug mode to get more detailed logs to assist in troubleshooting. In case of issues, you may enable debug mode to get more detailed logs to assist in troubleshooting.
@ -196,11 +222,11 @@ In case of issues, you may enable debug mode to get more detailed logs to assist
export MINIO_DEBUG="on" export MINIO_DEBUG="on"
``` ```
#### 1.4.7. <a name='Conclusionhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdconclusion'></a>[Conclusion](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#conclusion) #### 1.4.7. <a name='Conclusion'></a>Conclusion
With the environment variables configured as per your requirements, your MinIO server should be ready to securely store and manage your object data. Ensure to verify the setup and monitor the logs for any unusual activities or errors. Regularly update the MinIO server and review your configuration to adapt to any changes or improvements in the MinIO system. With the environment variables configured as per your requirements, your MinIO server should be ready to securely store and manage your object data. Ensure to verify the setup and monitor the logs for any unusual activities or errors. Regularly update the MinIO server and review your configuration to adapt to any changes or improvements in the MinIO system.
#### 1.4.8. <a name='AdditionalResourceshttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdadditional-resources'></a>[Additional Resources](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#additional-resources) #### 1.4.8. <a name='AdditionalResources'></a>Additional Resources
+ [MinIO Client Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) + [MinIO Client Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide)
+ [MinIO Admin Complete Guide](https://docs.min.io/docs/minio-admin-complete-guide) + [MinIO Admin Complete Guide](https://docs.min.io/docs/minio-admin-complete-guide)

@ -1,6 +1,6 @@
module github.com/openimsdk/open-im-server/v3 module github.com/openimsdk/open-im-server/v3
go 1.18 go 1.19
require ( require (
firebase.google.com/go v3.13.0+incompatible firebase.google.com/go v3.13.0+incompatible
@ -24,34 +24,34 @@ require (
github.com/sirupsen/logrus v1.9.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect
github.com/stretchr/testify v1.8.4 github.com/stretchr/testify v1.8.4
go.mongodb.org/mongo-driver v1.12.1 go.mongodb.org/mongo-driver v1.12.1
golang.org/x/image v0.12.0 golang.org/x/image v0.13.0
google.golang.org/api v0.143.0 google.golang.org/api v0.148.0
google.golang.org/grpc v1.58.2 google.golang.org/grpc v1.59.0
google.golang.org/protobuf v1.31.0 google.golang.org/protobuf v1.31.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/mysql v1.5.1 gorm.io/driver/mysql v1.5.2
gorm.io/gorm v1.25.5 gorm.io/gorm v1.25.5
) )
require github.com/google/uuid v1.3.1 require github.com/google/uuid v1.3.1
require ( require (
github.com/IBM/sarama v1.41.2 github.com/IBM/sarama v1.41.3
github.com/OpenIMSDK/protocol v0.0.26 github.com/OpenIMSDK/protocol v0.0.30
github.com/OpenIMSDK/tools v0.0.14 github.com/OpenIMSDK/tools v0.0.15
github.com/aliyun/aliyun-oss-go-sdk v2.2.9+incompatible github.com/aliyun/aliyun-oss-go-sdk v2.2.9+incompatible
github.com/go-redis/redis v6.15.9+incompatible github.com/go-redis/redis v6.15.9+incompatible
github.com/go-sql-driver/mysql v1.7.1 github.com/go-sql-driver/mysql v1.7.1
github.com/redis/go-redis/v9 v9.2.1 github.com/redis/go-redis/v9 v9.2.1
github.com/tencentyun/cos-go-sdk-v5 v0.7.44 github.com/tencentyun/cos-go-sdk-v5 v0.7.45
) )
require ( require (
cloud.google.com/go v0.110.7 // indirect cloud.google.com/go v0.110.8 // indirect
cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/firestore v1.12.0 // indirect cloud.google.com/go/firestore v1.13.0 // indirect
cloud.google.com/go/iam v1.1.1 // indirect cloud.google.com/go/iam v1.1.2 // indirect
cloud.google.com/go/longrunning v0.5.1 // indirect cloud.google.com/go/longrunning v0.5.1 // indirect
cloud.google.com/go/storage v1.30.1 // indirect cloud.google.com/go/storage v1.30.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
@ -71,7 +71,6 @@ require (
github.com/go-zookeeper/zk v1.0.3 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect
@ -120,16 +119,16 @@ require (
go.uber.org/multierr v1.6.0 // indirect go.uber.org/multierr v1.6.0 // indirect
golang.org/x/arch v0.3.0 // indirect golang.org/x/arch v0.3.0 // indirect
golang.org/x/net v0.17.0 // indirect golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.12.0 // indirect golang.org/x/oauth2 v0.13.0 // indirect
golang.org/x/sync v0.3.0 // indirect golang.org/x/sync v0.4.0 // indirect
golang.org/x/sys v0.13.0 // indirect golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect
) )
require ( require (

@ -1,14 +1,14 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/firestore v1.12.0 h1:aeEA/N7DW7+l2u5jtkO8I0qv0D95YwjggD8kUHrTHO4= cloud.google.com/go/firestore v1.13.0 h1:/3S4RssUV4GO/kvgJZB+tayjhOfyAHs+KcpJgRVu/Qk=
cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8=
cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4=
cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI=
cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
@ -16,10 +16,10 @@ cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7Biccwk
firebase.google.com/go v3.13.0+incompatible h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4= firebase.google.com/go v3.13.0+incompatible h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4=
firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/IBM/sarama v1.41.2 h1:ZDBZfGPHAD4uuAtSv4U22fRZBgst0eEwGFzLj0fb85c= github.com/IBM/sarama v1.41.3 h1:MWBEJ12vHC8coMjdEXFq/6ftO6DUZnQlFYcxtOJFa7c=
github.com/IBM/sarama v1.41.2/go.mod h1:xdpu7sd6OE1uxNdjYTSKUfY8FaKkJES9/+EyjSgiGQk= github.com/IBM/sarama v1.41.3/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ=
github.com/OpenIMSDK/protocol v0.0.26 h1:jzq7EemhkO8W5kvOOg2f0b7OAMzMPrIBUG0GVbSNhDA= github.com/OpenIMSDK/protocol v0.0.30 h1:MiHO6PyQMR9ojBHNnSFxCHLmsoE2xZqaiYj975JiZnM=
github.com/OpenIMSDK/protocol v0.0.26/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y= github.com/OpenIMSDK/protocol v0.0.30/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
github.com/OpenIMSDK/tools v0.0.14 h1:WLof/+WxyPyRST+QkoTKubYCiV73uCLiL8pgnpH/yKQ= github.com/OpenIMSDK/tools v0.0.14 h1:WLof/+WxyPyRST+QkoTKubYCiV73uCLiL8pgnpH/yKQ=
github.com/OpenIMSDK/tools v0.0.14/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI= github.com/OpenIMSDK/tools v0.0.14/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI=
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
@ -52,6 +52,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dtm-labs/rockscache v0.1.1 h1:6S1vgaHvGqrLd8Ka4hRTKeKPV7v+tT0MSkTIX81LRyA= github.com/dtm-labs/rockscache v0.1.1 h1:6S1vgaHvGqrLd8Ka4hRTKeKPV7v+tT0MSkTIX81LRyA=
@ -130,8 +131,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
@ -295,8 +295,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0=
github.com/tencentyun/cos-go-sdk-v5 v0.7.44 h1:Vvz28KVdmSUrwTH2MWgAMlhzUAh+lQBSSAW1J7qJDW8= github.com/tencentyun/cos-go-sdk-v5 v0.7.45 h1:5/ZGOv846tP6+2X7w//8QjLgH2KcUK+HciFbfjWquFU=
github.com/tencentyun/cos-go-sdk-v5 v0.7.44/go.mod h1:LUFnaqRmGk6pEHOaRmdn2dCZR2j0cSsM5xowWFPTPao= github.com/tencentyun/cos-go-sdk-v5 v0.7.45/go.mod h1:DH9US8nB+AJXqwu/AMOrCFN1COv3dpytXuJWHgdg7kE=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
@ -335,15 +335,14 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/image v0.12.0 h1:w13vZbU4o5rKOFFR8y7M+c4A5jXDC0uXTdHYRP8X2DQ= golang.org/x/image v0.13.0 h1:3cge/F/QTkNLauhf2QoE9zp+7sr+ZcL4HnoZmdwg9sg=
golang.org/x/image v0.12.0/go.mod h1:Lu90jvHG7GfemOIcldsh9A2hS01ocl6oNO7ype5mEnk= golang.org/x/image v0.13.0/go.mod h1:6mmbMOeV28HuMTgA6OSRkdXKYw/t5W9Uwn2Yv1r3Yxk=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -366,8 +365,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -375,9 +374,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -424,15 +422,14 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA= google.golang.org/api v0.148.0 h1:HBq4TZlN4/1pNcu0geJZ/Q50vIwIXT532UIMYoo0vOs=
google.golang.org/api v0.143.0/go.mod h1:FoX9DO9hT7DLNn97OuoZAGSDuNAXdJRuGK98rSUgurk= google.golang.org/api v0.148.0/go.mod h1:8/TBgwaKjfqTdacOJrOv2+2Q6fBDU1uHKK06oGSkxzU=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
@ -440,19 +437,19 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI= google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -481,9 +478,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.5.1 h1:WUEH5VF9obL/lTtzjmML/5e6VfFR/788coz2uaVCAZw= gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs=
gorm.io/driver/mysql v1.5.1/go.mod h1:Jo3Xu7mMhCyj8dlrb3WoCaRd1FhsVh+yMXb1jUInf5o= gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

@ -1,4 +1,4 @@
go 1.18 go 1.19
use ( use (
. .

@ -40,7 +40,7 @@ REPO="Open-IM-Server"
# Version of Go you want to use, make sure it is compatible with your OpenIM-Server requirements. # Version of Go you want to use, make sure it is compatible with your OpenIM-Server requirements.
# Default is 1.18, if you want to use a different version, replace accordingly. # Default is 1.18, if you want to use a different version, replace accordingly.
GO_VERSION="1.18" GO_VERSION="1.20"
# Default HTTP_PORT is 80. If you want to use a different port, uncomment and replace the value. # Default HTTP_PORT is 80. If you want to use a different port, uncomment and replace the value.
# HTTP_PORT=80 # HTTP_PORT=80
@ -168,7 +168,7 @@ function install_go() {
command -v go >/dev/null 2>&1 command -v go >/dev/null 2>&1
# Determines if GO_VERSION is defined # Determines if GO_VERSION is defined
if [ -z "$GO_VERSION" ]; then if [ -z "$GO_VERSION" ]; then
GO_VERSION="1.18" GO_VERSION="1.20"
fi fi
if [[ $? -ne 0 ]]; then if [[ $? -ne 0 ]]; then
@ -309,7 +309,7 @@ function cmd_help() {
color_echo ${BLUE_PREFIX} "-t, --tag ${CYAN_PREFIX}specify the tag (default option, set to latest if not specified)${COLOR_SUFFIX}" color_echo ${BLUE_PREFIX} "-t, --tag ${CYAN_PREFIX}specify the tag (default option, set to latest if not specified)${COLOR_SUFFIX}"
color_echo ${BLUE_PREFIX} "-r, --release ${CYAN_PREFIX}specify the release branch (cannot be used with the tag option)${COLOR_SUFFIX}" color_echo ${BLUE_PREFIX} "-r, --release ${CYAN_PREFIX}specify the release branch (cannot be used with the tag option)${COLOR_SUFFIX}"
color_echo ${BLUE_PREFIX} "-gt, --github-token ${CYAN_PREFIX}set the GITHUB_TOKEN (default: not set)${COLOR_SUFFIX}" color_echo ${BLUE_PREFIX} "-gt, --github-token ${CYAN_PREFIX}set the GITHUB_TOKEN (default: not set)${COLOR_SUFFIX}"
color_echo ${BLUE_PREFIX} "-g, --go-version ${CYAN_PREFIX}set the Go language version (default: GO_VERSION=\"1.18\")${COLOR_SUFFIX}" color_echo ${BLUE_PREFIX} "-g, --go-version ${CYAN_PREFIX}set the Go language version (default: GO_VERSION=\"1.20\")${COLOR_SUFFIX}"
color_echo ${BLUE_PREFIX} "--install-dir ${CYAN_PREFIX}set the OpenIM installation directory (default: /tmp)${COLOR_SUFFIX}" color_echo ${BLUE_PREFIX} "--install-dir ${CYAN_PREFIX}set the OpenIM installation directory (default: /tmp)${COLOR_SUFFIX}"
color_echo ${BLUE_PREFIX} "--cpu ${CYAN_PREFIX}set the number of concurrent processes${COLOR_SUFFIX}" color_echo ${BLUE_PREFIX} "--cpu ${CYAN_PREFIX}set the number of concurrent processes${COLOR_SUFFIX}"
echo echo
@ -329,7 +329,7 @@ function parseinput() {
# CHINA=false # CHINA=false
# TAG=latest # TAG=latest
# RELEASE="" # RELEASE=""
# GO_VERSION=1.18 # GO_VERSION=1.20
# INSTALL_DIR=/tmp # INSTALL_DIR=/tmp
# GITHUB_TOKEN="" # GITHUB_TOKEN=""
# CPU=$(nproc) # CPU=$(nproc)

@ -57,6 +57,10 @@ func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg)
var newContent string var newContent string
options := make(map[string]bool, 5) options := make(map[string]bool, 5)
switch params.ContentType { switch params.ContentType {
case constant.OANotification:
notification := sdkws.NotificationElem{}
notification.Detail = utils.StructToJsonString(params.Content)
newContent = utils.StructToJsonString(&notification)
case constant.Text: case constant.Text:
fallthrough fallthrough
case constant.Picture: case constant.Picture:
@ -69,10 +73,6 @@ func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg)
fallthrough fallthrough
case constant.File: case constant.File:
fallthrough fallthrough
case constant.CustomNotTriggerConversation:
fallthrough
case constant.CustomOnlineOnly:
fallthrough
default: default:
newContent = utils.StructToJsonString(params.Content) newContent = utils.StructToJsonString(params.Content)
} }
@ -82,11 +82,6 @@ func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg)
if params.NotOfflinePush { if params.NotOfflinePush {
utils.SetSwitchFromOptions(options, constant.IsOfflinePush, false) utils.SetSwitchFromOptions(options, constant.IsOfflinePush, false)
} }
if params.ContentType == constant.CustomOnlineOnly {
m.SetOptions(options, false)
} else if params.ContentType == constant.CustomNotTriggerConversation {
utils.SetSwitchFromOptions(options, constant.IsConversationUpdate, false)
}
pbData := msg.SendMsgReq{ pbData := msg.SendMsgReq{
MsgData: &sdkws.MsgData{ MsgData: &sdkws.MsgData{
SendID: params.SendID, SendID: params.SendID,
@ -105,14 +100,6 @@ func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg)
OfflinePushInfo: params.OfflinePushInfo, OfflinePushInfo: params.OfflinePushInfo,
}, },
} }
//if params.ContentType == constant.OANotification {
// var tips sdkws.TipsComm
// tips.JsonDetail = utils.StructToJsonString(params.Content)
// pbData.MsgData.Content, err = proto.Marshal(&tips)
// if err != nil {
// log.ZError(c, "Marshal failed ", err, "tips", tips.String())
// }
//}
return &pbData return &pbData
} }
@ -180,15 +167,13 @@ func (m *MessageApi) getSendMsgReq(c *gin.Context, req apistruct.SendMsg) (sendM
data = apistruct.FileElem{} data = apistruct.FileElem{}
case constant.Custom: case constant.Custom:
data = apistruct.CustomElem{} data = apistruct.CustomElem{}
case constant.Revoke:
data = apistruct.RevokeElem{}
case constant.OANotification: case constant.OANotification:
data = apistruct.OANotificationElem{} data = apistruct.OANotificationElem{}
req.SessionType = constant.NotificationChatType req.SessionType = constant.NotificationChatType
case constant.CustomNotTriggerConversation: if !authverify.IsManagerUserID(req.SendID) {
data = apistruct.CustomElem{} return nil, errs.ErrNoPermission.
case constant.CustomOnlineOnly: Wrap("only app manager can as sender send OANotificationElem")
data = apistruct.CustomElem{} }
default: default:
return nil, errs.ErrArgs.WithDetail("not support err contentType") return nil, errs.ErrArgs.WithDetail("not support err contentType")
} }
@ -212,7 +197,6 @@ func (m *MessageApi) SendMessage(c *gin.Context) {
apiresp.GinError(c, errs.ErrNoPermission.Wrap("only app manager can send message")) apiresp.GinError(c, errs.ErrNoPermission.Wrap("only app manager can send message"))
return return
} }
sendMsgReq, err := m.getSendMsgReq(c, req.SendMsg) sendMsgReq, err := m.getSendMsgReq(c, req.SendMsg)
if err != nil { if err != nil {
log.ZError(c, "decodeData failed", err) log.ZError(c, "decodeData failed", err)

@ -83,7 +83,6 @@ func NewGinRouter(discov discoveryregistry.SvcDiscoveryRegistry, rdb redis.Unive
userRouterGroup.POST("/get_users_online_status", ParseToken, u.GetUsersOnlineStatus) userRouterGroup.POST("/get_users_online_status", ParseToken, u.GetUsersOnlineStatus)
userRouterGroup.POST("/get_users_online_token_detail", ParseToken, u.GetUsersOnlineTokenDetail) userRouterGroup.POST("/get_users_online_token_detail", ParseToken, u.GetUsersOnlineTokenDetail)
userRouterGroup.POST("/subscribe_users_status", ParseToken, u.SubscriberStatus) userRouterGroup.POST("/subscribe_users_status", ParseToken, u.SubscriberStatus)
userRouterGroup.POST("/unsubscribe_users_status", ParseToken, u.UnSubscriberStatus)
userRouterGroup.POST("/get_users_status", ParseToken, u.GetUserStatus) userRouterGroup.POST("/get_users_status", ParseToken, u.GetUserStatus)
userRouterGroup.POST("/get_subscribe_users_status", ParseToken, u.GetSubscribeUsersStatus) userRouterGroup.POST("/get_subscribe_users_status", ParseToken, u.GetSubscribeUsersStatus)
} }

@ -190,11 +190,6 @@ func (u *UserApi) SubscriberStatus(c *gin.Context) {
a2r.Call(user.UserClient.SubscribeOrCancelUsersStatus, u.Client, c) a2r.Call(user.UserClient.SubscribeOrCancelUsersStatus, u.Client, c)
} }
// UnSubscriberStatus Unsubscribe a user's presence.
func (u *UserApi) UnSubscriberStatus(c *gin.Context) {
a2r.Call(user.UserClient.SubscribeOrCancelUsersStatus, u.Client, c)
}
// GetUserStatus Get the online status of the user. // GetUserStatus Get the online status of the user.
func (u *UserApi) GetUserStatus(c *gin.Context) { func (u *UserApi) GetUserStatus(c *gin.Context) {
a2r.Call(user.UserClient.GetUserStatus, u.Client, c) a2r.Call(user.UserClient.GetUserStatus, u.Client, c)

@ -18,9 +18,12 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/OpenIMSDK/tools/utils"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
) )
// RunWsAndServer run ws server
func RunWsAndServer(rpcPort, wsPort, prometheusPort int) error { func RunWsAndServer(rpcPort, wsPort, prometheusPort int) error {
fmt.Println( fmt.Println(
"start rpc/msg_gateway server, port: ", "start rpc/msg_gateway server, port: ",
@ -42,7 +45,7 @@ func RunWsAndServer(rpcPort, wsPort, prometheusPort int) error {
go func() { go func() {
err := hubServer.Start() err := hubServer.Start()
if err != nil { if err != nil {
panic(err.Error()) panic(utils.Wrap1(err))
} }
}() }()
return hubServer.LongConnServer.Run() return hubServer.LongConnServer.Run()

@ -283,20 +283,30 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(
return return
} }
if isNewConversation { if isNewConversation {
if storageList[0].SessionType == constant.SuperGroupChatType { switch storageList[0].SessionType {
log.ZInfo(ctx, "group chat first create conversation", "conversationID", conversationID) case constant.SuperGroupChatType:
log.ZInfo(ctx, "group chat first create conversation", "conversationID",
conversationID)
userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, storageList[0].GroupID) userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, storageList[0].GroupID)
if err != nil { if err != nil {
log.ZWarn(ctx, "get group member ids error", err, "conversationID", conversationID) log.ZWarn(ctx, "get group member ids error", err, "conversationID",
conversationID)
} else { } else {
if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx, storageList[0].GroupID, userIDs); err != nil { if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx,
log.ZWarn(ctx, "single chat first create conversation error", err, "conversationID", conversationID) storageList[0].GroupID, userIDs); err != nil {
log.ZWarn(ctx, "single chat first create conversation error", err,
"conversationID", conversationID)
} }
} }
} else { case constant.SingleChatType, constant.NotificationChatType:
if err := och.conversationRpcClient.SingleChatFirstCreateConversation(ctx, storageList[0].RecvID, storageList[0].SendID); err != nil { if err := och.conversationRpcClient.SingleChatFirstCreateConversation(ctx, storageList[0].RecvID,
log.ZWarn(ctx, "single chat first create conversation error", err, "conversationID", conversationID) storageList[0].SendID, conversationID, storageList[0].SessionType); err != nil {
log.ZWarn(ctx, "single chat or notification first create conversation error", err,
"conversationID", conversationID, "sessionType", storageList[0].SessionType)
} }
default:
log.ZWarn(ctx, "unknown session type", nil, "sessionType",
storageList[0].SessionType)
} }
} }

@ -62,7 +62,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg) log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
return return
} }
log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.MsgData) log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
err = mc.msgDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq) err = mc.msgDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq)
if err != nil { if err != nil {
log.ZError( log.ZError(

@ -18,8 +18,6 @@ import (
"context" "context"
"path/filepath" "path/filepath"
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
firebase "firebase.google.com/go" firebase "firebase.google.com/go"
"firebase.google.com/go/messaging" "firebase.google.com/go/messaging"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
@ -42,20 +40,17 @@ type Fcm struct {
} }
func NewClient(cache cache.MsgModel) *Fcm { func NewClient(cache cache.MsgModel) *Fcm {
opt := option.WithCredentialsFile(filepath.Join(config2.Root, "config", config.Config.Push.Fcm.ServiceAccount)) projectRoot := config.GetProjectRoot()
credentialsFilePath := filepath.Join(projectRoot, "config", config.Config.Push.Fcm.ServiceAccount)
opt := option.WithCredentialsFile(credentialsFilePath)
fcmApp, err := firebase.NewApp(context.Background(), nil, opt) fcmApp, err := firebase.NewApp(context.Background(), nil, opt)
if err != nil { if err != nil {
return nil return nil
} }
// auth
// fcmClient, err := fcmApp.Auth(context.Background())
// if err != nil {
// return
// }
ctx := context.Background() ctx := context.Background()
fcmMsgClient, err := fcmApp.Messaging(ctx) fcmMsgClient, err := fcmApp.Messaging(ctx)
if err != nil { if err != nil {
panic(err.Error())
return nil return nil
} }
return &Fcm{fcmMsgCli: fcmMsgClient, cache: cache} return &Fcm{fcmMsgCli: fcmMsgClient, cache: cache}

@ -58,6 +58,7 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
} }
sec := msgFromMQ.MsgData.SendTime / 1000 sec := msgFromMQ.MsgData.SendTime / 1000
nowSec := utils.GetCurrentTimestampBySecond() nowSec := utils.GetCurrentTimestampBySecond()
log.ZDebug(ctx, "push msg", "msg", pbData.String(), "sec", sec, "nowSec", nowSec)
if nowSec-sec > 10 { if nowSec-sec > 10 {
return return
} }

@ -126,13 +126,12 @@ func (p *Pusher) Push2User(ctx context.Context, userIDs []string, msg *sdkws.Msg
} }
func (p *Pusher) UnmarshalNotificationElem(bytes []byte, t interface{}) error { func (p *Pusher) UnmarshalNotificationElem(bytes []byte, t interface{}) error {
var notificationElem struct { var notification sdkws.NotificationElem
Detail string `json:"detail,omitempty"` if err := json.Unmarshal(bytes, &notification); err != nil {
}
if err := json.Unmarshal(bytes, &notificationElem); err != nil {
return err return err
} }
return json.Unmarshal([]byte(notificationElem.Detail), t)
return json.Unmarshal([]byte(notification.Detail), t)
} }
func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) { func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {

@ -17,8 +17,6 @@ package conversation
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"google.golang.org/grpc" "google.golang.org/grpc"
"github.com/OpenIMSDK/protocol/constant" "github.com/OpenIMSDK/protocol/constant"
@ -114,7 +112,10 @@ func (c *conversationServer) SetConversation(ctx context.Context, req *pbconvers
return resp, nil return resp, nil
} }
func (c *conversationServer) SetConversations(ctx context.Context, req *pbconversation.SetConversationsReq) (*pbconversation.SetConversationsResp, error) { //nolint
func (c *conversationServer) SetConversations(ctx context.Context,
req *pbconversation.SetConversationsReq,
) (*pbconversation.SetConversationsResp, error) {
if req.Conversation == nil { if req.Conversation == nil {
return nil, errs.ErrArgs.Wrap("conversation must not be nil") return nil, errs.ErrArgs.Wrap("conversation must not be nil")
} }
@ -124,14 +125,8 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
return nil, err return nil, err
} }
if groupInfo.Status == constant.GroupStatusDismissed { if groupInfo.Status == constant.GroupStatusDismissed {
return nil, err return nil, errs.ErrDismissedAlready.Wrap("group dismissed")
} }
// for _, userID := range req.UserIDs {
// if _, err := c.groupRpcClient.GetGroupMemberCache(ctx, req.Conversation.GroupID, userID); err != nil {
// log.ZError(ctx, "user not in group", err, "userID", userID, "groupID", req.Conversation.GroupID)
// return nil, err
// }
// }
} }
var unequal int var unequal int
var conv tablerelation.ConversationModel var conv tablerelation.ConversationModel
@ -205,7 +200,14 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
return nil, err return nil, err
} }
for _, userID := range req.UserIDs { for _, userID := range req.UserIDs {
c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID, req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID) err := c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID,
req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID)
if err != nil {
log.ZWarn(ctx, "send conversation set private notification failed", err,
"userID", userID, "conversationID", req.Conversation.ConversationID)
continue
}
} }
} }
if req.Conversation.BurnDuration != nil { if req.Conversation.BurnDuration != nil {
@ -235,10 +237,14 @@ func (c *conversationServer) GetRecvMsgNotNotifyUserIDs(ctx context.Context, req
} }
// create conversation without notification for msg redis transfer. // create conversation without notification for msg redis transfer.
func (c *conversationServer) CreateSingleChatConversations(ctx context.Context, req *pbconversation.CreateSingleChatConversationsReq) (*pbconversation.CreateSingleChatConversationsResp, error) { func (c *conversationServer) CreateSingleChatConversations(ctx context.Context,
req *pbconversation.CreateSingleChatConversationsReq,
) (*pbconversation.CreateSingleChatConversationsResp, error) {
switch req.ConversationType {
case constant.SingleChatType:
var conversation tablerelation.ConversationModel var conversation tablerelation.ConversationModel
conversation.ConversationID = msgprocessor.GetConversationIDBySessionType(constant.SingleChatType, req.RecvID, req.SendID) conversation.ConversationID = req.ConversationID
conversation.ConversationType = constant.SingleChatType conversation.ConversationType = req.ConversationType
conversation.OwnerUserID = req.SendID conversation.OwnerUserID = req.SendID
conversation.UserID = req.RecvID conversation.UserID = req.RecvID
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation}) err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation})
@ -253,6 +259,18 @@ func (c *conversationServer) CreateSingleChatConversations(ctx context.Context,
if err != nil { if err != nil {
log.ZWarn(ctx, "create conversation failed", err, "conversation2", conversation) log.ZWarn(ctx, "create conversation failed", err, "conversation2", conversation)
} }
case constant.NotificationChatType:
var conversation tablerelation.ConversationModel
conversation.ConversationID = req.ConversationID
conversation.ConversationType = req.ConversationType
conversation.OwnerUserID = req.RecvID
conversation.UserID = req.SendID
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation})
if err != nil {
log.ZWarn(ctx, "create conversation failed", err, "conversation2", conversation)
}
}
return &pbconversation.CreateSingleChatConversationsResp{}, nil return &pbconversation.CreateSingleChatConversationsResp{}, nil
} }
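
Editor's aside, not repository code: CreateSingleChatConversations now switches on req.ConversationType instead of always assuming a single chat. The sketch below only shows the owner/peer assignment visible in the hunk (single chat: sender owns the record; notification chat: receiver owns it). The constants' numeric values and the trimmed-down ConversationModel are stand-ins for the real constant and tablerelation packages, and the second reciprocal record created by the full function is omitted.

package main

import "fmt"

// Simplified stand-ins; the real values live in the constant package.
const (
	SingleChatType       = 1
	NotificationChatType = 4
)

type ConversationModel struct {
	ConversationID   string
	ConversationType int32
	OwnerUserID      string
	UserID           string
}

// buildConversation mirrors the switch added to CreateSingleChatConversations.
func buildConversation(convType int32, convID, sendID, recvID string) (ConversationModel, bool) {
	switch convType {
	case SingleChatType:
		return ConversationModel{ConversationID: convID, ConversationType: convType, OwnerUserID: sendID, UserID: recvID}, true
	case NotificationChatType:
		return ConversationModel{ConversationID: convID, ConversationType: convType, OwnerUserID: recvID, UserID: sendID}, true
	default:
		return ConversationModel{}, false
	}
}

func main() {
	c, ok := buildConversation(NotificationChatType, "n_u1_u2", "u1", "u2")
	fmt.Println(ok, c.OwnerUserID, c.UserID) // true u2 u1
}
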

@ -252,7 +252,8 @@ func (s *friendServer) GetDesignatedFriends(
return resp, nil return resp, nil
} }
func (s *friendServer) GetDesignatedFriendsApply(ctx context.Context, req *pbfriend.GetDesignatedFriendsApplyReq) (resp *pbfriend.GetDesignatedFriendsApplyResp, err error) { func (s *friendServer) GetDesignatedFriendsApply(ctx context.Context,
req *pbfriend.GetDesignatedFriendsApplyReq) (resp *pbfriend.GetDesignatedFriendsApplyResp, err error) {
friendRequests, err := s.friendDatabase.FindBothFriendRequests(ctx, req.FromUserID, req.ToUserID) friendRequests, err := s.friendDatabase.FindBothFriendRequests(ctx, req.FromUserID, req.ToUserID)
if err != nil { if err != nil {
return nil, err return nil, err

@ -154,16 +154,16 @@ func (s *groupServer) CheckGroupAdmin(ctx context.Context, groupID string) error
return nil return nil
} }
func (s *groupServer) GetUsernameMap(ctx context.Context, userIDs []string, complete bool) (map[string]string, error) { func (s *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string, complete bool) (map[string]*sdkws.PublicUserInfo, error) {
if len(userIDs) == 0 { if len(userIDs) == 0 {
return map[string]string{}, nil return map[string]*sdkws.PublicUserInfo{}, nil
} }
users, err := s.User.GetPublicUserInfos(ctx, userIDs, complete) users, err := s.User.GetPublicUserInfos(ctx, userIDs, complete)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return utils.SliceToMapAny(users, func(e *sdkws.PublicUserInfo) (string, string) { return utils.SliceToMapAny(users, func(e *sdkws.PublicUserInfo) (string, *sdkws.PublicUserInfo) {
return e.UserID, e.Nickname return e.UserID, e
}), nil }), nil
} }
@ -468,15 +468,18 @@ func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGro
if err != nil { if err != nil {
return nil, err return nil, err
} }
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) { publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
return e.UserID, e.Nickname == "" return e.UserID, e.Nickname == "" || e.FaceURL == ""
}), true) }), true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo { resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" { if e.Nickname == "" {
e.Nickname = nameMap[e.UserID] e.Nickname = publicUserInfoMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
} }
return convert.Db2PbGroupMember(e) return convert.Db2PbGroupMember(e)
}) })
@ -616,15 +619,18 @@ func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetG
if err != nil { if err != nil {
return nil, err return nil, err
} }
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) { publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
return e.UserID, e.Nickname == "" return e.UserID, e.Nickname == "" || e.FaceURL == ""
}), true) }), true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo { resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" { if e.Nickname == "" {
e.Nickname = nameMap[e.UserID] e.Nickname = publicUserInfoMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
} }
return convert.Db2PbGroupMember(e) return convert.Db2PbGroupMember(e)
}) })
@ -852,32 +858,40 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq)
func (s *groupServer) QuitGroup(ctx context.Context, req *pbgroup.QuitGroupReq) (*pbgroup.QuitGroupResp, error) { func (s *groupServer) QuitGroup(ctx context.Context, req *pbgroup.QuitGroupReq) (*pbgroup.QuitGroupResp, error) {
resp := &pbgroup.QuitGroupResp{} resp := &pbgroup.QuitGroupResp{}
if req.UserID == "" {
req.UserID = mcontext.GetOpUserID(ctx)
} else {
if err := authverify.CheckAccessV3(ctx, req.UserID); err != nil {
return nil, err
}
}
group, err := s.GroupDatabase.TakeGroup(ctx, req.GroupID) group, err := s.GroupDatabase.TakeGroup(ctx, req.GroupID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if group.GroupType == constant.SuperGroup { if group.GroupType == constant.SuperGroup {
if err := s.GroupDatabase.DeleteSuperGroupMember(ctx, req.GroupID, []string{mcontext.GetOpUserID(ctx)}); err != nil { if err := s.GroupDatabase.DeleteSuperGroupMember(ctx, req.GroupID, []string{req.UserID}); err != nil {
return nil, err return nil, err
} }
s.Notification.SuperGroupNotification(ctx, mcontext.GetOpUserID(ctx), mcontext.GetOpUserID(ctx)) _ = s.Notification.SuperGroupNotification(ctx, req.UserID, req.UserID)
} else { } else {
info, err := s.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) info, err := s.TakeGroupMember(ctx, req.GroupID, req.UserID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if info.RoleLevel == constant.GroupOwner { if info.RoleLevel == constant.GroupOwner {
return nil, errs.ErrNoPermission.Wrap("group owner can't quit") return nil, errs.ErrNoPermission.Wrap("group owner can't quit")
} }
err = s.GroupDatabase.DeleteGroupMember(ctx, req.GroupID, []string{mcontext.GetOpUserID(ctx)}) err = s.GroupDatabase.DeleteGroupMember(ctx, req.GroupID, []string{req.UserID})
if err != nil { if err != nil {
return nil, err return nil, err
} }
s.Notification.MemberQuitNotification(ctx, s.groupMemberDB2PB(info, 0)) _ = s.Notification.MemberQuitNotification(ctx, s.groupMemberDB2PB(info, 0))
} }
if err := s.deleteMemberAndSetConversationSeq(ctx, req.GroupID, []string{mcontext.GetOpUserID(ctx)}); err != nil { if err := s.deleteMemberAndSetConversationSeq(ctx, req.GroupID, []string{req.UserID}); err != nil {
return nil, err return nil, err
} }
return resp, nil return resp, nil
} }
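
Editor's aside, not repository code: QuitGroup now accepts an explicit req.UserID. An empty value means "the operator quits"; a non-empty value must pass an access check before the quit is performed on that user's behalf. A sketch of that resolution step follows; opUserID and checkAccess stand in for mcontext.GetOpUserID and authverify.CheckAccessV3.

package main

import (
	"errors"
	"fmt"
)

// resolveTargetUser applies the pattern added at the top of QuitGroup.
func resolveTargetUser(reqUserID, opUserID string, checkAccess func(userID string) error) (string, error) {
	if reqUserID == "" {
		return opUserID, nil
	}
	if err := checkAccess(reqUserID); err != nil {
		return "", err
	}
	return reqUserID, nil
}

func main() {
	allowOnlySelf := func(userID string) error {
		if userID != "u1" {
			return errors.New("no permission")
		}
		return nil
	}
	target, err := resolveTargetUser("", "u1", allowOnlySelf)
	fmt.Println(target, err) // u1 <nil>
	_, err = resolveTargetUser("u2", "u1", allowOnlySelf)
	fmt.Println(err) // no permission
}
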
@ -1059,15 +1073,18 @@ func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGr
return nil, err return nil, err
} }
resp.Total = total resp.Total = total
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) { nameMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
return e.UserID, e.Nickname == "" return e.UserID, e.Nickname == "" || e.FaceURL == ""
}), true) }), true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo { resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" { if e.Nickname == "" {
e.Nickname = nameMap[e.UserID] e.Nickname = nameMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = nameMap[e.UserID].FaceURL
} }
return convert.Db2PbGroupMember(e) return convert.Db2PbGroupMember(e)
}) })
@ -1453,7 +1470,7 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge
if err != nil { if err != nil {
return nil, err return nil, err
} }
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) { publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
return e.UserID, e.Nickname == "" return e.UserID, e.Nickname == ""
}), true) }), true)
if err != nil { if err != nil {
@ -1461,7 +1478,10 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge
} }
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo { resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" { if e.Nickname == "" {
e.Nickname = nameMap[e.UserID] e.Nickname = publicUserInfoMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
} }
return convert.Db2PbGroupMember(e) return convert.Db2PbGroupMember(e)
}) })
@ -1486,15 +1506,18 @@ func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.
if err != nil { if err != nil {
return nil, err return nil, err
} }
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) { publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
return e.UserID, e.Nickname == "" return e.UserID, e.Nickname == "" || e.FaceURL == ""
}), true) }), true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo { resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
if e.Nickname == "" { if e.Nickname == "" {
e.Nickname = nameMap[e.UserID] e.Nickname = publicUserInfoMap[e.UserID].Nickname
}
if e.FaceURL == "" {
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
} }
return convert.Db2PbGroupMember(e) return convert.Db2PbGroupMember(e)
}) })
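
Editor's aside, not repository code: the GetUsernameMap to GetPublicUserInfoMap change repeats the same pattern in several handlers: filter out only the members missing a nickname or face URL, fetch their public profiles once, then fill the blanks. The sketch below shows that fill-in with simplified stand-ins for relationtb.GroupMemberModel and sdkws.PublicUserInfo; unlike the diff, which indexes the map directly, it guards against a profile that was not returned.

package main

import "fmt"

type GroupMember struct {
	UserID   string
	Nickname string
	FaceURL  string
}

type PublicUserInfo struct {
	UserID   string
	Nickname string
	FaceURL  string
}

// needsProfile is the filter condition fed to the lookup: only fetch users
// whose member record is missing something.
func needsProfile(m GroupMember) bool { return m.Nickname == "" || m.FaceURL == "" }

// fillFromProfile copies the public nickname and face URL into any member
// record where the group-local value is empty.
func fillFromProfile(members []GroupMember, profiles map[string]PublicUserInfo) []GroupMember {
	for i := range members {
		p, ok := profiles[members[i].UserID]
		if !ok {
			continue
		}
		if members[i].Nickname == "" {
			members[i].Nickname = p.Nickname
		}
		if members[i].FaceURL == "" {
			members[i].FaceURL = p.FaceURL
		}
	}
	return members
}

func main() {
	members := []GroupMember{{UserID: "u1"}, {UserID: "u2", Nickname: "Bob", FaceURL: "b.png"}}
	profiles := map[string]PublicUserInfo{"u1": {UserID: "u1", Nickname: "Alice", FaceURL: "a.png"}}
	fmt.Println(needsProfile(members[0]), fillFromProfile(members, profiles)[0].Nickname) // true Alice
}
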

@ -165,8 +165,8 @@ func (m *msgServer) MarkConversationAsRead(
m.conversationAndGetRecvID(conversation, req.UserID), seqs, hasReadSeq); err != nil { m.conversationAndGetRecvID(conversation, req.UserID), seqs, hasReadSeq); err != nil {
return nil, err return nil, err
} }
} else if conversation.ConversationType == constant.SuperGroupChatType ||
} else if conversation.ConversationType == constant.SuperGroupChatType { conversation.ConversationType == constant.NotificationChatType {
if req.HasReadSeq > hasReadSeq { if req.HasReadSeq > hasReadSeq {
err = m.MsgDatabase.SetHasReadSeq(ctx, req.UserID, req.ConversationID, req.HasReadSeq) err = m.MsgDatabase.SetHasReadSeq(ctx, req.UserID, req.ConversationID, req.HasReadSeq)
if err != nil { if err != nil {

@ -62,6 +62,11 @@ func (m *msgServer) PullMessageBySeqs(
case sdkws.PullOrder_PullOrderDesc: case sdkws.PullOrder_PullOrderDesc:
isEnd = seq.Begin <= minSeq isEnd = seq.Begin <= minSeq
} }
if len(msgs) == 0 {
log.ZWarn(ctx, "not have msgs", nil, "conversationID", seq.ConversationID, "seq", seq)
continue
}
resp.Msgs[seq.ConversationID] = &sdkws.PullMsgs{Msgs: msgs, IsEnd: isEnd} resp.Msgs[seq.ConversationID] = &sdkws.PullMsgs{Msgs: msgs, IsEnd: isEnd}
} else { } else {
var seqs []int64 var seqs []int64
@ -71,6 +76,7 @@ func (m *msgServer) PullMessageBySeqs(
minSeq, maxSeq, notificationMsgs, err := m.MsgDatabase.GetMsgBySeqs(ctx, req.UserID, seq.ConversationID, seqs) minSeq, maxSeq, notificationMsgs, err := m.MsgDatabase.GetMsgBySeqs(ctx, req.UserID, seq.ConversationID, seqs)
if err != nil { if err != nil {
log.ZWarn(ctx, "GetMsgBySeqs error", err, "conversationID", seq.ConversationID, "seq", seq) log.ZWarn(ctx, "GetMsgBySeqs error", err, "conversationID", seq.ConversationID, "seq", seq)
continue continue
} }
var isEnd bool var isEnd bool
@ -80,6 +86,11 @@ func (m *msgServer) PullMessageBySeqs(
case sdkws.PullOrder_PullOrderDesc: case sdkws.PullOrder_PullOrderDesc:
isEnd = seq.Begin <= minSeq isEnd = seq.Begin <= minSeq
} }
if len(notificationMsgs) == 0 {
log.ZWarn(ctx, "not have notificationMsgs", nil, "conversationID", seq.ConversationID, "seq", seq)
continue
}
resp.NotificationMsgs[seq.ConversationID] = &sdkws.PullMsgs{Msgs: notificationMsgs, IsEnd: isEnd} resp.NotificationMsgs[seq.ConversationID] = &sdkws.PullMsgs{Msgs: notificationMsgs, IsEnd: isEnd}
} }
} }
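
Editor's aside, not repository code: PullMessageBySeqs now skips conversations whose fetch returned no messages instead of inserting empty entries into the response, and keeps the pull-order check for isEnd. The sketch below illustrates both; the ascending branch is not shown in this hunk, so its condition (End >= maxSeq) is an assumption, and the types are local stand-ins for the sdkws ones.

package main

import "fmt"

type PullOrder int

const (
	PullOrderAsc PullOrder = iota
	PullOrderDesc
)

type seqRange struct {
	ConversationID string
	Begin, End     int64
}

// isEndOfHistory mirrors the switch on pull order: descending pulls end once
// Begin reaches the stored minSeq; the ascending condition is assumed.
func isEndOfHistory(order PullOrder, r seqRange, minSeq, maxSeq int64) bool {
	switch order {
	case PullOrderAsc:
		return r.End >= maxSeq
	case PullOrderDesc:
		return r.Begin <= minSeq
	default:
		return false
	}
}

func main() {
	r := seqRange{ConversationID: "si_u1_u2", Begin: 90, End: 120}
	fmt.Println(isEndOfHistory(PullOrderAsc, r, 1, 100)) // true

	// The added guard: a conversation whose fetch returned nothing is logged
	// and skipped instead of becoming an empty entry in resp.Msgs.
	msgsByConv := map[string][]string{}
	resp := map[string][]string{}
	if msgs := msgsByConv[r.ConversationID]; len(msgs) == 0 {
		fmt.Println("not have msgs, skip", r.ConversationID)
	} else {
		resp[r.ConversationID] = msgs
	}
	_ = resp
}
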

@ -41,7 +41,7 @@ type SendMsgReq struct {
type BatchSendMsgReq struct { type BatchSendMsgReq struct {
SendMsg SendMsg
IsSendAll bool `json:"isSendAll"` IsSendAll bool `json:"isSendAll"`
RecvIDs []string `json:"recvIDs"` RecvIDs []string `json:"recvIDs" binding:"required"`
} }
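
Editor's aside, not repository code: adding binding:"required" to RecvIDs means a batch-send request whose JSON body omits recvIDs (or sends it as null) is rejected at bind time. The standalone gin handler below shows the effect of that tag; the route path and response shape are illustrative, not the real openim-api wiring.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// batchSendReq mirrors the tag change on BatchSendMsgReq.
type batchSendReq struct {
	IsSendAll bool     `json:"isSendAll"`
	RecvIDs   []string `json:"recvIDs" binding:"required"`
}

func main() {
	r := gin.Default()
	r.POST("/batch_send_msg", func(c *gin.Context) {
		var req batchSendReq
		if err := c.ShouldBindJSON(&req); err != nil {
			// A missing recvIDs field now surfaces here as a 400 instead of
			// being treated as an empty recipient list.
			c.JSON(http.StatusBadRequest, gin.H{"errMsg": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{"count": len(req.RecvIDs)})
	})
	_ = r.Run(":8080")
}
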
type BatchSendMsgResp struct { type BatchSendMsgResp struct {

@ -28,6 +28,7 @@ type ApiCmd struct {
func NewApiCmd() *ApiCmd { func NewApiCmd() *ApiCmd {
ret := &ApiCmd{NewRootCmd("api")} ret := &ApiCmd{NewRootCmd("api")}
ret.SetRootCmdPt(ret) ret.SetRootCmdPt(ret)
return ret return ret
} }
@ -36,11 +37,13 @@ func (a *ApiCmd) AddApi(f func(port int) error) {
return f(a.getPortFlag(cmd)) return f(a.getPortFlag(cmd))
} }
} }
func (a *ApiCmd) GetPortFromConfig(portType string) int { func (a *ApiCmd) GetPortFromConfig(portType string) int {
fmt.Println("GetPortFromConfig:", portType) fmt.Println("GetPortFromConfig:", portType)
if portType == constant.FlagPort { if portType == constant.FlagPort {
return config2.Config.Api.OpenImApiPort[0] return config2.Config.Api.OpenImApiPort[0]
} else { } else {
return 0 return 0
} }
} }
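
Editor's aside, not repository code: GetPortFromConfig lets ApiCmd fall back to the first port in the api section of the config file. The precedence between the --port flag and the config value lives in RootCmd.getPortFlag, which is outside this hunk, so the ordering in the sketch below (flag wins, config is the fallback) is an assumption; openImApiPorts stands in for config.Config.Api.OpenImApiPort.

package main

import "fmt"

// Stand-in for config.Config.Api.OpenImApiPort.
var openImApiPorts = []int{10002}

// resolvePort sketches one plausible resolution order: a non-zero flag value
// takes precedence, otherwise the first configured port is used.
func resolvePort(flagPort int) (int, error) {
	if flagPort != 0 {
		return flagPort, nil
	}
	if len(openImApiPorts) == 0 {
		return 0, fmt.Errorf("no api port configured")
	}
	return openImApiPorts[0], nil
}

func main() {
	p, _ := resolvePort(0)
	fmt.Println(p) // 10002
	p, _ = resolvePort(20002)
	fmt.Println(p) // 20002
}
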

@ -23,6 +23,7 @@ import (
"github.com/OpenIMSDK/protocol/constant" "github.com/OpenIMSDK/protocol/constant"
"github.com/OpenIMSDK/tools/log" "github.com/OpenIMSDK/tools/log"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
) )
@ -53,38 +54,76 @@ func WithLogName(logName string) func(*CmdOpts) {
} }
} }
-func NewRootCmd(name string, opts ...func(*CmdOpts)) (rootCmd *RootCmd) {
-	rootCmd = &RootCmd{Name: name}
-	c := cobra.Command{
-		Use:   "start openIM application",
+func NewRootCmd(name string, opts ...func(*CmdOpts)) *RootCmd {
+	rootCmd := &RootCmd{Name: name}
+	cmd := cobra.Command{
+		Use:   "Start openIM application",
 		Short: fmt.Sprintf(`Start %s `, name),
 		Long:  fmt.Sprintf(`Start %s `, name),
 		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
-			if err := rootCmd.getConfFromCmdAndInit(cmd); err != nil {
-				panic(err)
-			}
-			cmdOpts := &CmdOpts{}
-			for _, opt := range opts {
-				opt(cmdOpts)
-			}
-			if cmdOpts.loggerPrefixName == "" {
-				cmdOpts.loggerPrefixName = "OpenIM.log.all"
-			}
-			if err := log.InitFromConfig(cmdOpts.loggerPrefixName, name, config.Config.Log.RemainLogLevel, config.Config.Log.IsStdout, config.Config.Log.IsJson, config.Config.Log.StorageLocation, config.Config.Log.RemainRotationCount, config.Config.Log.RotationTime); err != nil {
-				panic(err)
-			}
-			return nil
-		},
-	}
-	rootCmd.Command = c
-	rootCmd.addConfFlag()
-	return rootCmd
-}
+			return rootCmd.persistentPreRun(cmd, opts...)
+		},
+	}
+	rootCmd.Command = cmd
+	rootCmd.addConfFlag()
+	return rootCmd
+}
+
+func (rc *RootCmd) persistentPreRun(cmd *cobra.Command, opts ...func(*CmdOpts)) error {
+	if err := rc.initializeConfiguration(cmd); err != nil {
+		return fmt.Errorf("failed to get configuration from command: %w", err)
+	}
+	cmdOpts := rc.applyOptions(opts...)
+	if err := rc.initializeLogger(cmdOpts); err != nil {
+		return fmt.Errorf("failed to initialize from config: %w", err)
+	}
+	return nil
+}
+
+func (rc *RootCmd) initializeConfiguration(cmd *cobra.Command) error {
+	return rc.getConfFromCmdAndInit(cmd)
+}
+
+func (rc *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
+	cmdOpts := defaultCmdOpts()
+	for _, opt := range opts {
+		opt(cmdOpts)
+	}
+	return cmdOpts
+}
+
+func (rc *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
+	logConfig := config.Config.Log
+	return log.InitFromConfig(
+		cmdOpts.loggerPrefixName,
+		rc.Name,
+		logConfig.RemainLogLevel,
+		logConfig.IsStdout,
+		logConfig.IsJson,
+		logConfig.StorageLocation,
+		logConfig.RemainRotationCount,
+		logConfig.RotationTime,
+	)
+}
+
+func defaultCmdOpts() *CmdOpts {
+	return &CmdOpts{
+		loggerPrefixName: "OpenIM.log.all",
+	}
+}
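
Editor's aside, not repository code: the shape of this refactor is that PersistentPreRunE delegates to a small method whose steps return wrapped errors instead of panicking, so cobra can surface the failure to the caller. The standalone cobra example below shows that shape; the flag name and struct are illustrative (the real flag comes from constant.FlagConf on RootCmd).

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

type rootCmd struct {
	name string
	conf string
}

// preRun mirrors persistentPreRun: each step returns a wrapped error rather
// than panicking.
func (r *rootCmd) preRun(cmd *cobra.Command) error {
	conf, err := cmd.Flags().GetString("config_folder_path")
	if err != nil {
		return fmt.Errorf("failed to get configuration from command: %w", err)
	}
	r.conf = conf
	return nil
}

func main() {
	r := &rootCmd{name: "api"}
	cmd := &cobra.Command{
		Use:   "start openIM application",
		Short: fmt.Sprintf("Start %s ", r.name),
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			return r.preRun(cmd)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("running", r.name, "with config folder:", r.conf)
			return nil
		},
	}
	cmd.Flags().StringP("config_folder_path", "c", "", "path to config file folder")
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
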
func (r *RootCmd) SetRootCmdPt(cmdItf RootCmdPt) { func (r *RootCmd) SetRootCmdPt(cmdItf RootCmdPt) {
r.cmdItf = cmdItf r.cmdItf = cmdItf
} }
func (r *RootCmd) addConfFlag() { func (r *RootCmd) addConfFlag() {
r.Command.Flags().StringP(constant.FlagConf, "c", "", "Path to config file folder") r.Command.Flags().StringP(constant.FlagConf, "c", "", "path to config file folder")
} }
func (r *RootCmd) AddPortFlag() { func (r *RootCmd) AddPortFlag() {

@ -19,32 +19,31 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"github.com/OpenIMSDK/protocol/constant"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
"github.com/OpenIMSDK/protocol/constant"
) )
//go:embed version //go:embed version
var Version string var Version string
var (
_, b, _, _ = runtime.Caller(0)
// Root folder of this project.
Root = filepath.Join(filepath.Dir(b), "../../..")
)
const ( const (
FileName = "config.yaml" FileName = "config.yaml"
NotificationFileName = "notification.yaml" NotificationFileName = "notification.yaml"
DefaultFolderPath = "../config/" DefaultFolderPath = "../config/"
) )
// getProjectRoot returns the absolute path of the project root directory
func GetProjectRoot() string {
b, _ := filepath.Abs(os.Args[0])
return filepath.Join(filepath.Dir(b), "../../..")
}
func GetOptionsByNotification(cfg NotificationConf) msgprocessor.Options { func GetOptionsByNotification(cfg NotificationConf) msgprocessor.Options {
opts := msgprocessor.NewOptions() opts := msgprocessor.NewOptions()
if cfg.UnreadCount { if cfg.UnreadCount {
opts = msgprocessor.WithOptions(opts, msgprocessor.WithUnreadCount(true)) opts = msgprocessor.WithOptions(opts, msgprocessor.WithUnreadCount(true))
} }
@ -61,40 +60,38 @@ func GetOptionsByNotification(cfg NotificationConf) msgprocessor.Options {
} }
func initConfig(config interface{}, configName, configFolderPath string) error { func initConfig(config interface{}, configName, configFolderPath string) error {
if configFolderPath == "" { configFolderPath = filepath.Join(configFolderPath, configName)
configFolderPath = DefaultFolderPath _, err := os.Stat(configFolderPath)
}
configPath := filepath.Join(configFolderPath, configName)
defer func() {
fmt.Println("use config", configPath)
}()
_, err := os.Stat(configPath)
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
return err return fmt.Errorf("stat config path error: %w", err)
} }
configPath = filepath.Join(Root, "config", configName) configFolderPath = filepath.Join(GetProjectRoot(), "config", configName)
} else {
Root = filepath.Dir(configPath)
} }
data, err := os.ReadFile(configPath) data, err := os.ReadFile(configFolderPath)
if err != nil { if err != nil {
return err return fmt.Errorf("read file error: %w", err)
} }
if err = yaml.Unmarshal(data, config); err != nil { if err = yaml.Unmarshal(data, config); err != nil {
return err return fmt.Errorf("unmarshal yaml error: %w", err)
} }
fmt.Println("use config", configFolderPath)
return nil return nil
} }
func InitConfig(configFolderPath string) error { func InitConfig(configFolderPath string) error {
err := initConfig(&Config, FileName, configFolderPath) if configFolderPath == "" {
if err != nil { envConfigPath := os.Getenv("OPENIMCONFIG")
return err if envConfigPath != "" {
configFolderPath = envConfigPath
} else {
configFolderPath = DefaultFolderPath
} }
err = initConfig(&Config.Notification, NotificationFileName, configFolderPath) }
if err != nil {
if err := initConfig(&Config, FileName, configFolderPath); err != nil {
return err return err
} }
return nil
return initConfig(&Config.Notification, NotificationFileName, configFolderPath)
} }
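
Editor's aside, not repository code: the rewritten config loading resolves the folder in this order: explicit path, then the OPENIMCONFIG environment variable, then the compiled-in default, and falls back to <project root>/config when the file is absent, wrapping each failure so the caller can tell which step broke. The sketch below reproduces that flow with a simplified project root; only OPENIMCONFIG and the error messages are taken from the diff.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

const (
	fileName          = "config.yaml"
	defaultFolderPath = "../config/"
)

// resolveFolder mirrors InitConfig: explicit path, then OPENIMCONFIG, then default.
func resolveFolder(explicit string) string {
	if explicit != "" {
		return explicit
	}
	if env := os.Getenv("OPENIMCONFIG"); env != "" {
		return env
	}
	return defaultFolderPath
}

// loadConfig mirrors initConfig: stat the candidate path, fall back to
// <projectRoot>/config/<name> when it does not exist, wrap every error.
func loadConfig(out any, folder, name, projectRoot string) error {
	path := filepath.Join(folder, name)
	if _, err := os.Stat(path); err != nil {
		if !os.IsNotExist(err) {
			return fmt.Errorf("stat config path error: %w", err)
		}
		path = filepath.Join(projectRoot, "config", name)
	}
	data, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("read file error: %w", err)
	}
	if err := yaml.Unmarshal(data, out); err != nil {
		return fmt.Errorf("unmarshal yaml error: %w", err)
	}
	fmt.Println("use config", path)
	return nil
}

func main() {
	var cfg map[string]any
	if err := loadConfig(&cfg, resolveFolder(""), fileName, "."); err != nil {
		fmt.Println(err)
	}
}
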

@ -52,6 +52,7 @@ func NewBlackCacheRedis(
options rockscache.Options, options rockscache.Options,
) BlackCache { ) BlackCache {
rcClient := rockscache.NewClient(rdb, options) rcClient := rockscache.NewClient(rdb, options)
return &BlackCacheRedis{ return &BlackCacheRedis{
expireTime: blackExpireTime, expireTime: blackExpireTime,
rcClient: rcClient, rcClient: rcClient,
@ -88,5 +89,6 @@ func (b *BlackCacheRedis) GetBlackIDs(ctx context.Context, userID string) (black
func (b *BlackCacheRedis) DelBlackIDs(ctx context.Context, userID string) BlackCache { func (b *BlackCacheRedis) DelBlackIDs(ctx context.Context, userID string) BlackCache {
cache := b.NewCache() cache := b.NewCache()
cache.AddKeys(b.getBlackIDsKey(userID)) cache.AddKeys(b.getBlackIDsKey(userID))
return cache return cache
} }

@ -73,7 +73,7 @@ type ConversationCache interface {
GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error) GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error)
DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache
GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) //GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache
GetConversationsByConversationID(ctx context.Context, GetConversationsByConversationID(ctx context.Context,
@ -83,12 +83,9 @@ type ConversationCache interface {
DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache
} }
func NewConversationRedis( func NewConversationRedis(rdb redis.UniversalClient, opts rockscache.Options, db relationtb.ConversationModelInterface) ConversationCache {
rdb redis.UniversalClient,
opts rockscache.Options,
db relationtb.ConversationModelInterface,
) ConversationCache {
rcClient := rockscache.NewClient(rdb, opts) rcClient := rockscache.NewClient(rdb, opts)
return &ConversationRedisCache{ return &ConversationRedisCache{
rcClient: rcClient, rcClient: rcClient,
metaCache: NewMetaCacheRedis(rcClient), metaCache: NewMetaCacheRedis(rcClient),
@ -110,6 +107,7 @@ func NewNewConversationRedis(
options rockscache.Options, options rockscache.Options,
) ConversationCache { ) ConversationCache {
rcClient := rockscache.NewClient(rdb, options) rcClient := rockscache.NewClient(rdb, options)
return &ConversationRedisCache{ return &ConversationRedisCache{
rcClient: rcClient, rcClient: rcClient,
metaCache: NewMetaCacheRedis(rcClient), metaCache: NewMetaCacheRedis(rcClient),
@ -156,24 +154,19 @@ func (c *ConversationRedisCache) getConversationNotReceiveMessageUserIDsKey(conv
} }
func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) { func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) {
return getCache( return getCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), c.expireTime, func(ctx context.Context) ([]string, error) {
ctx,
c.rcClient,
c.getConversationIDsKey(ownerUserID),
c.expireTime,
func(ctx context.Context) ([]string, error) {
return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID) return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
}, })
)
} }
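
Editor's aside, not repository code: the collapsed call sites all lean on the same read-through helper: a cache key, a TTL, and a loader closure that hits the database on a miss. The real getCache is backed by rockscache; the sketch below substitutes a small in-memory map purely to show the calling shape, and the key prefix is made up.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type entry struct {
	value    any
	expireAt time.Time
}

// memCache is a tiny in-memory stand-in for the rockscache-backed cache.
type memCache struct {
	mu    sync.Mutex
	items map[string]entry
}

func newMemCache() *memCache { return &memCache{items: map[string]entry{}} }

// getCache returns the cached value for key, or runs fn on a miss and
// remembers the result for expire.
func getCache[T any](ctx context.Context, c *memCache, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
	c.mu.Lock()
	if e, ok := c.items[key]; ok && time.Now().Before(e.expireAt) {
		c.mu.Unlock()
		return e.value.(T), nil
	}
	c.mu.Unlock()
	v, err := fn(ctx)
	if err != nil {
		var zero T
		return zero, err
	}
	c.mu.Lock()
	c.items[key] = entry{value: v, expireAt: time.Now().Add(expire)}
	c.mu.Unlock()
	return v, nil
}

func main() {
	c := newMemCache()
	ids, _ := getCache(context.Background(), c, "CONVERSATION_IDS:u1", time.Minute, func(ctx context.Context) ([]string, error) {
		return []string{"si_u1_u2"}, nil // would be conversationDB.FindUserIDAllConversationID
	})
	fmt.Println(ids)
}
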
func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) ConversationCache { func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) ConversationCache {
var keys []string keys := make([]string, 0, len(userIDs))
for _, userID := range userIDs { for _, userID := range userIDs {
keys = append(keys, c.getConversationIDsKey(userID)) keys = append(keys, c.getConversationIDsKey(userID))
} }
cache := c.NewCache() cache := c.NewCache()
cache.AddKeys(keys...) cache.AddKeys(keys...)
return cache return cache
} }
@ -181,10 +174,7 @@ func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID strin
return conversationIDsHashKey + ownerUserID return conversationIDsHashKey + ownerUserID
} }
func (c *ConversationRedisCache) GetUserConversationIDsHash( func (c *ConversationRedisCache) GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) {
ctx context.Context,
ownerUserID string,
) (hash uint64, err error) {
return getCache( return getCache(
ctx, ctx,
c.rcClient, c.rcClient,
@ -204,160 +194,127 @@ func (c *ConversationRedisCache) GetUserConversationIDsHash(
} }
func (c *ConversationRedisCache) DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache { func (c *ConversationRedisCache) DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache {
var keys []string keys := make([]string, 0, len(ownerUserIDs))
for _, ownerUserID := range ownerUserIDs { for _, ownerUserID := range ownerUserIDs {
keys = append(keys, c.getUserConversationIDsHashKey(ownerUserID)) keys = append(keys, c.getUserConversationIDsHashKey(ownerUserID))
} }
cache := c.NewCache() cache := c.NewCache()
cache.AddKeys(keys...) cache.AddKeys(keys...)
return cache return cache
} }
func (c *ConversationRedisCache) GetConversation( func (c *ConversationRedisCache) GetConversation(ctx context.Context, ownerUserID, conversationID string) (*relationtb.ConversationModel, error) {
ctx context.Context, return getCache(ctx, c.rcClient, c.getConversationKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (*relationtb.ConversationModel, error) {
ownerUserID, conversationID string,
) (*relationtb.ConversationModel, error) {
return getCache(
ctx,
c.rcClient,
c.getConversationKey(ownerUserID, conversationID),
c.expireTime,
func(ctx context.Context) (*relationtb.ConversationModel, error) {
return c.conversationDB.Take(ctx, ownerUserID, conversationID) return c.conversationDB.Take(ctx, ownerUserID, conversationID)
}, })
)
} }
func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache { func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache {
var keys []string keys := make([]string, 0, len(conversationIDs))
for _, conversationID := range conversationIDs { for _, conversationID := range conversationIDs {
keys = append(keys, c.getConversationKey(ownerUserID, conversationID)) keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
} }
cache := c.NewCache() cache := c.NewCache()
cache.AddKeys(keys...) cache.AddKeys(keys...)
return cache return cache
} }
func (c *ConversationRedisCache) getConversationIndex( func (c *ConversationRedisCache) getConversationIndex(convsation *relationtb.ConversationModel, keys []string) (int, error) {
convsation *relationtb.ConversationModel,
keys []string,
) (int, error) {
key := c.getConversationKey(convsation.OwnerUserID, convsation.ConversationID) key := c.getConversationKey(convsation.OwnerUserID, convsation.ConversationID)
for _i, _key := range keys { for _i, _key := range keys {
if _key == key { if _key == key {
return _i, nil return _i, nil
} }
} }
return 0, errors.New("not found key:" + key + " in keys") return 0, errors.New("not found key:" + key + " in keys")
} }
func (c *ConversationRedisCache) GetConversations( func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
ctx context.Context, //var keys []string
ownerUserID string, //for _, conversarionID := range conversationIDs {
conversationIDs []string, // keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
) ([]*relationtb.ConversationModel, error) { //}
var keys []string //return batchGetCache(
for _, conversarionID := range conversationIDs { // ctx,
keys = append(keys, c.getConversationKey(ownerUserID, conversarionID)) // c.rcClient,
} // keys,
return batchGetCache( // c.expireTime,
ctx, // c.getConversationIndex,
c.rcClient, // func(ctx context.Context) ([]*relationtb.ConversationModel, error) {
keys, // return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
c.expireTime, // },
c.getConversationIndex, //)
func(ctx context.Context) ([]*relationtb.ConversationModel, error) { return batchGetCache2(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
return c.conversationDB.Find(ctx, ownerUserID, conversationIDs) return c.getConversationKey(ownerUserID, conversationID)
}, }, func(ctx context.Context, conversationID string) (*relationtb.ConversationModel, error) {
) return c.conversationDB.Take(ctx, ownerUserID, conversationID)
})
} }
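
Editor's aside, not repository code: the exact signature of batchGetCache2 is not shown in this hunk, so the generic sketch below only illustrates the shape it is called with here: one function builds the cache key for an ID, another loads a single record on a miss, and the results are assembled in order. The plain map stands in for the rockscache client.

package main

import (
	"context"
	"fmt"
)

// batchGetByID illustrates the per-ID calling shape of batchGetCache2.
func batchGetByID[T any](ctx context.Context, cache map[string]T, ids []string,
	key func(id string) string, fetch func(ctx context.Context, id string) (T, error)) ([]T, error) {
	res := make([]T, 0, len(ids))
	for _, id := range ids {
		k := key(id)
		if v, ok := cache[k]; ok {
			res = append(res, v)
			continue
		}
		v, err := fetch(ctx, id)
		if err != nil {
			return nil, err
		}
		cache[k] = v
		res = append(res, v)
	}
	return res, nil
}

type conversation struct{ OwnerUserID, ConversationID string }

func main() {
	cache := map[string]conversation{}
	convs, _ := batchGetByID(context.Background(), cache, []string{"si_u1_u2", "sg_g1"},
		func(id string) string { return "CONVERSATION:u1:" + id },
		func(ctx context.Context, id string) (conversation, error) {
			return conversation{OwnerUserID: "u1", ConversationID: id}, nil // would be conversationDB.Take
		})
	fmt.Println(len(convs), cache)
}
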
func (c *ConversationRedisCache) GetUserAllConversations( func (c *ConversationRedisCache) GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*relationtb.ConversationModel, error) {
ctx context.Context,
ownerUserID string,
) ([]*relationtb.ConversationModel, error) {
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID) conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var keys []string //var keys []string
for _, conversarionID := range conversationIDs { //for _, conversarionID := range conversationIDs {
keys = append(keys, c.getConversationKey(ownerUserID, conversarionID)) // keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
} //}
return batchGetCache( //return batchGetCache(
ctx, // ctx,
c.rcClient, // c.rcClient,
keys, // keys,
c.expireTime, // c.expireTime,
c.getConversationIndex, // c.getConversationIndex,
func(ctx context.Context) ([]*relationtb.ConversationModel, error) { // func(ctx context.Context) ([]*relationtb.ConversationModel, error) {
return c.conversationDB.FindUserIDAllConversations(ctx, ownerUserID) // return c.conversationDB.FindUserIDAllConversations(ctx, ownerUserID)
}, // },
) //)
} return c.GetConversations(ctx, ownerUserID, conversationIDs)
}
func (c *ConversationRedisCache) GetUserRecvMsgOpt(
ctx context.Context, func (c *ConversationRedisCache) GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error) {
ownerUserID, conversationID string, return getCache(ctx, c.rcClient, c.getRecvMsgOptKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (opt int, err error) {
) (opt int, err error) {
return getCache(
ctx,
c.rcClient,
c.getRecvMsgOptKey(ownerUserID, conversationID),
c.expireTime,
func(ctx context.Context) (opt int, err error) {
return c.conversationDB.GetUserRecvMsgOpt(ctx, ownerUserID, conversationID) return c.conversationDB.GetUserRecvMsgOpt(ctx, ownerUserID, conversationID)
}, })
)
} }
func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDs( func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) (userIDs []string, err error) {
ctx context.Context, return getCache(ctx, c.rcClient, c.getSuperGroupRecvNotNotifyUserIDsKey(groupID), c.expireTime, func(ctx context.Context) (userIDs []string, err error) {
groupID string,
) (userIDs []string, err error) {
return getCache(
ctx,
c.rcClient,
c.getSuperGroupRecvNotNotifyUserIDsKey(groupID),
c.expireTime,
func(ctx context.Context) (userIDs []string, err error) {
return c.conversationDB.FindSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID) return c.conversationDB.FindSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID)
}, })
)
} }
func (c *ConversationRedisCache) DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache { func (c *ConversationRedisCache) DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache {
var keys []string keys := make([]string, 0, len(ownerUserIDs))
for _, ownerUserID := range ownerUserIDs { for _, ownerUserID := range ownerUserIDs {
keys = append(keys, c.getConversationKey(ownerUserID, conversationID)) keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
} }
cache := c.NewCache() cache := c.NewCache()
cache.AddKeys(keys...) cache.AddKeys(keys...)
return cache return cache
} }
func (c *ConversationRedisCache) DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache { func (c *ConversationRedisCache) DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache {
cache := c.NewCache() cache := c.NewCache()
cache.AddKeys(c.getRecvMsgOptKey(ownerUserID, conversationID)) cache.AddKeys(c.getRecvMsgOptKey(ownerUserID, conversationID))
return cache return cache
} }
func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache { func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache {
cache := c.NewCache() cache := c.NewCache()
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsKey(groupID)) cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsKey(groupID))
return cache return cache
} }
func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDsHash( func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error) {
ctx context.Context, return getCache(ctx, c.rcClient, c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID), c.expireTime, func(ctx context.Context) (hash uint64, err error) {
groupID string,
) (hash uint64, err error) {
return getCache(
ctx,
c.rcClient,
c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID),
c.expireTime,
func(ctx context.Context) (hash uint64, err error) {
userIDs, err := c.GetSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID) userIDs, err := c.GetSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID)
if err != nil { if err != nil {
return 0, err return 0, err
@ -373,60 +330,44 @@ func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDsHash(
func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache { func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache {
cache := c.NewCache() cache := c.NewCache()
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID)) cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID))
return cache return cache
} }
func (c *ConversationRedisCache) getUserAllHasReadSeqsIndex( func (c *ConversationRedisCache) getUserAllHasReadSeqsIndex(conversationID string, conversationIDs []string) (int, error) {
conversationID string,
conversationIDs []string,
) (int, error) {
for _i, _conversationID := range conversationIDs { for _i, _conversationID := range conversationIDs {
if _conversationID == conversationID { if _conversationID == conversationID {
return _i, nil return _i, nil
} }
} }
return 0, errors.New("not found key:" + conversationID + " in keys")
}
func (c *ConversationRedisCache) GetUserAllHasReadSeqs( return 0, errors.New("not found key:" + conversationID + " in keys")
ctx context.Context,
ownerUserID string,
) (map[string]int64, error) {
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
if err != nil {
return nil, err
}
var keys []string
for _, conversarionID := range conversationIDs {
keys = append(keys, c.getConversationHasReadSeqKey(ownerUserID, conversarionID))
}
return batchGetCacheMap(
ctx,
c.rcClient,
keys,
conversationIDs,
c.expireTime,
c.getUserAllHasReadSeqsIndex,
func(ctx context.Context) (map[string]int64, error) {
return c.conversationDB.GetUserAllHasReadSeqs(ctx, ownerUserID)
},
)
} }
func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string, //func (c *ConversationRedisCache) GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) {
conversationIDs ...string, // conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
) ConversationCache { // if err != nil {
// return nil, err
// }
// var keys []string
// for _, conversarionID := range conversationIDs {
// keys = append(keys, c.getConversationHasReadSeqKey(ownerUserID, conversarionID))
// }
// return batchGetCacheMap(ctx, c.rcClient, keys, conversationIDs, c.expireTime, c.getUserAllHasReadSeqsIndex, func(ctx context.Context) (map[string]int64, error) {
// return c.conversationDB.GetUserAllHasReadSeqs(ctx, ownerUserID)
// })
//}
func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache {
cache := c.NewCache() cache := c.NewCache()
for _, conversationID := range conversationIDs { for _, conversationID := range conversationIDs {
cache.AddKeys(c.getConversationHasReadSeqKey(ownerUserID, conversationID)) cache.AddKeys(c.getConversationHasReadSeqKey(ownerUserID, conversationID))
} }
return cache return cache
} }
func (c *ConversationRedisCache) GetConversationsByConversationID( func (c *ConversationRedisCache) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
ctx context.Context,
conversationIDs []string,
) ([]*relationtb.ConversationModel, error) {
panic("implement me") panic("implement me")
} }
@ -435,15 +376,9 @@ func (c *ConversationRedisCache) DelConversationByConversationID(conversationIDs
} }
func (c *ConversationRedisCache) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) { func (c *ConversationRedisCache) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) {
return getCache( return getCache(ctx, c.rcClient, c.getConversationNotReceiveMessageUserIDsKey(conversationID), c.expireTime, func(ctx context.Context) ([]string, error) {
ctx,
c.rcClient,
c.getConversationNotReceiveMessageUserIDsKey(conversationID),
c.expireTime,
func(ctx context.Context) ([]string, error) {
return c.conversationDB.GetConversationNotReceiveMessageUserIDs(ctx, conversationID) return c.conversationDB.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
}, })
)
} }
func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache { func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache {
@ -451,5 +386,6 @@ func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(convers
for _, conversationID := range conversationIDs { for _, conversationID := range conversationIDs {
cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID)) cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
} }
return cache return cache
} }

@ -53,11 +53,8 @@ type FriendCacheRedis struct {
rcClient *rockscache.Client rcClient *rockscache.Client
} }
func NewFriendCacheRedis( func NewFriendCacheRedis(rdb redis.UniversalClient, friendDB relationtb.FriendModelInterface,
rdb redis.UniversalClient, options rockscache.Options) FriendCache {
friendDB relationtb.FriendModelInterface,
options rockscache.Options,
) FriendCache {
rcClient := rockscache.NewClient(rdb, options) rcClient := rockscache.NewClient(rdb, options)
return &FriendCacheRedis{ return &FriendCacheRedis{
metaCache: NewMetaCacheRedis(rcClient), metaCache: NewMetaCacheRedis(rcClient),
@ -67,12 +64,12 @@ func NewFriendCacheRedis(
} }
} }
func (c *FriendCacheRedis) NewCache() FriendCache { func (f *FriendCacheRedis) NewCache() FriendCache {
return &FriendCacheRedis{ return &FriendCacheRedis{
rcClient: c.rcClient, rcClient: f.rcClient,
metaCache: NewMetaCacheRedis(c.rcClient, c.metaCache.GetPreDelKeys()...), metaCache: NewMetaCacheRedis(f.rcClient, f.metaCache.GetPreDelKeys()...),
friendDB: c.friendDB, friendDB: f.friendDB,
expireTime: c.expireTime, expireTime: f.expireTime,
} }
} }
@ -89,32 +86,24 @@ func (f *FriendCacheRedis) getFriendKey(ownerUserID, friendUserID string) string
} }
func (f *FriendCacheRedis) GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error) { func (f *FriendCacheRedis) GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error) {
return getCache( return getCache(ctx, f.rcClient, f.getFriendIDsKey(ownerUserID), f.expireTime, func(ctx context.Context) ([]string, error) {
ctx,
f.rcClient,
f.getFriendIDsKey(ownerUserID),
f.expireTime,
func(ctx context.Context) ([]string, error) {
return f.friendDB.FindFriendUserIDs(ctx, ownerUserID) return f.friendDB.FindFriendUserIDs(ctx, ownerUserID)
}, })
)
} }
func (f *FriendCacheRedis) DelFriendIDs(ownerUserID ...string) FriendCache { func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) FriendCache {
new := f.NewCache() newGroupCache := f.NewCache()
var keys []string keys := make([]string, 0, len(ownerUserIDs))
for _, userID := range ownerUserID { for _, userID := range ownerUserIDs {
keys = append(keys, f.getFriendIDsKey(userID)) keys = append(keys, f.getFriendIDsKey(userID))
} }
new.AddKeys(keys...) newGroupCache.AddKeys(keys...)
return new
return newGroupCache
} }
// todo. // todo.
func (f *FriendCacheRedis) GetTwoWayFriendIDs( func (f *FriendCacheRedis) GetTwoWayFriendIDs(ctx context.Context, ownerUserID string) (twoWayFriendIDs []string, err error) {
ctx context.Context,
ownerUserID string,
) (twoWayFriendIDs []string, err error) {
friendIDs, err := f.GetFriendIDs(ctx, ownerUserID) friendIDs, err := f.GetFriendIDs(ctx, ownerUserID)
if err != nil { if err != nil {
return nil, err return nil, err
@ -128,32 +117,28 @@ func (f *FriendCacheRedis) GetTwoWayFriendIDs(
twoWayFriendIDs = append(twoWayFriendIDs, ownerUserID) twoWayFriendIDs = append(twoWayFriendIDs, ownerUserID)
} }
} }
return twoWayFriendIDs, nil return twoWayFriendIDs, nil
} }
func (f *FriendCacheRedis) DelTwoWayFriendIDs(ctx context.Context, ownerUserID string) FriendCache { func (f *FriendCacheRedis) DelTwoWayFriendIDs(ctx context.Context, ownerUserID string) FriendCache {
new := f.NewCache() newFriendCache := f.NewCache()
new.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID)) newFriendCache.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID))
return new
} return newFriendCache
}
func (f *FriendCacheRedis) GetFriend(
ctx context.Context, func (f *FriendCacheRedis) GetFriend(ctx context.Context, ownerUserID,
ownerUserID, friendUserID string, friendUserID string) (friend *relationtb.FriendModel, err error) {
) (friend *relationtb.FriendModel, err error) { return getCache(ctx, f.rcClient, f.getFriendKey(ownerUserID,
return getCache( friendUserID), f.expireTime, func(ctx context.Context) (*relationtb.FriendModel, error) {
ctx,
f.rcClient,
f.getFriendKey(ownerUserID, friendUserID),
f.expireTime,
func(ctx context.Context) (*relationtb.FriendModel, error) {
return f.friendDB.Take(ctx, ownerUserID, friendUserID) return f.friendDB.Take(ctx, ownerUserID, friendUserID)
}, })
)
} }
func (f *FriendCacheRedis) DelFriend(ownerUserID, friendUserID string) FriendCache { func (f *FriendCacheRedis) DelFriend(ownerUserID, friendUserID string) FriendCache {
new := f.NewCache() newFriendCache := f.NewCache()
new.AddKeys(f.getFriendKey(ownerUserID, friendUserID)) newFriendCache.AddKeys(f.getFriendKey(ownerUserID, friendUserID))
return new
return newFriendCache
} }
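
Editor's aside, not repository code: the Del* methods in these cache files never delete anything directly; each one returns a fresh cache value that has only recorded which keys to invalidate, and the collected keys are flushed later in one batch through the meta cache. The sketch below shows that collect-then-flush shape; ExecDel and the key prefix are hypothetical stand-ins for the real metaCache machinery.

package main

import "fmt"

// keyCollector mimics the shape of the Del* methods above.
type keyCollector struct {
	keys []string
}

func (k *keyCollector) NewCache() *keyCollector { return &keyCollector{} }

func (k *keyCollector) AddKeys(keys ...string) { k.keys = append(k.keys, keys...) }

// DelFriendIDs records one key per owner without touching storage.
func (k *keyCollector) DelFriendIDs(ownerUserIDs ...string) *keyCollector {
	c := k.NewCache()
	keys := make([]string, 0, len(ownerUserIDs))
	for _, userID := range ownerUserIDs {
		keys = append(keys, "FRIEND_IDS:"+userID)
	}
	c.AddKeys(keys...)
	return c
}

// ExecDel flushes everything that was collected; in the real cache this is
// where the Redis deletions happen.
func (k *keyCollector) ExecDel() {
	fmt.Println("deleting keys:", k.keys)
}

func main() {
	base := &keyCollector{}
	base.DelFriendIDs("u1", "u2").ExecDel() // deleting keys: [FRIEND_IDS:u1 FRIEND_IDS:u2]
}
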

@ -65,22 +65,10 @@ type GroupCache interface {
GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error) GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error)
DelJoinedGroupID(userID ...string) GroupCache DelJoinedGroupID(userID ...string) GroupCache
GetGroupMemberInfo( GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *relationtb.GroupMemberModel, err error)
ctx context.Context, GetGroupMembersInfo(ctx context.Context, groupID string, userID []string) (groupMembers []*relationtb.GroupMemberModel, err error)
groupID, userID string,
) (groupMember *relationtb.GroupMemberModel, err error)
GetGroupMembersInfo(
ctx context.Context,
groupID string,
userID []string,
) (groupMembers []*relationtb.GroupMemberModel, err error)
GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*relationtb.GroupMemberModel, err error) GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*relationtb.GroupMemberModel, err error)
GetGroupMembersPage( GetGroupMembersPage(ctx context.Context, groupID string, userID []string, showNumber, pageNumber int32) (total uint32, groupMembers []*relationtb.GroupMemberModel, err error)
ctx context.Context,
groupID string,
userID []string,
showNumber, pageNumber int32,
) (total uint32, groupMembers []*relationtb.GroupMemberModel, err error)
DelGroupMembersInfo(groupID string, userID ...string) GroupCache DelGroupMembersInfo(groupID string, userID ...string) GroupCache
@ -109,6 +97,7 @@ func NewGroupCacheRedis(
opts rockscache.Options, opts rockscache.Options,
) GroupCache { ) GroupCache {
rcClient := rockscache.NewClient(rdb, opts) rcClient := rockscache.NewClient(rdb, opts)
return &GroupCacheRedis{ return &GroupCacheRedis{
rcClient: rcClient, expireTime: groupExpireTime, rcClient: rcClient, expireTime: groupExpireTime,
groupDB: groupDB, groupMemberDB: groupMemberDB, groupRequestDB: groupRequestDB, groupDB: groupDB, groupMemberDB: groupMemberDB, groupRequestDB: groupRequestDB,
@ -169,6 +158,7 @@ func (g *GroupCacheRedis) GetGroupIndex(group *relationtb.GroupModel, keys []str
return i, nil return i, nil
} }
} }
return 0, errIndex return 0, errIndex
} }
@ -179,62 +169,45 @@ func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *relationtb.GroupMembe
return i, nil return i, nil
} }
} }
return 0, errIndex return 0, errIndex
} }
// / groupInfo. // / groupInfo.
func (g *GroupCacheRedis) GetGroupsInfo( func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*relationtb.GroupModel, err error) {
ctx context.Context, //var keys []string
groupIDs []string, //for _, group := range groupIDs {
) (groups []*relationtb.GroupModel, err error) { // keys = append(keys, g.getGroupInfoKey(group))
var keys []string //}
for _, group := range groupIDs { //return batchGetCache(ctx, g.rcClient, keys, g.expireTime, g.GetGroupIndex, func(ctx context.Context) ([]*relationtb.GroupModel, error) {
keys = append(keys, g.getGroupInfoKey(group)) // return g.groupDB.Find(ctx, groupIDs)
} //})
return batchGetCache( return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
ctx, return g.getGroupInfoKey(groupID)
g.rcClient, }, func(ctx context.Context, groupID string) (*relationtb.GroupModel, error) {
keys, return g.groupDB.Take(ctx, groupID)
g.expireTime, })
g.GetGroupIndex,
func(ctx context.Context) ([]*relationtb.GroupModel, error) {
return g.groupDB.Find(ctx, groupIDs)
},
)
} }
func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *relationtb.GroupModel, err error) { func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *relationtb.GroupModel, err error) {
return getCache( return getCache(ctx, g.rcClient, g.getGroupInfoKey(groupID), g.expireTime, func(ctx context.Context) (*relationtb.GroupModel, error) {
ctx,
g.rcClient,
g.getGroupInfoKey(groupID),
g.expireTime,
func(ctx context.Context) (*relationtb.GroupModel, error) {
return g.groupDB.Take(ctx, groupID) return g.groupDB.Take(ctx, groupID)
}, })
)
} }
func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) GroupCache { func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) GroupCache {
new := g.NewCache() newGroupCache := g.NewCache()
var keys []string keys := make([]string, 0, len(groupIDs))
for _, groupID := range groupIDs { for _, groupID := range groupIDs {
keys = append(keys, g.getGroupInfoKey(groupID)) keys = append(keys, g.getGroupInfoKey(groupID))
} }
new.AddKeys(keys...) newGroupCache.AddKeys(keys...)
return new
return newGroupCache
} }
func (g *GroupCacheRedis) GetJoinedSuperGroupIDs( func (g *GroupCacheRedis) GetJoinedSuperGroupIDs(ctx context.Context, userID string) (joinedSuperGroupIDs []string, err error) {
ctx context.Context, return getCache(ctx, g.rcClient, g.getJoinedSuperGroupsIDKey(userID), g.expireTime, func(ctx context.Context) ([]string, error) {
userID string,
) (joinedSuperGroupIDs []string, err error) {
return getCache(
ctx,
g.rcClient,
g.getJoinedSuperGroupsIDKey(userID),
g.expireTime,
func(ctx context.Context) ([]string, error) {
userGroup, err := g.mongoDB.GetSuperGroupByUserID(ctx, userID) userGroup, err := g.mongoDB.GetSuperGroupByUserID(ctx, userID)
if err != nil { if err != nil {
return nil, err return nil, err
@ -244,52 +217,50 @@ func (g *GroupCacheRedis) GetJoinedSuperGroupIDs(
) )
} }
func (g *GroupCacheRedis) GetSuperGroupMemberIDs( func (g *GroupCacheRedis) GetSuperGroupMemberIDs(ctx context.Context, groupIDs ...string) (models []*unrelationtb.SuperGroupModel, err error) {
ctx context.Context, //var keys []string
groupIDs ...string, //for _, group := range groupIDs {
) (models []*unrelationtb.SuperGroupModel, err error) { // keys = append(keys, g.getSuperGroupMemberIDsKey(group))
var keys []string //}
for _, group := range groupIDs { //return batchGetCache(ctx, g.rcClient, keys, g.expireTime, func(model *unrelationtb.SuperGroupModel, keys []string) (int, error) {
keys = append(keys, g.getSuperGroupMemberIDsKey(group)) // for i, key := range keys {
} // if g.getSuperGroupMemberIDsKey(model.GroupID) == key {
return batchGetCache( // return i, nil
ctx, // }
g.rcClient, // }
keys, // return 0, errIndex
g.expireTime, //},
func(model *unrelationtb.SuperGroupModel, keys []string) (int, error) { // func(ctx context.Context) ([]*unrelationtb.SuperGroupModel, error) {
for i, key := range keys { // return g.mongoDB.FindSuperGroup(ctx, groupIDs)
if g.getSuperGroupMemberIDsKey(model.GroupID) == key { // })
return i, nil return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
} return g.getSuperGroupMemberIDsKey(groupID)
} }, func(ctx context.Context, groupID string) (*unrelationtb.SuperGroupModel, error) {
return 0, errIndex return g.mongoDB.TakeSuperGroup(ctx, groupID)
}, })
func(ctx context.Context) ([]*unrelationtb.SuperGroupModel, error) {
return g.mongoDB.FindSuperGroup(ctx, groupIDs)
},
)
} }
// userJoinSuperGroup. // userJoinSuperGroup.
func (g *GroupCacheRedis) DelJoinedSuperGroupIDs(userIDs ...string) GroupCache { func (g *GroupCacheRedis) DelJoinedSuperGroupIDs(userIDs ...string) GroupCache {
new := g.NewCache() newGroupCache := g.NewCache()
var keys []string keys := make([]string, 0, len(userIDs))
for _, userID := range userIDs { for _, userID := range userIDs {
keys = append(keys, g.getJoinedSuperGroupsIDKey(userID)) keys = append(keys, g.getJoinedSuperGroupsIDKey(userID))
} }
new.AddKeys(keys...) newGroupCache.AddKeys(keys...)
return new
return newGroupCache
} }
func (g *GroupCacheRedis) DelSuperGroupMemberIDs(groupIDs ...string) GroupCache { func (g *GroupCacheRedis) DelSuperGroupMemberIDs(groupIDs ...string) GroupCache {
new := g.NewCache() newGroupCache := g.NewCache()
var keys []string keys := make([]string, 0, len(groupIDs))
for _, groupID := range groupIDs { for _, groupID := range groupIDs {
keys = append(keys, g.getSuperGroupMemberIDsKey(groupID)) keys = append(keys, g.getSuperGroupMemberIDsKey(groupID))
} }
new.AddKeys(keys...) newGroupCache.AddKeys(keys...)
return new
return newGroupCache
} }
// groupMembersHash. // groupMembersHash.
@ -351,10 +322,7 @@ func (g *GroupCacheRedis) GetGroupMembersHash(ctx context.Context, groupID strin
//) //)
} }
func (g *GroupCacheRedis) GetGroupMemberHashMap( func (g *GroupCacheRedis) GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*relationtb.GroupSimpleUserID, error) {
ctx context.Context,
groupIDs []string,
) (map[string]*relationtb.GroupSimpleUserID, error) {
res := make(map[string]*relationtb.GroupSimpleUserID) res := make(map[string]*relationtb.GroupSimpleUserID)
for _, groupID := range groupIDs { for _, groupID := range groupIDs {
hash, err := g.GetGroupMembersHash(ctx, groupID) hash, err := g.GetGroupMembersHash(ctx, groupID)
@ -368,26 +336,22 @@ func (g *GroupCacheRedis) GetGroupMemberHashMap(
} }
res[groupID] = &relationtb.GroupSimpleUserID{Hash: hash, MemberNum: uint32(num)} res[groupID] = &relationtb.GroupSimpleUserID{Hash: hash, MemberNum: uint32(num)}
} }
return res, nil return res, nil
} }
func (g *GroupCacheRedis) DelGroupMembersHash(groupID string) GroupCache { func (g *GroupCacheRedis) DelGroupMembersHash(groupID string) GroupCache {
cache := g.NewCache() cache := g.NewCache()
cache.AddKeys(g.getGroupMembersHashKey(groupID)) cache.AddKeys(g.getGroupMembersHashKey(groupID))
return cache return cache
} }
// groupMemberIDs. // groupMemberIDs.
func (g *GroupCacheRedis) GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error) { func (g *GroupCacheRedis) GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error) {
return getCache( return getCache(ctx, g.rcClient, g.getGroupMemberIDsKey(groupID), g.expireTime, func(ctx context.Context) ([]string, error) {
ctx,
g.rcClient,
g.getGroupMemberIDsKey(groupID),
g.expireTime,
func(ctx context.Context) ([]string, error) {
return g.groupMemberDB.FindMemberUserID(ctx, groupID) return g.groupMemberDB.FindMemberUserID(ctx, groupID)
}, })
)
} }
func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (map[string][]string, error) { func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (map[string][]string, error) {
@ -399,71 +363,53 @@ func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []str
} }
m[groupID] = userIDs m[groupID] = userIDs
} }
return m, nil return m, nil
} }
func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) GroupCache { func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) GroupCache {
cache := g.NewCache() cache := g.NewCache()
cache.AddKeys(g.getGroupMemberIDsKey(groupID)) cache.AddKeys(g.getGroupMemberIDsKey(groupID))
return cache return cache
} }
func (g *GroupCacheRedis) GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error) { func (g *GroupCacheRedis) GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error) {
return getCache( return getCache(ctx, g.rcClient, g.getJoinedGroupsKey(userID), g.expireTime, func(ctx context.Context) ([]string, error) {
ctx,
g.rcClient,
g.getJoinedGroupsKey(userID),
g.expireTime,
func(ctx context.Context) ([]string, error) {
return g.groupMemberDB.FindUserJoinedGroupID(ctx, userID) return g.groupMemberDB.FindUserJoinedGroupID(ctx, userID)
}, })
)
} }
func (g *GroupCacheRedis) DelJoinedGroupID(userIDs ...string) GroupCache { func (g *GroupCacheRedis) DelJoinedGroupID(userIDs ...string) GroupCache {
var keys []string keys := make([]string, 0, len(userIDs))
for _, userID := range userIDs { for _, userID := range userIDs {
keys = append(keys, g.getJoinedGroupsKey(userID)) keys = append(keys, g.getJoinedGroupsKey(userID))
} }
cache := g.NewCache() cache := g.NewCache()
cache.AddKeys(keys...) cache.AddKeys(keys...)
return cache return cache
} }
func (g *GroupCacheRedis) GetGroupMemberInfo( func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *relationtb.GroupMemberModel, err error) {
ctx context.Context, return getCache(ctx, g.rcClient, g.getGroupMemberInfoKey(groupID, userID), g.expireTime, func(ctx context.Context) (*relationtb.GroupMemberModel, error) {
groupID, userID string,
) (groupMember *relationtb.GroupMemberModel, err error) {
return getCache(
ctx,
g.rcClient,
g.getGroupMemberInfoKey(groupID, userID),
g.expireTime,
func(ctx context.Context) (*relationtb.GroupMemberModel, error) {
return g.groupMemberDB.Take(ctx, groupID, userID) return g.groupMemberDB.Take(ctx, groupID, userID)
}, })
)
} }
func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*relationtb.GroupMemberModel, error) {
	//var keys []string
	//for _, userID := range userIDs {
	//	keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
	//}
	//return batchGetCache(ctx, g.rcClient, keys, g.expireTime, g.GetGroupMemberIndex, func(ctx context.Context) ([]*relationtb.GroupMemberModel, error) {
	//	return g.groupMemberDB.Find(ctx, []string{groupID}, userIDs, nil)
	//})
	return batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
		return g.getGroupMemberInfoKey(groupID, userID)
	}, func(ctx context.Context, userID string) (*relationtb.GroupMemberModel, error) {
		return g.groupMemberDB.Take(ctx, groupID, userID)
	})
}
func (g *GroupCacheRedis) GetGroupMembersPage( func (g *GroupCacheRedis) GetGroupMembersPage(
@ -482,72 +428,58 @@ func (g *GroupCacheRedis) GetGroupMembersPage(
userIDs = groupMemberIDs userIDs = groupMemberIDs
} }
groupMembers, err = g.GetGroupMembersInfo(ctx, groupID, utils.Paginate(userIDs, int(showNumber), int(showNumber))) groupMembers, err = g.GetGroupMembersInfo(ctx, groupID, utils.Paginate(userIDs, int(showNumber), int(showNumber)))
return uint32(len(userIDs)), groupMembers, err return uint32(len(userIDs)), groupMembers, err
} }
func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*relationtb.GroupMemberModel, err error) {
	groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
	if err != nil {
		return nil, err
	}
	return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}
func (g *GroupCacheRedis) GetAllGroupMemberInfo(ctx context.Context, groupID string) ([]*relationtb.GroupMemberModel, error) {
	groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
	if err != nil {
		return nil, err
	}
	//var keys []string
	//for _, groupMemberID := range groupMemberIDs {
	//	keys = append(keys, g.getGroupMemberInfoKey(groupID, groupMemberID))
	//}
	//return batchGetCache(ctx, g.rcClient, keys, g.expireTime, g.GetGroupMemberIndex, func(ctx context.Context) ([]*relationtb.GroupMemberModel, error) {
	//	return g.groupMemberDB.Find(ctx, []string{groupID}, groupMemberIDs, nil)
	//})
	return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}
func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) GroupCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
	}
	cache := g.NewCache()
	cache.AddKeys(keys...)
	return cache
}
func (g *GroupCacheRedis) GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error) {
	return getCache(ctx, g.rcClient, g.getGroupMemberNumKey(groupID), g.expireTime, func(ctx context.Context) (int64, error) {
		return g.groupMemberDB.TakeGroupMemberNum(ctx, groupID)
	})
}
func (g *GroupCacheRedis) DelGroupsMemberNum(groupID ...string) GroupCache {
	keys := make([]string, 0, len(groupID))
	for _, groupID := range groupID {
		keys = append(keys, g.getGroupMemberNumKey(groupID))
	}
	cache := g.NewCache()
	cache.AddKeys(keys...)
	return cache
}
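
// Illustrative call site (a sketch, not code from this change): the Del*
// builders above only collect keys; the returned GroupCache is assumed to
// expose the meta cache's ExecDel, which is where the keys are actually
// invalidated (see metaCacheRedis.ExecDel below).
//
//	if err := g.DelGroupMemberIDs(groupID).ExecDel(ctx); err != nil {
//		log.ZWarn(ctx, "invalidate group member ID cache failed", err, "groupID", groupID)
//	}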

@ -18,7 +18,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"time" "time"
"github.com/dtm-labs/rockscache" "github.com/dtm-labs/rockscache"
@ -59,27 +58,40 @@ type metaCacheRedis struct {
func (m *metaCacheRedis) ExecDel(ctx context.Context) error {
	if len(m.keys) > 0 {
		log.ZDebug(ctx, "delete cache", "keys", m.keys)
		for _, key := range m.keys {
			for i := 0; i < m.maxRetryTimes; i++ {
				if err := m.rcClient.TagAsDeleted(key); err != nil {
					log.ZError(ctx, "delete cache failed", err, "key", key)
					time.Sleep(m.retryInterval)
					continue
				}
				break
			}
			//retryTimes := 0
			//for {
			//	m.rcClient.TagAsDeleted()
			//	if err := m.rcClient.TagAsDeletedBatch2(ctx, []string{key}); err != nil {
			//		if retryTimes >= m.maxRetryTimes {
			//			err = errs.ErrInternalServer.Wrap(
			//				fmt.Sprintf(
			//					"delete cache error: %v, keys: %v, retry times %d, please check redis server",
			//					err,
			//					key,
			//					retryTimes,
			//				),
			//			)
			//			log.ZWarn(ctx, "delete cache failed, please handle keys", err, "keys", key)
			//			return err
			//		}
			//		retryTimes++
			//	} else {
			//		break
			//	}
			//}
		}
	}
	return nil
}
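
// Sketch of the retry policy used above, in isolation (retryDelete is a
// hypothetical helper, not part of this package): each key gets up to
// maxRetryTimes attempts with retryInterval between failures, and the loop
// gives up quietly after the last attempt instead of returning an error.
//
//	func retryDelete(del func() error, maxRetryTimes int, retryInterval time.Duration) {
//		for i := 0; i < maxRetryTimes; i++ {
//			if err := del(); err != nil {
//				time.Sleep(retryInterval)
//				continue
//			}
//			break
//		}
//	}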
@ -103,16 +115,11 @@ func GetDefaultOpt() rockscache.Options {
opts := rockscache.NewDefaultOptions() opts := rockscache.NewDefaultOptions()
opts.StrongConsistency = true opts.StrongConsistency = true
opts.RandomExpireAdjustment = 0.2 opts.RandomExpireAdjustment = 0.2
return opts return opts
} }
func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
	var t T
	var write bool
	v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
@ -125,6 +132,7 @@ func getCache[T any](
return "", utils.Wrap(err, "") return "", utils.Wrap(err, "")
} }
write = true write = true
return string(bs), nil return string(bs), nil
}) })
if err != nil { if err != nil {
@ -139,95 +147,116 @@ func getCache[T any](
err = json.Unmarshal([]byte(v), &t) err = json.Unmarshal([]byte(v), &t)
if err != nil { if err != nil {
log.ZError(ctx, "cache json.Unmarshal failed", err, "key", key, "value", v, "expire", expire) log.ZError(ctx, "cache json.Unmarshal failed", err, "key", key, "value", v, "expire", expire)
return t, utils.Wrap(err, "") return t, utils.Wrap(err, "")
} }
return t, nil return t, nil
} }
// func batchGetCache[T any](ctx context.Context, rcClient *rockscache.Client, keys []string, expire time.Duration, keyIndexFn func(t T, keys []string) (int, error), fn func(ctx context.Context) ([]T,
// error)) ([]T, error) {
// batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
// values := make(map[int]string)
// tArrays, err := fn(ctx)
// if err != nil {
// return nil, err
// }
// for _, v := range tArrays {
// index, err := keyIndexFn(v, keys)
// if err != nil {
// continue
// }
// bs, err := json.Marshal(v)
// if err != nil {
// return nil, utils.Wrap(err, "marshal failed")
// }
// values[index] = string(bs)
// }
// return values, nil
// })
// if err != nil {
// return nil, err
// }
// var tArrays []T
// for _, v := range batchMap {
// if v != "" {
// var t T
// err = json.Unmarshal([]byte(v), &t)
// if err != nil {
// return nil, utils.Wrap(err, "unmarshal failed")
// }
// tArrays = append(tArrays, t)
// }
// }
// return tArrays, nil
//}
func batchGetCache2[T any, K comparable](
	ctx context.Context,
	rcClient *rockscache.Client,
	expire time.Duration,
	keys []K,
	keyFn func(key K) string,
	fns func(ctx context.Context, key K) (T, error),
) ([]T, error) {
	if len(keys) == 0 {
		return nil, nil
	}
	res := make([]T, 0, len(keys))
	for _, key := range keys {
		val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
			return fns(ctx, key)
		})
		if err != nil {
			return nil, err
		}
		res = append(res, val)
	}
	return res, nil
}
//func batchGetCacheMap[T any](
// ctx context.Context,
// rcClient *rockscache.Client,
// keys, originKeys []string,
// expire time.Duration,
// keyIndexFn func(s string, keys []string) (int, error),
// fn func(ctx context.Context) (map[string]T, error),
//) (map[string]T, error) {
// batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
// tArrays, err := fn(ctx)
// if err != nil {
// return nil, err
// }
// values := make(map[int]string)
// for k, v := range tArrays {
// index, err := keyIndexFn(k, originKeys)
// if err != nil {
// continue
// }
// bs, err := json.Marshal(v)
// if err != nil {
// return nil, utils.Wrap(err, "marshal failed")
// }
// values[index] = string(bs)
// }
// return values, nil
// })
// if err != nil {
// return nil, err
// }
// tMap := make(map[string]T)
// for i, v := range batchMap {
// if v != "" {
// var t T
// err = json.Unmarshal([]byte(v), &t)
// if err != nil {
// return nil, utils.Wrap(err, "unmarshal failed")
// }
// tMap[originKeys[i]] = t
// }
// }
// return tMap, nil
//}
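
// Usage note (illustrative): batchGetCache2 is what the callers in this commit
// switch to (for example GetUsersInfo and GetGroupMembersInfo); they supply a
// key builder plus a single-record fetch, so each key is resolved through
// getCache. The names u.rcClient, u.getUserInfoKey and u.userDB below come
// from the user cache later in this same commit.
//
//	return batchGetCache2(ctx, u.rcClient, u.expireTime, userIDs,
//		func(userID string) string { return u.getUserInfoKey(userID) },
//		func(ctx context.Context, userID string) (*relationtb.UserModel, error) {
//			return u.userDB.Take(ctx, userID)
//		})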

@ -16,13 +16,16 @@ package cache
import (
	"context"
	"errors"
	"strconv"
	"time"

	"github.com/dtm-labs/rockscache"
	unrelationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/unrelation"
	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"

	"github.com/OpenIMSDK/tools/errs"
	"github.com/gogo/protobuf/jsonpb"
@ -33,7 +36,6 @@ import (
"github.com/OpenIMSDK/tools/utils" "github.com/OpenIMSDK/tools/utils"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
) )
@ -105,11 +107,7 @@ type MsgModel interface {
GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error)
SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error
DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error
	GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err error)
SetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error) SetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error)
UserDeleteMsgs(ctx context.Context, conversationID string, seqs []int64, userID string) error UserDeleteMsgs(ctx context.Context, conversationID string, seqs []int64, userID string) error
DelUserDeleteMsgsList(ctx context.Context, conversationID string, seqs []int64) DelUserDeleteMsgsList(ctx context.Context, conversationID string, seqs []int64)
@ -122,12 +120,7 @@ type MsgModel interface {
JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error)
GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error) GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error)
DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error
	SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error)
GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error)
SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error
LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error
@ -158,50 +151,51 @@ func (c *msgCache) getHasReadSeqKey(conversationID string, userID string) string
return hasReadSeq + userID + ":" + conversationID return hasReadSeq + userID + ":" + conversationID
} }
func (c *msgCache) setSeq(ctx context.Context, conversationID string, seq int64, getkey func(conversationID string) string) error {
	return utils.Wrap1(c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err())
}
func (c *msgCache) getSeq(ctx context.Context, conversationID string, getkey func(conversationID string) string) (int64, error) {
	return utils.Wrap2(c.rdb.Get(ctx, getkey(conversationID)).Int64())
}
func (c *msgCache) getSeqs(ctx context.Context, items []string, getkey func(s string) string) (m map[string]int64, err error) {
	m = make(map[string]int64, len(items))
	for i, v := range items {
		res, err := c.rdb.Get(ctx, getkey(v)).Result()
		if err != nil && err != redis.Nil {
			return nil, errs.Wrap(err)
		}
		val := utils.StringToInt64(res)
		if val != 0 {
			m[items[i]] = val
		}
	}

	return m, nil
//pipe := c.rdb.Pipeline()
//for _, v := range items {
// if err := pipe.Get(ctx, getkey(v)).Err(); err != nil && err != redis.Nil {
// return nil, errs.Wrap(err)
// }
//}
//result, err := pipe.Exec(ctx)
//if err != nil && err != redis.Nil {
// return nil, errs.Wrap(err)
//}
//m = make(map[string]int64, len(items))
//for i, v := range result {
// seq := v.(*redis.StringCmd)
// if seq.Err() != nil && seq.Err() != redis.Nil {
// return nil, errs.Wrap(v.Err())
// }
// val := utils.StringToInt64(seq.Val())
// if val != 0 {
// m[items[i]] = val
// }
//}
//return m, nil
}
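
// Alternative sketch (not what this commit does): the same values could be
// fetched in a single round trip with MGET instead of one GET per item, at
// the cost of losing the per-key error handling above.
//
//	keys := make([]string, len(items))
//	for i, v := range items {
//		keys[i] = getkey(v)
//	}
//	vals, err := c.rdb.MGet(ctx, keys...).Result()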
func (c *msgCache) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error { func (c *msgCache) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
@ -221,15 +215,21 @@ func (c *msgCache) SetMinSeq(ctx context.Context, conversationID string, minSeq
} }
func (c *msgCache) setSeqs(ctx context.Context, seqs map[string]int64, getkey func(key string) string) error {
	for conversationID, seq := range seqs {
		if err := c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err(); err != nil {
			return errs.Wrap(err)
		}
	}

	return nil
	//pipe := c.rdb.Pipeline()
//for k, seq := range seqs {
// err := pipe.Set(ctx, getkey(k), seq, 0).Err()
// if err != nil {
// return errs.Wrap(err)
// }
//}
//_, err := pipe.Exec(ctx)
//return err
} }
func (c *msgCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error { func (c *msgCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
@ -252,30 +252,17 @@ func (c *msgCache) GetConversationUserMinSeq(ctx context.Context, conversationID
return utils.Wrap2(c.rdb.Get(ctx, c.getConversationUserMinSeqKey(conversationID, userID)).Int64()) return utils.Wrap2(c.rdb.Get(ctx, c.getConversationUserMinSeqKey(conversationID, userID)).Int64())
} }
func (c *msgCache) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (m map[string]int64, err error) {
	return c.getSeqs(ctx, userIDs, func(userID string) string {
		return c.getConversationUserMinSeqKey(conversationID, userID)
	})
}
func (c *msgCache) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
	return utils.Wrap1(c.rdb.Set(ctx, c.getConversationUserMinSeqKey(conversationID, userID), minSeq, 0).Err())
}
func (c *msgCache) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
	return c.setSeqs(ctx, seqs, func(userID string) string {
		return c.getConversationUserMinSeqKey(conversationID, userID)
	})
@ -303,11 +290,7 @@ func (c *msgCache) UserSetHasReadSeqs(ctx context.Context, userID string, hasRea
}) })
} }
func (c *msgCache) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	return c.getSeqs(ctx, conversationIDs, func(conversationID string) string {
		return c.getHasReadSeqKey(conversationID, userID)
	})
@ -319,6 +302,7 @@ func (c *msgCache) GetHasReadSeq(ctx context.Context, userID string, conversatio
func (c *msgCache) AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error { func (c *msgCache) AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID) key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID)
return errs.Wrap(c.rdb.HSet(ctx, key, token, flag).Err()) return errs.Wrap(c.rdb.HSet(ctx, key, token, flag).Err())
} }
@ -332,6 +316,7 @@ func (c *msgCache) GetTokensWithoutError(ctx context.Context, userID string, pla
for k, v := range m { for k, v := range m {
mm[k] = utils.StringToInt(v) mm[k] = utils.StringToInt(v)
} }
return mm, nil return mm, nil
} }
@ -341,11 +326,13 @@ func (c *msgCache) SetTokenMapByUidPid(ctx context.Context, userID string, platf
for k, v := range m { for k, v := range m {
mm[k] = v mm[k] = v
} }
return errs.Wrap(c.rdb.HSet(ctx, key, mm).Err()) return errs.Wrap(c.rdb.HSet(ctx, key, mm).Err())
} }
func (c *msgCache) DeleteTokenByUidPid(ctx context.Context, userID string, platform int, fields []string) error { func (c *msgCache) DeleteTokenByUidPid(ctx context.Context, userID string, platform int, fields []string) error {
key := uidPidToken + userID + ":" + constant.PlatformIDToName(platform) key := uidPidToken + userID + ":" + constant.PlatformIDToName(platform)
return errs.Wrap(c.rdb.HDel(ctx, key, fields...).Err()) return errs.Wrap(c.rdb.HDel(ctx, key, fields...).Err())
} }
@ -357,58 +344,86 @@ func (c *msgCache) allMessageCacheKey(conversationID string) string {
return messageCache + conversationID + "_*" return messageCache + conversationID + "_*"
} }
func (c *msgCache) GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsgs []*sdkws.MsgData, failedSeqs []int64, err error) {
	for _, seq := range seqs {
		res, err := c.rdb.Get(ctx, c.getMessageCacheKey(conversationID, seq)).Result()
		if err != nil {
			log.ZError(ctx, "GetMessagesBySeq failed", err, "conversationID", conversationID, "seq", seq)
			failedSeqs = append(failedSeqs, seq)
			continue
		}
		msg := sdkws.MsgData{}
		if err = msgprocessor.String2Pb(res, &msg); err != nil {
			log.ZError(ctx, "GetMessagesBySeq Unmarshal failed", err, "res", res, "conversationID", conversationID, "seq", seq)
			failedSeqs = append(failedSeqs, seq)
			continue
		}
		if msg.Status == constant.MsgDeleted {
			failedSeqs = append(failedSeqs, seq)
			continue
		}
		seqMsgs = append(seqMsgs, &msg)
	}

	return
//pipe := c.rdb.Pipeline()
//for _, v := range seqs {
// // MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1
// key := c.getMessageCacheKey(conversationID, v)
// if err := pipe.Get(ctx, key).Err(); err != nil && err != redis.Nil {
// return nil, nil, err
// }
//}
//result, err := pipe.Exec(ctx)
//for i, v := range result {
// cmd := v.(*redis.StringCmd)
// if cmd.Err() != nil {
// failedSeqs = append(failedSeqs, seqs[i])
// } else {
// msg := sdkws.MsgData{}
// err = msgprocessor.String2Pb(cmd.Val(), &msg)
// if err == nil {
// if msg.Status != constant.MsgDeleted {
// seqMsgs = append(seqMsgs, &msg)
// continue
// }
// } else {
// log.ZWarn(ctx, "UnmarshalString failed", err, "conversationID", conversationID, "seq", seqs[i], "msg", cmd.Val())
// }
// failedSeqs = append(failedSeqs, seqs[i])
// }
//}
//return seqMsgs, failedSeqs, err
}
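
// Hypothetical caller sketch (findMsgsFromStore is a placeholder, not an API
// of this package): seqs that miss the cache or fail to decode come back in
// failedSeqs, so a typical caller re-reads those from persistent storage and
// merges the two result sets.
//
//	msgs, failedSeqs, err := c.GetMessagesBySeq(ctx, conversationID, seqs)
//	if err != nil {
//		return nil, err
//	}
//	if len(failedSeqs) > 0 {
//		rest, err := findMsgsFromStore(ctx, conversationID, failedSeqs)
//		if err != nil {
//			return nil, err
//		}
//		msgs = append(msgs, rest...)
//	}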
func (c *msgCache) SetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error) {
	for _, msg := range msgs {
		s, err := msgprocessor.Pb2String(msg)
		if err != nil {
			return 0, errs.Wrap(err)
		}
		key := c.getMessageCacheKey(conversationID, msg.Seq)
		if err := c.rdb.Set(ctx, key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
			return 0, errs.Wrap(err)
		}
	}

	return len(msgs), nil
	//pipe := c.rdb.Pipeline()
//var failedMsgs []*sdkws.MsgData
//for _, msg := range msgs {
// key := c.getMessageCacheKey(conversationID, msg.Seq)
// s, err := msgprocessor.Pb2String(msg)
// if err != nil {
// return 0, errs.Wrap(err)
// }
// err = pipe.Set(ctx, key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err()
// if err != nil {
// failedMsgs = append(failedMsgs, msg)
// log.ZWarn(ctx, "set msg 2 cache failed", err, "msg", failedMsgs)
// }
//}
//_, err := pipe.Exec(ctx)
//return len(failedMsgs), err
} }
func (c *msgCache) getMessageDelUserListKey(conversationID string, seq int64) string { func (c *msgCache) getMessageDelUserListKey(conversationID string, seq int64) string {
@ -420,27 +435,47 @@ func (c *msgCache) getUserDelList(conversationID, userID string) string {
} }
func (c *msgCache) UserDeleteMsgs(ctx context.Context, conversationID string, seqs []int64, userID string) error {
	for _, seq := range seqs {
		delUserListKey := c.getMessageDelUserListKey(conversationID, seq)
		userDelListKey := c.getUserDelList(conversationID, userID)
		err := c.rdb.SAdd(ctx, delUserListKey, userID).Err()
		if err != nil {
			return errs.Wrap(err)
		}
		err = c.rdb.SAdd(ctx, userDelListKey, seq).Err()
		if err != nil {
			return errs.Wrap(err)
		}
		if err := c.rdb.Expire(ctx, delUserListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
			return errs.Wrap(err)
		}
		if err := c.rdb.Expire(ctx, userDelListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
			return errs.Wrap(err)
		}
	}

	return nil
//pipe := c.rdb.Pipeline()
//for _, seq := range seqs {
// delUserListKey := c.getMessageDelUserListKey(conversationID, seq)
// userDelListKey := c.getUserDelList(conversationID, userID)
// err := pipe.SAdd(ctx, delUserListKey, userID).Err()
// if err != nil {
// return errs.Wrap(err)
// }
// err = pipe.SAdd(ctx, userDelListKey, seq).Err()
// if err != nil {
// return errs.Wrap(err)
// }
// if err := pipe.Expire(ctx, delUserListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
// return errs.Wrap(err)
// }
// if err := pipe.Expire(ctx, userDelListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
// return errs.Wrap(err)
// }
//}
//_, err := pipe.Exec(ctx)
//return errs.Wrap(err)
} }
func (c *msgCache) GetUserDelList(ctx context.Context, userID, conversationID string) (seqs []int64, err error) { func (c *msgCache) GetUserDelList(ctx context.Context, userID, conversationID string) (seqs []int64, err error) {
@ -452,6 +487,7 @@ func (c *msgCache) GetUserDelList(ctx context.Context, userID, conversationID st
for i, v := range result { for i, v := range result {
seqs[i] = utils.StringToInt64(v) seqs[i] = utils.StringToInt64(v)
} }
return seqs, nil return seqs, nil
} }
@ -460,67 +496,102 @@ func (c *msgCache) DelUserDeleteMsgsList(ctx context.Context, conversationID str
		delUsers, err := c.rdb.SMembers(ctx, c.getMessageDelUserListKey(conversationID, seq)).Result()
		if err != nil {
			log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
			continue
		}
		if len(delUsers) > 0 {
			var failedFlag bool
			for _, userID := range delUsers {
				err = c.rdb.SRem(ctx, c.getUserDelList(conversationID, userID), seq).Err()
				if err != nil {
					failedFlag = true
					log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq, "userID", userID)
				}
			}
			if !failedFlag {
				if err := c.rdb.Del(ctx, c.getMessageDelUserListKey(conversationID, seq)).Err(); err != nil {
					log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
				}
			}
		}
	}
//for _, seq := range seqs {
// delUsers, err := c.rdb.SMembers(ctx, c.getMessageDelUserListKey(conversationID, seq)).Result()
// if err != nil {
// log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
// continue
// }
// if len(delUsers) > 0 {
// pipe := c.rdb.Pipeline()
// var failedFlag bool
// for _, userID := range delUsers {
// err = pipe.SRem(ctx, c.getUserDelList(conversationID, userID), seq).Err()
// if err != nil {
// failedFlag = true
// log.ZWarn(
// ctx,
// "DelUserDeleteMsgsList failed",
// err,
// "conversationID",
// conversationID,
// "seq",
// seq,
// "userID",
// userID,
// )
// }
// }
// if !failedFlag {
// if err := pipe.Del(ctx, c.getMessageDelUserListKey(conversationID, seq)).Err(); err != nil {
// log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
// }
// }
// if _, err := pipe.Exec(ctx); err != nil {
// log.ZError(ctx, "pipe exec failed", err, "conversationID", conversationID, "seq", seq)
// }
// }
//}
} }
func (c *msgCache) DeleteMessages(ctx context.Context, conversationID string, seqs []int64) error {
	for _, seq := range seqs {
		if err := c.rdb.Del(ctx, c.getMessageCacheKey(conversationID, seq)).Err(); err != nil {
			return errs.Wrap(err)
		}
	}

	return nil
	//pipe := c.rdb.Pipeline()
//for _, seq := range seqs {
// if err := pipe.Del(ctx, c.getMessageCacheKey(conversationID, seq)).Err(); err != nil {
// return errs.Wrap(err)
// }
//}
//_, err := pipe.Exec(ctx)
//return errs.Wrap(err)
} }
func (c *msgCache) CleanUpOneConversationAllMsg(ctx context.Context, conversationID string) error {
	vals, err := c.rdb.Keys(ctx, c.allMessageCacheKey(conversationID)).Result()
	if errors.Is(err, redis.Nil) {
		return nil
	}
	if err != nil {
		return errs.Wrap(err)
	}
	for _, v := range vals {
		if err := c.rdb.Del(ctx, v).Err(); err != nil {
			return errs.Wrap(err)
		}
	}

	return nil
	//pipe := c.rdb.Pipeline()
//for _, v := range vals {
// if err := pipe.Del(ctx, v).Err(); err != nil {
// return errs.Wrap(err)
// }
//}
//_, err = pipe.Exec(ctx)
//return errs.Wrap(err)
} }
func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []int64) error { func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []int64) error {
@ -528,13 +599,15 @@ func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []in
		key := c.getMessageCacheKey(userID, seq)
		result, err := c.rdb.Get(ctx, key).Result()
		if err != nil {
			if errors.Is(err, redis.Nil) {
				continue
			}
			return errs.Wrap(err)
		}
		var msg sdkws.MsgData
		err = jsonpb.UnmarshalString(result, &msg)
		if err != nil {
			return err
} }
msg.Status = constant.MsgDeleted msg.Status = constant.MsgDeleted
@ -546,6 +619,7 @@ func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []in
return errs.Wrap(err) return errs.Wrap(err)
} }
} }
return nil return nil
} }
@ -571,20 +645,12 @@ func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32
func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) { func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
result, err := c.rdb.Get(ctx, sendMsgFailedFlag+id).Int() result, err := c.rdb.Get(ctx, sendMsgFailedFlag+id).Int()
return int32(result), errs.Wrap(err) return int32(result), errs.Wrap(err)
} }
func (c *msgCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
	return errs.Wrap(c.rdb.Set(ctx, fcmToken+account+":"+strconv.Itoa(platformID), fcmToken, time.Duration(expireTime)*time.Second).Err())
}
func (c *msgCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) { func (c *msgCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
@ -597,6 +663,7 @@ func (c *msgCache) DelFcmToken(ctx context.Context, account string, platformID i
func (c *msgCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) { func (c *msgCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
seq, err := c.rdb.Incr(ctx, userBadgeUnreadCountSum+userID).Result() seq, err := c.rdb.Incr(ctx, userBadgeUnreadCountSum+userID).Result()
return int(seq), errs.Wrap(err) return int(seq), errs.Wrap(err)
} }
@ -610,11 +677,13 @@ func (c *msgCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string
func (c *msgCache) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error { func (c *msgCache) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
key := exTypeKeyLocker + clientMsgID + "_" + TypeKey key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
return errs.Wrap(c.rdb.SetNX(ctx, key, 1, time.Minute).Err()) return errs.Wrap(c.rdb.SetNX(ctx, key, 1, time.Minute).Err())
} }
func (c *msgCache) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error { func (c *msgCache) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
key := exTypeKeyLocker + clientMsgID + "_" + TypeKey key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
return errs.Wrap(c.rdb.Del(ctx, key).Err()) return errs.Wrap(c.rdb.Del(ctx, key).Err())
}
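
// Illustrative usage of the pair above: SetNX acts as a best-effort, one-minute
// mutex per clientMsgID+TypeKey, and Del releases it.
//
//	if err := c.LockMessageTypeKey(ctx, clientMsgID, typeKey); err != nil {
//		return err
//	}
//	defer c.UnLockMessageTypeKey(ctx, clientMsgID, typeKey)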
@ -629,6 +698,7 @@ func (c *msgCache) getMessageReactionExPrefix(clientMsgID string, sessionType in
case constant.NotificationChatType: case constant.NotificationChatType:
return "EX_NOTIFICATION" + clientMsgID return "EX_NOTIFICATION" + clientMsgID
} }
return "" return ""
} }
@ -637,6 +707,7 @@ func (c *msgCache) JudgeMessageReactionExist(ctx context.Context, clientMsgID st
if err != nil { if err != nil {
return false, utils.Wrap(err, "") return false, utils.Wrap(err, "")
} }
return n > 0, nil return n > 0, nil
} }
@ -649,21 +720,11 @@ func (c *msgCache) SetMessageTypeKeyValue(
return errs.Wrap(c.rdb.HSet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey, value).Err()) return errs.Wrap(c.rdb.HSet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey, value).Err())
} }
func (c *msgCache) SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) {
	return utils.Wrap2(c.rdb.Expire(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), expiration).Result())
}
func (c *msgCache) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) {
	return utils.Wrap2(c.rdb.HGet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey).Result())
} }

@ -17,6 +17,7 @@ package cache
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"hash/crc32" "hash/crc32"
"strconv" "strconv"
"time" "time"
@ -70,6 +71,7 @@ func NewUserCacheRedis(
options rockscache.Options, options rockscache.Options,
) UserCache { ) UserCache {
rcClient := rockscache.NewClient(rdb, options) rcClient := rockscache.NewClient(rdb, options)
return &UserCacheRedis{ return &UserCacheRedis{
rdb: rdb, rdb: rdb,
metaCache: NewMetaCacheRedis(rcClient), metaCache: NewMetaCacheRedis(rcClient),
@ -97,10 +99,6 @@ func (u *UserCacheRedis) getUserGlobalRecvMsgOptKey(userID string) string {
return userGlobalRecvMsgOptKey + userID return userGlobalRecvMsgOptKey + userID
} }
func (u *UserCacheRedis) getUserStatusHashKey(userID string, Id int32) string {
return userID + "_" + string(Id) + platformID
}
func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userInfo *relationtb.UserModel, err error) { func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userInfo *relationtb.UserModel, err error) {
return getCache( return getCache(
ctx, ctx,
@ -114,36 +112,42 @@ func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userIn
} }
func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*relationtb.UserModel, error) {
	//var keys []string
	//for _, userID := range userIDs {
	//	keys = append(keys, u.getUserInfoKey(userID))
	//}
	//return batchGetCache(
	//	ctx,
	//	u.rcClient,
	//	keys,
	//	u.expireTime,
	//	func(user *relationtb.UserModel, keys []string) (int, error) {
	//		for i, key := range keys {
	//			if key == u.getUserInfoKey(user.UserID) {
	//				return i, nil
	//			}
	//		}
	//		return 0, errIndex
	//	},
	//	func(ctx context.Context) ([]*relationtb.UserModel, error) {
	//		return u.userDB.Find(ctx, userIDs)
	//	},
	//)
return batchGetCache2(ctx, u.rcClient, u.expireTime, userIDs, func(userID string) string {
return u.getUserInfoKey(userID)
}, func(ctx context.Context, userID string) (*relationtb.UserModel, error) {
return u.userDB.Take(ctx, userID)
})
} }
func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) UserCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, u.getUserInfoKey(userID))
	}
	cache := u.NewCache()
	cache.AddKeys(keys...)
	return cache
}
@ -160,22 +164,19 @@ func (u *UserCacheRedis) GetUserGlobalRecvMsgOpt(ctx context.Context, userID str
} }
func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, u.getUserGlobalRecvMsgOptKey(userID))
	}
	cache := u.NewCache()
	cache.AddKeys(keys...)

	return cache
}
// GetUserStatus get user status.
func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
	userStatus := make([]*user.OnlineStatus, 0, len(userIDs))
	for _, userID := range userIDs {
		UserIDNum := crc32.ChecksumIEEE([]byte(userID))
		modKey := strconv.Itoa(int(UserIDNum % statusMod))
@ -183,13 +184,14 @@ func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([
		key := olineStatusKey + modKey
		result, err := u.rdb.HGet(ctx, key, userID).Result()
		if err != nil {
			if errors.Is(err, redis.Nil) {
				// key or field does not exist
				userStatus = append(userStatus, &user.OnlineStatus{
					UserID:      userID,
					Status:      constant.Offline,
					PlatformIDs: nil,
				})

				continue
			} else {
				return nil, errs.Wrap(err)
@ -201,9 +203,10 @@ func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([
		}
		onlineStatus.UserID = userID
		onlineStatus.Status = constant.Online
		userStatus = append(userStatus, &onlineStatus)
	}

	return userStatus, nil
}
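
// Illustrative sketch (not part of this change): how a userID is mapped to one
// of the online-status hash keys used above. olineStatusKey and statusMod are
// the package-level values referenced in GetUserStatus/SetUserStatus; the
// helper itself is hypothetical.
//
//	func onlineStatusShardKey(userID string) string {
//		return olineStatusKey + strconv.Itoa(int(crc32.ChecksumIEEE([]byte(userID))%statusMod))
//	}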
// SetUserStatus Set the user status and save it in redis. // SetUserStatus Set the user status and save it in redis.
@ -224,15 +227,16 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
Status: constant.Online, Status: constant.Online,
PlatformIDs: []int32{platformID}, PlatformIDs: []int32{platformID},
} }
			jsonData, err2 := json.Marshal(&onlineStatus)
			if err2 != nil {
				return errs.Wrap(err2)
			}
			_, err2 = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
			if err2 != nil {
				return errs.Wrap(err2)
			}
			u.rdb.Expire(ctx, key, userOlineStatusExpireTime)

			return nil
} }
} }
@ -240,7 +244,7 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
isNil := false isNil := false
result, err := u.rdb.HGet(ctx, key, userID).Result() result, err := u.rdb.HGet(ctx, key, userID).Result()
if err != nil { if err != nil {
		if errors.Is(err, redis.Nil) {
isNil = true isNil = true
} else { } else {
return errs.Wrap(err) return errs.Wrap(err)
@ -248,9 +252,25 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
} }
	if status == constant.Offline {
err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key)
if err != nil {
return err
}
} else {
err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key)
if err != nil {
return errs.Wrap(err)
}
}
return nil
}
func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error {
	if isNil {
		log.ZWarn(ctx, "this user not online,maybe trigger order not right",
			err, "userStatus", status)
		return nil
	}
	var onlineStatus user.OnlineStatus
@ -280,17 +300,23 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
return errs.Wrap(err) return errs.Wrap(err)
} }
} }

	return nil
}

func (u *UserCacheRedis) refreshStatusOnline(ctx context.Context, userID string, platformID int32, isNil bool, err error, result, key string) error {
	var onlineStatus user.OnlineStatus
	if !isNil {
		err2 := json.Unmarshal([]byte(result), &onlineStatus)
		if err2 != nil {
			return errs.Wrap(err2)
		}
		onlineStatus.PlatformIDs = RemoveRepeatedElementsInList(append(onlineStatus.PlatformIDs, platformID))
	} else {
		onlineStatus.PlatformIDs = append(onlineStatus.PlatformIDs, platformID)
	}
	onlineStatus.Status = constant.Online
	onlineStatus.UserID = userID
	newjsonData, err := json.Marshal(&onlineStatus)
	if err != nil {
		return errs.Wrap(err)
@ -300,7 +326,22 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
return errs.Wrap(err) return errs.Wrap(err)
} }
return nil
} }
type Comparable interface {
~int | ~string | ~float64 | ~int32
}
func RemoveRepeatedElementsInList[T Comparable](slc []T) []T {
var result []T
tempMap := map[T]struct{}{}
for _, e := range slc {
if _, found := tempMap[e]; !found {
tempMap[e] = struct{}{}
result = append(result, e)
}
}
return result
}
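
// Example (illustrative): RemoveRepeatedElementsInList keeps the first
// occurrence of each element and preserves order, which is how duplicate
// platform IDs are avoided in refreshStatusOnline above.
//
//	ids := RemoveRepeatedElementsInList([]int32{1, 2, 2, 3, 1})
//	// ids == []int32{1, 2, 3}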

@ -50,7 +50,7 @@ type ConversationDatabase interface {
GetConversationIDs(ctx context.Context, userID string) ([]string, error) GetConversationIDs(ctx context.Context, userID string) ([]string, error)
GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error)
GetAllConversationIDs(ctx context.Context) ([]string, error) GetAllConversationIDs(ctx context.Context) ([]string, error)
	//GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error)
GetConversationIDsNeedDestruct(ctx context.Context) ([]*relationtb.ConversationModel, error) GetConversationIDsNeedDestruct(ctx context.Context) ([]*relationtb.ConversationModel, error)
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
@ -295,9 +295,9 @@ func (c *conversationDatabase) GetAllConversationIDs(ctx context.Context) ([]str
return c.conversationDB.GetAllConversationIDs(ctx) return c.conversationDB.GetAllConversationIDs(ctx)
} }
//func (c *conversationDatabase) GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) {
//	return c.cache.GetUserAllHasReadSeqs(ctx, ownerUserID)
//}
func (c *conversationDatabase) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error) { func (c *conversationDatabase) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
return c.conversationDB.GetConversationsByConversationID(ctx, conversationIDs) return c.conversationDB.GetConversationsByConversationID(ctx, conversationIDs)

@ -60,9 +60,11 @@ func (g *ConversationLocalCache) GetConversationIDs(ctx context.Context, userID
if err != nil { if err != nil {
return nil, err return nil, err
} }

	g.lock.Lock()
	hash, ok := g.conversationIDs[userID]
	g.lock.Unlock()

	if !ok || hash.hash != resp.Hash {
conversationIDsResp, err := g.client.Client.GetConversationIDs(ctx, &conversation.GetConversationIDsReq{ conversationIDsResp, err := g.client.Client.GetConversationIDs(ctx, &conversation.GetConversationIDsReq{
UserID: userID, UserID: userID,
@ -70,11 +72,16 @@ func (g *ConversationLocalCache) GetConversationIDs(ctx context.Context, userID
if err != nil { if err != nil {
return nil, err return nil, err
} }
g.lock.Lock()
defer g.lock.Unlock()
g.conversationIDs[userID] = Hash{ g.conversationIDs[userID] = Hash{
hash: resp.Hash, hash: resp.Hash,
ids: conversationIDsResp.ConversationIDs, ids: conversationIDsResp.ConversationIDs,
} }
return conversationIDsResp.ConversationIDs, nil return conversationIDsResp.ConversationIDs, nil
} }
return hash.ids, nil return hash.ids, nil
} }

@ -24,6 +24,7 @@ func (l *LogGorm) Search(ctx context.Context, keyword string, start time.Time, e
if end.UnixMilli() != 0 { if end.UnixMilli() != 0 {
db = l.db.WithContext(ctx).Where("create_time <= ?", end) db = l.db.WithContext(ctx).Where("create_time <= ?", end)
} }
db = db.Order("create_time desc")
return ormutil.GormSearch[relationtb.Log](db, []string{"user_id"}, keyword, pageNumber, showNumber) return ormutil.GormSearch[relationtb.Log](db, []string{"user_id"}, keyword, pageNumber, showNumber)
} }

@ -63,13 +63,12 @@ const (
) )
func NewMinio() (s3.Interface, error) { func NewMinio() (s3.Interface, error) {
	u, err := url.Parse(config.Config.Object.Minio.Endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
} }
opts := &minio.Options{ opts := &minio.Options{
		Creds:  credentials.NewStaticV4(config.Config.Object.Minio.AccessKeyID, config.Config.Object.Minio.SecretAccessKey, config.Config.Object.Minio.SessionToken),
Secure: u.Scheme == "https", Secure: u.Scheme == "https",
} }
client, err := minio.New(u.Host, opts) client, err := minio.New(u.Host, opts)
@ -77,29 +76,35 @@ func NewMinio() (s3.Interface, error) {
return nil, err return nil, err
} }
m := &Minio{ m := &Minio{
		bucket: config.Config.Object.Minio.Bucket,
core: &minio.Core{Client: client}, core: &minio.Core{Client: client},
lock: &sync.Mutex{}, lock: &sync.Mutex{},
init: false, init: false,
} }
	if config.Config.Object.Minio.SignEndpoint == "" || config.Config.Object.Minio.SignEndpoint == config.Config.Object.Minio.Endpoint {
		m.opts = opts
		m.sign = m.core.Client
		m.prefix = u.Path
		u.Path = ""
		config.Config.Object.Minio.Endpoint = u.String()
		m.signEndpoint = config.Config.Object.Minio.Endpoint
} else { } else {
		su, err := url.Parse(config.Config.Object.Minio.SignEndpoint)
if err != nil { if err != nil {
return nil, err return nil, err
} }
m.opts = &minio.Options{ m.opts = &minio.Options{
			Creds:  credentials.NewStaticV4(config.Config.Object.Minio.AccessKeyID, config.Config.Object.Minio.SecretAccessKey, config.Config.Object.Minio.SessionToken),
Secure: su.Scheme == "https", Secure: su.Scheme == "https",
} }
m.sign, err = minio.New(su.Host, m.opts) m.sign, err = minio.New(su.Host, m.opts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
m.bucketURL = conf.SignEndpoint + "/" + conf.Bucket + "/" m.prefix = su.Path
su.Path = ""
config.Config.Object.Minio.SignEndpoint = su.String()
m.signEndpoint = config.Config.Object.Minio.SignEndpoint
} }
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel() defer cancel()
@ -111,13 +116,14 @@ func NewMinio() (s3.Interface, error) {
type Minio struct { type Minio struct {
bucket string bucket string
bucketURL string signEndpoint string
location string location string
opts *minio.Options opts *minio.Options
core *minio.Core core *minio.Core
sign *minio.Client sign *minio.Client
lock sync.Locker lock sync.Locker
init bool init bool
prefix string
} }
func (m *Minio) initMinio(ctx context.Context) error { func (m *Minio) initMinio(ctx context.Context) error {
@ -255,7 +261,7 @@ func (m *Minio) AuthSign(ctx context.Context, uploadID string, name string, expi
return nil, err return nil, err
} }
result := s3.AuthSignResult{ result := s3.AuthSignResult{
URL: m.bucketURL + name, URL: m.signEndpoint + "/" + m.bucket + "/" + name,
Query: url.Values{"uploadId": {uploadID}}, Query: url.Values{"uploadId": {uploadID}},
Parts: make([]s3.SignPart, len(partNumbers)), Parts: make([]s3.SignPart, len(partNumbers)),
} }
@ -269,11 +275,13 @@ func (m *Minio) AuthSign(ctx context.Context, uploadID string, name string, expi
request = signer.SignV4Trailer(*request, creds.AccessKeyID, creds.SecretAccessKey, creds.SessionToken, m.location, nil) request = signer.SignV4Trailer(*request, creds.AccessKeyID, creds.SecretAccessKey, creds.SessionToken, m.location, nil)
result.Parts[i] = s3.SignPart{ result.Parts[i] = s3.SignPart{
PartNumber: partNumber, PartNumber: partNumber,
URL: request.URL.String(),
Query: url.Values{"partNumber": {strconv.Itoa(partNumber)}}, Query: url.Values{"partNumber": {strconv.Itoa(partNumber)}},
Header: request.Header, Header: request.Header,
} }
} }
if m.prefix != "" {
result.URL = m.signEndpoint + m.prefix + "/" + m.bucket + "/" + name
}
return &result, nil return &result, nil
} }
@ -285,6 +293,9 @@ func (m *Minio) PresignedPutObject(ctx context.Context, name string, expire time
if err != nil { if err != nil {
return "", err return "", err
} }
if m.prefix != "" {
rawURL.Path = path.Join(m.prefix, rawURL.Path)
}
return rawURL.String(), nil return rawURL.String(), nil
} }
@ -396,6 +407,9 @@ func (m *Minio) presignedGetObject(ctx context.Context, name string, expire time
if err != nil { if err != nil {
return "", err return "", err
} }
if m.prefix != "" {
rawURL.Path = path.Join(m.prefix, rawURL.Path)
}
return rawURL.String(), nil return rawURL.String(), nil
} }

@ -86,3 +86,6 @@ func (cli *K8sDR) GetClientLocalConns() map[string][]*grpc.ClientConn {
fmt.Println("should not call this function!!!!!!!!!!!!!!!!!!!!!!!!!") fmt.Println("should not call this function!!!!!!!!!!!!!!!!!!!!!!!!!")
return nil return nil
} }
func (cli *K8sDR) Close() {
return
}

@ -12,4 +12,5 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package startrpc // import "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc" // Package startrpc start rpc server.
package startrpc

@ -16,11 +16,12 @@ package startrpc
import ( import (
"fmt" "fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
"net" "net"
"strconv" "strconv"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/insecure"
@ -32,6 +33,7 @@ import (
"github.com/OpenIMSDK/tools/utils" "github.com/OpenIMSDK/tools/utils"
) )
// Start rpc server.
func Start( func Start(
rpcPort int, rpcPort int,
rpcRegisterName string, rpcRegisterName string,
@ -39,16 +41,8 @@ func Start(
rpcFn func(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error, rpcFn func(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error,
options ...grpc.ServerOption, options ...grpc.ServerOption,
) error { ) error {
fmt.Println( fmt.Printf("start %s server, port: %d, prometheusPort: %d, OpenIM version: %s",
"start", rpcRegisterName, rpcPort, prometheusPort, config.Version)
rpcRegisterName,
"server, port: ",
rpcPort,
"prometheusPort:",
prometheusPort,
", OpenIM version: ",
config.Version,
)
listener, err := net.Listen( listener, err := net.Listen(
"tcp", "tcp",
net.JoinHostPort(network.GetListenIP(config.Config.Rpc.ListenIP), strconv.Itoa(rpcPort)), net.JoinHostPort(network.GetListenIP(config.Config.Rpc.ListenIP), strconv.Itoa(rpcPort)),
@ -57,18 +51,12 @@ func Start(
return err return err
} }
defer listener.Close() defer listener.Close()
zkClient, err := discovery_register.NewDiscoveryRegister(config.Config.Envs.Discovery) client, err := discovery_register.NewDiscoveryRegister(config.Config.Envs.Discovery)
/*
zkClient, err := zookeeper.NewClient(config.Config.Zookeeper.ZkAddr, config.Config.Zookeeper.Schema,
zookeeper.WithFreq(time.Hour), zookeeper.WithUserNameAndPassword(
config.Config.Zookeeper.Username,
config.Config.Zookeeper.Password,
), zookeeper.WithRoundRobin(), zookeeper.WithTimeout(10), zookeeper.WithLogger(log.NewZkLogger()))*/
if err != nil { if err != nil {
return utils.Wrap1(err) return utils.Wrap1(err)
} }
//defer zkClient.CloseZK() defer client.Close()
zkClient.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials())) client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()))
registerIP, err := network.GetRpcRegisterIP(config.Config.Rpc.RegisterIP) registerIP, err := network.GetRpcRegisterIP(config.Config.Rpc.RegisterIP)
if err != nil { if err != nil {
return err return err
@ -88,11 +76,11 @@ func Start(
} }
srv := grpc.NewServer(options...) srv := grpc.NewServer(options...)
defer srv.GracefulStop() defer srv.GracefulStop()
err = rpcFn(zkClient, srv) err = rpcFn(client, srv)
if err != nil { if err != nil {
return utils.Wrap1(err) return utils.Wrap1(err)
} }
err = zkClient.Register( err = client.Register(
rpcRegisterName, rpcRegisterName,
registerIP, registerIP,
rpcPort, rpcPort,
@ -108,5 +96,6 @@ func Start(
} }
} }
}() }()
return utils.Wrap1(srv.Serve(listener)) return utils.Wrap1(srv.Serve(listener))
} }

@ -59,8 +59,13 @@ func (c *ConversationRpcClient) GetSingleConversationRecvMsgOpt(ctx context.Cont
return conversation.GetConversation().RecvMsgOpt, err return conversation.GetConversation().RecvMsgOpt, err
} }
func (c *ConversationRpcClient) SingleChatFirstCreateConversation(ctx context.Context, recvID, sendID string) error { func (c *ConversationRpcClient) SingleChatFirstCreateConversation(ctx context.Context, recvID, sendID,
_, err := c.Client.CreateSingleChatConversations(ctx, &pbconversation.CreateSingleChatConversationsReq{RecvID: recvID, SendID: sendID}) conversationID string, conversationType int32) error {
_, err := c.Client.CreateSingleChatConversations(ctx,
&pbconversation.CreateSingleChatConversationsReq{
RecvID: recvID, SendID: sendID, ConversationID: conversationID,
ConversationType: conversationType,
})
return err return err
} }

@ -228,8 +228,9 @@ func (s *NotificationSender) NotificationWithSesstionType(ctx context.Context, s
} }
var req msg.SendMsgReq var req msg.SendMsgReq
var msg sdkws.MsgData var msg sdkws.MsgData
var userInfo *sdkws.UserInfo
if notificationOpt.WithRpcGetUsername && s.getUserInfo != nil { if notificationOpt.WithRpcGetUsername && s.getUserInfo != nil {
userInfo, err := s.getUserInfo(ctx, sendID) userInfo, err = s.getUserInfo(ctx, sendID)
if err != nil { if err != nil {
log.ZWarn(ctx, "getUserInfo failed", err, "sendID", sendID) log.ZWarn(ctx, "getUserInfo failed", err, "sendID", sendID)
} else { } else {
@ -251,7 +252,7 @@ func (s *NotificationSender) NotificationWithSesstionType(ctx context.Context, s
msg.CreateTime = utils.GetCurrentTimestampByMill() msg.CreateTime = utils.GetCurrentTimestampByMill()
msg.ClientMsgID = utils.GetMsgID(sendID) msg.ClientMsgID = utils.GetMsgID(sendID)
optionsConfig := s.contentTypeConf[contentType] optionsConfig := s.contentTypeConf[contentType]
if sesstionType == constant.SuperGroupChatType && contentType == constant.HasReadReceipt { if sendID == recvID && contentType == constant.HasReadReceipt {
optionsConfig.ReliabilityLevel = constant.UnreliableNotification optionsConfig.ReliabilityLevel = constant.UnreliableNotification
} }
options := config.GetOptionsByNotification(optionsConfig) options := config.GetOptionsByNotification(optionsConfig)

@ -41,6 +41,7 @@ func (c *ConversationNotificationSender) ConversationSetPrivateNotification(ctx
IsPrivate: isPrivateChat, IsPrivate: isPrivateChat,
ConversationID: conversationID, ConversationID: conversationID,
} }
return c.Notification(ctx, sendID, recvID, constant.ConversationPrivateChatNotification, tips) return c.Notification(ctx, sendID, recvID, constant.ConversationPrivateChatNotification, tips)
} }
@ -50,6 +51,7 @@ func (c *ConversationNotificationSender) ConversationChangeNotification(ctx cont
UserID: userID, UserID: userID,
ConversationIDList: conversationIDs, ConversationIDList: conversationIDs,
} }
return c.Notification(ctx, userID, userID, constant.ConversationChangeNotification, tips) return c.Notification(ctx, userID, userID, constant.ConversationChangeNotification, tips)
} }
@ -65,5 +67,6 @@ func (c *ConversationNotificationSender) ConversationUnreadChangeNotification(
HasReadSeq: hasReadSeq, HasReadSeq: hasReadSeq,
UnreadCountTime: unreadCountTime, UnreadCountTime: unreadCountTime,
} }
return c.Notification(ctx, userID, userID, constant.ConversationUnreadNotification, tips) return c.Notification(ctx, userID, userID, constant.ConversationUnreadNotification, tips)
} }

@ -64,9 +64,9 @@ set +e
# Later, after discarding Docker, the Docker keyword is unreliable, and Kubepods is used # Later, after discarding Docker, the Docker keyword is unreliable, and Kubepods is used
if grep -qE 'docker|kubepods' /proc/1/cgroup || [ -f /.dockerenv ]; then if grep -qE 'docker|kubepods' /proc/1/cgroup || [ -f /.dockerenv ]; then
openim::color::echo ${COLOR_BLUE} "Environment in the interior of the container" openim::color::echo ${COLOR_CYAN} "Environment in the interior of the container"
else else
openim::color::echo ${COLOR_BLUE} "The environment is outside the container" openim::color::echo ${COLOR_CYAN} "The environment is outside the container"
openim::util::check_ports ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]} || return 0 openim::util::check_ports ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]} || return 0
fi fi

@ -37,6 +37,8 @@ readonly ipv6regex='(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:
clear clear
. $(dirname ${BASH_SOURCE})/lib/util.sh . $(dirname ${BASH_SOURCE})/lib/util.sh
openim::util::ensure-bash-version
trap 'openim::util::onCtrlC' INT trap 'openim::util::onCtrlC' INT
function openim::util::onCtrlC() { function openim::util::onCtrlC() {

@ -19,11 +19,7 @@ source "${OPENIM_ROOT}/scripts/install/common.sh"
cd "$OPENIM_ROOT" cd "$OPENIM_ROOT"
if command -v docker-compose &> /dev/null; then openim::util::check_docker_and_compose_versions
docker-compose ps
else
docker compose ps
fi
progress() { progress() {
local _main_pid="$1" local _main_pid="$1"

@ -30,6 +30,8 @@ openim::util::ensure_docker_daemon_connectivity
DOCKER_COMPOSE_COMMAND= DOCKER_COMPOSE_COMMAND=
# Check if docker-compose command is available # Check if docker-compose command is available
openim::util::check_docker_and_compose_versions
if command -v docker compose &> /dev/null if command -v docker compose &> /dev/null
then then
openim::log::info "docker compose command is available" openim::log::info "docker compose command is available"

@ -51,17 +51,7 @@ execute_scripts() {
"${OPENIM_ROOT}"/scripts/env_check.sh "${OPENIM_ROOT}"/scripts/env_check.sh
} }
# Start docker compose openim::util::check_docker_and_compose_versions
start_docker_compose() {
openim::log::info "Checking if docker-compose command is available"
if command -v docker-compose &> /dev/null; then
docker-compose up -d
else
docker compose up -d
fi
"${OPENIM_ROOT}"/scripts/docker-check-service.sh
}
main() { main() {
load_env load_env

@ -250,6 +250,39 @@ openim::util::host_arch() {
echo "${host_arch}" echo "${host_arch}"
} }
# Define a bash function to check the versions of Docker and Docker Compose
openim::util::check_docker_and_compose_versions() {
# Define the required versions of Docker and Docker Compose
required_docker_version="20.10.0"
required_compose_version="2.0"
# Get the currently installed Docker version
installed_docker_version=$(docker --version | awk '{print $3}' | sed 's/,//')
# Check if the installed Docker version matches the required version
if [[ "$installed_docker_version" < "$required_docker_version" ]]; then
echo "Docker version mismatch. Installed: $installed_docker_version, Required: $required_docker_version"
return 1
fi
# Check if the docker compose sub-command is available
if ! docker compose version &> /dev/null; then
echo "Docker does not support the docker compose sub-command"
return 1
fi
# Get the currently installed Docker Compose version
installed_compose_version=$(docker compose version --short)
# Check if the installed Docker Compose version matches the required version
if [[ "$installed_compose_version" < "$required_compose_version" ]]; then
echo "Docker Compose version mismatch. Installed: $installed_compose_version, Required: $required_compose_version"
return 1
fi
}
# The `openim::util::check_ports` function analyzes the state of processes based on given ports. # The `openim::util::check_ports` function analyzes the state of processes based on given ports.
# It accepts multiple ports as arguments and prints: # It accepts multiple ports as arguments and prints:
# 1. The state of the process (whether it's running or not). # 1. The state of the process (whether it's running or not).
@ -1321,3 +1354,7 @@ function openim::util::gen_os_arch() {
exit 1 exit 1
fi fi
} }
if [[ "$*" =~ openim::util:: ]];then
eval $*
fi

@ -167,6 +167,7 @@ define MAKEFILE_EXAMPLE
# make install-deepcopy-gen Install deepcopy-gen tools if the license is missing. # make install-deepcopy-gen Install deepcopy-gen tools if the license is missing.
# make build BINS=openim-api V=1 DEBUG=1 Build debug binaries for only openim-api. # make build BINS=openim-api V=1 DEBUG=1 Build debug binaries for only openim-api.
# make multiarch -j PLATFORMS="linux_arm64 linux_amd64" V=1 Build binaries for both platforms. # make multiarch -j PLATFORMS="linux_arm64 linux_amd64" V=1 Build binaries for both platforms.
# make image
endef endef
export MAKEFILE_EXAMPLE export MAKEFILE_EXAMPLE

@ -17,7 +17,7 @@
# #
GO := go GO := go
GO_SUPPORTED_VERSIONS ?= 1.18|1.19|1.20|1.21|1.22 GO_SUPPORTED_VERSIONS ?= 1.19|1.20|1.21|1.22
GO_LDFLAGS += -X $(VERSION_PACKAGE).gitVersion=$(GIT_TAG) \ GO_LDFLAGS += -X $(VERSION_PACKAGE).gitVersion=$(GIT_TAG) \
-X $(VERSION_PACKAGE).gitCommit=$(GIT_COMMIT) \ -X $(VERSION_PACKAGE).gitCommit=$(GIT_COMMIT) \
@ -45,7 +45,7 @@ ifeq ($(origin GOBIN), undefined)
endif endif
# COMMANDS is Specify all files under ${ROOT_DIR}/cmd/ and ${ROOT_DIR}/tools/ except those ending in.md # COMMANDS is Specify all files under ${ROOT_DIR}/cmd/ and ${ROOT_DIR}/tools/ except those ending in.md
COMMANDS ?= $(filter-out %.md, $(wildcard ${ROOT_DIR}/cmd/* ${ROOT_DIR}/tools/* ${ROOT_DIR}/cmd/openim-rpc/*)) COMMANDS ?= $(filter-out %.md, $(wildcard ${ROOT_DIR}/cmd/* ${ROOT_DIR}/tools/* ${ROOT_DIR}/tools/data-conversion/chat/cmd/* ${ROOT_DIR}/tools/data-conversion/openim/cmd/* ${ROOT_DIR}/cmd/openim-rpc/*))
ifeq (${COMMANDS},) ifeq (${COMMANDS},)
$(error Could not determine COMMANDS, set ROOT_DIR or run in source dir) $(error Could not determine COMMANDS, set ROOT_DIR or run in source dir)
endif endif
@ -136,6 +136,7 @@ ifneq ($(shell $(GO) version | grep -q -E '\bgo($(GO_SUPPORTED_VERSIONS))\b' &&
$(error unsupported go version. Please make install one of the following supported version: '$(GO_SUPPORTED_VERSIONS)') $(error unsupported go version. Please make install one of the following supported version: '$(GO_SUPPORTED_VERSIONS)')
endif endif
## go.build.%: Build binaries for a specific platform
.PHONY: go.build.% .PHONY: go.build.%
go.build.%: go.build.%:
$(eval COMMAND := $(word 2,$(subst ., ,$*))) $(eval COMMAND := $(word 2,$(subst ., ,$*)))
@ -159,6 +160,14 @@ go.build.%:
CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) $(GO) build $(GO_BUILD_FLAGS) -o \ CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) $(GO) build $(GO_BUILD_FLAGS) -o \
$(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT) $(ROOT_DIR)/tools/$(COMMAND)/$(COMMAND).go; \ $(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT) $(ROOT_DIR)/tools/$(COMMAND)/$(COMMAND).go; \
chmod +x $(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT); \ chmod +x $(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT); \
elif [ -f $(ROOT_DIR)/tools/data-conversion/openim/cmd/$(COMMAND)/$(COMMAND).go ]; then \
CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) $(GO) build $(GO_BUILD_FLAGS) -o \
$(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT) $(ROOT_DIR)/tools/data-conversion/openim/cmd/$(COMMAND)/$(COMMAND).go; \
chmod +x $(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT); \
elif [ -f $(ROOT_DIR)/tools/data-conversion/chat/cmd/$(COMMAND)/$(COMMAND).go ]; then \
CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) $(GO) build $(GO_BUILD_FLAGS) -o \
$(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT) $(ROOT_DIR)/tools/data-conversion/chat/cmd/$(COMMAND)/$(COMMAND).go; \
chmod +x $(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT); \
fi \ fi \
fi fi

@ -21,7 +21,6 @@
# #
DOCKER := docker DOCKER := docker
DOCKER_SUPPORTED_API_VERSION ?= 1.32|1.40|1.41|1.42
# read: https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/images.md # read: https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/images.md
REGISTRY_PREFIX ?= ghcr.io/openimsdk REGISTRY_PREFIX ?= ghcr.io/openimsdk
@ -43,8 +42,8 @@ endif
# Determine image files by looking into build/images/*/Dockerfile # Determine image files by looking into build/images/*/Dockerfile
IMAGES_DIR ?= $(wildcard ${ROOT_DIR}/build/images/*) IMAGES_DIR ?= $(wildcard ${ROOT_DIR}/build/images/*)
# Determine images names by stripping out the dir names # Determine images names by stripping out the dir names, and filter out the undesired directories
IMAGES ?= $(filter-out tools,$(foreach image,${IMAGES_DIR},$(notdir ${image}))) IMAGES ?= $(filter-out Dockerfile openim-tools openim-cmdutils,$(foreach image,${IMAGES_DIR},$(notdir ${image})))
ifeq (${IMAGES},) ifeq (${IMAGES},)
$(error Could not determine IMAGES, set ROOT_DIR or run in source dir) $(error Could not determine IMAGES, set ROOT_DIR or run in source dir)
@ -75,22 +74,13 @@ image.docker-buildx:
## image.verify: Verify docker version ## image.verify: Verify docker version
.PHONY: image.verify .PHONY: image.verify
image.verify: image.verify:
$(eval API_VERSION := $(shell $(DOCKER) version | grep -E 'API version: {1,6}[0-9]' | head -n1 | awk '{print $$3} END { if (NR==0) print 0}' )) @$(ROOT_DIR)/scripts/lib/util.sh openim::util::check_docker_and_compose_versions
$(eval PASS := $(shell echo "$(API_VERSION) > $(DOCKER_SUPPORTED_API_VERSION)" | bc))
@if [ $(PASS) -ne 1 ]; then \
$(DOCKER) -v ;\
echo "Unsupported docker version. Docker API version should be greater than $(DOCKER_SUPPORTED_API_VERSION)"; \
exit 1; \
fi
## image.daemon.verify: Verify docker daemon experimental features ## image.daemon.verify: Verify docker daemon experimental features
.PHONY: image.daemon.verify .PHONY: image.daemon.verify
image.daemon.verify: image.daemon.verify:
$(eval PASS := $(shell $(DOCKER) version | grep -q -E 'Experimental: {1,5}true' && echo 1 || echo 0)) @$(ROOT_DIR)/scripts/lib/util.sh openim::util::ensure_docker_daemon_connectivity
@if [ $(PASS) -ne 1 ]; then \ @$(ROOT_DIR)/scripts/lib/util.sh openim::util::ensure-docker-buildx
echo "Experimental features of Docker daemon is not enabled. Please add \"experimental\": true in '/etc/docker/daemon.json' and then restart Docker daemon."; \
exit 1; \
fi
# If you wish built the manager image targeting other platforms you can use the --platform flag. # If you wish built the manager image targeting other platforms you can use the --platform flag.
# (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. # (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it.
@ -110,10 +100,9 @@ image.build.%: go.build.%
$(eval ARCH := $(word 2,$(subst _, ,$(PLATFORM)))) $(eval ARCH := $(word 2,$(subst _, ,$(PLATFORM))))
@echo "===========> Building docker image $(IMAGE) $(VERSION) for $(IMAGE_PLAT)" @echo "===========> Building docker image $(IMAGE) $(VERSION) for $(IMAGE_PLAT)"
@mkdir -p $(TMP_DIR)/$(IMAGE)/$(PLATFORM) @mkdir -p $(TMP_DIR)/$(IMAGE)/$(PLATFORM)
@awk '/FROM/ {c++; if (c==2) {print; next}} c>=2' $(ROOT_DIR)/build/images/$(IMAGE)/Dockerfile \ @cat $(ROOT_DIR)/build/images/Dockerfile\
| sed -e "s#BASE_IMAGE#$(BASE_IMAGE)#g" \ | sed "s#BASE_IMAGE#$(BASE_IMAGE)#g" \
-e 's/--from=builder //g' \ | sed "s#BINARY_NAME#$(IMAGE)#g" >$(TMP_DIR)/$(IMAGE)/Dockerfile
-e 's#COPY /openim/openim-server/#COPY ./#g' > $(TMP_DIR)/$(IMAGE)/Dockerfile
@cp $(BIN_DIR)/platforms/$(IMAGE_PLAT)/$(IMAGE) $(TMP_DIR)/$(IMAGE) @cp $(BIN_DIR)/platforms/$(IMAGE_PLAT)/$(IMAGE) $(TMP_DIR)/$(IMAGE)
$(eval BUILD_SUFFIX := $(_DOCKER_BUILD_EXTRA_ARGS) --pull -t $(REGISTRY_PREFIX)/$(IMAGE)-$(ARCH):$(VERSION) $(TMP_DIR)/$(IMAGE)) $(eval BUILD_SUFFIX := $(_DOCKER_BUILD_EXTRA_ARGS) --pull -t $(REGISTRY_PREFIX)/$(IMAGE)-$(ARCH):$(VERSION) $(TMP_DIR)/$(IMAGE))
@if [ $(shell $(GO) env GOARCH) != $(ARCH) ] ; then \ @if [ $(shell $(GO) env GOARCH) != $(ARCH) ] ; then \
@ -124,7 +113,6 @@ image.build.%: go.build.%
fi fi
@rm -rf $(TMP_DIR)/$(IMAGE) @rm -rf $(TMP_DIR)/$(IMAGE)
# https://docs.docker.com/build/building/multi-platform/ # https://docs.docker.com/build/building/multi-platform/
# busybox image supports amd64, arm32v5, arm32v6, arm32v7, arm64v8, i386, ppc64le, and s390x # busybox image supports amd64, arm32v5, arm32v6, arm32v7, arm64v8, i386, ppc64le, and s390x
## image.buildx.%: Build docker images with buildx ## image.buildx.%: Build docker images with buildx

@ -1,6 +1,6 @@
module github.com/openimsdk/open-im-server/test/typecheck module github.com/openimsdk/open-im-server/test/typecheck
go 1.18 go 1.19
require golang.org/x/tools v0.12.0 require golang.org/x/tools v0.12.0

@ -1,3 +1,3 @@
module github.com/openimsdk/open-im-server/v3/tools/changelog module github.com/openimsdk/open-im-server/v3/tools/changelog
go 1.18 go 1.19

@ -35,7 +35,6 @@ import (
"github.com/go-zookeeper/zk" "github.com/go-zookeeper/zk"
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
"gorm.io/driver/mysql" "gorm.io/driver/mysql"
"gorm.io/gorm" "gorm.io/gorm"
@ -158,11 +157,15 @@ func checkMysql() error {
func checkMongo() error { func checkMongo() error {
var client *mongo.Client var client *mongo.Client
uri := "mongodb://sample.host:27017/?maxPoolSize=20&w=majority"
defer func() { defer func() {
if client != nil { if client != nil {
client.Disconnect(context.TODO()) client.Disconnect(context.TODO())
} }
}() }()
if config.Config.Mongo.Uri != "" {
uri = config.Config.Mongo.Uri
} else {
mongodbHosts := "" mongodbHosts := ""
for i, v := range config.Config.Mongo.Address { for i, v := range config.Config.Mongo.Address {
if i == len(config.Config.Mongo.Address)-1 { if i == len(config.Config.Mongo.Address)-1 {
@ -171,13 +174,21 @@ func checkMongo() error {
mongodbHosts += v + "," mongodbHosts += v + ","
} }
} }
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI( if config.Config.Mongo.Password != "" && config.Config.Mongo.Username != "" {
fmt.Sprintf("mongodb://%v:%v@%v/?authSource=admin", uri = fmt.Sprintf("mongodb://%s:%s@%s/%s?maxPoolSize=%d&authSource=admin",
config.Config.Mongo.Username, config.Config.Mongo.Password, mongodbHosts))) config.Config.Mongo.Username, config.Config.Mongo.Password, mongodbHosts,
config.Config.Mongo.Database, config.Config.Mongo.MaxPoolSize)
} else {
uri = fmt.Sprintf("mongodb://%s/%s/?maxPoolSize=%d&authSource=admin",
mongodbHosts, config.Config.Mongo.Database,
config.Config.Mongo.MaxPoolSize)
}
}
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(uri))
if err != nil { if err != nil {
return errs.Wrap(err) return errs.Wrap(err)
} else { } else {
err = client.Ping(context.TODO(), &readpref.ReadPref{}) err = client.Ping(context.TODO(), nil)
if err != nil { if err != nil {
return errs.Wrap(err) return errs.Wrap(err)
} }

@ -1,3 +1,3 @@
module github.com/openimsdk/open-im-server/v3/tools/component module github.com/openimsdk/open-im-server/v3/tools/component
go 1.18 go 1.19

@ -0,0 +1,98 @@
# OpenIM V2 to V3 Data Migration Guide
This guide provides detailed steps for migrating from OpenIM V2 to V3. Before starting the migration, make sure you are familiar with every step and follow the guide exactly.
+ [OpenIM Chat](https://github.com/OpenIMSDK/chat)
+ [OpenIM Server](https://github.com/OpenIMSDK/Open-IM-Server)
### 1. Data Backup
Before starting the data migration, it is strongly recommended to back up all related data to prevent any possible data loss.
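For example, a minimal backup sketch using `mysqldump` (host, port, and database names are placeholders taken from the configuration examples later in this guide; adapt them to your deployment):
```bash
# Dump the V2 OpenIM database and the V2 chat (business) database before migrating
mysqldump -h 127.0.0.1 -P 13306 -u root -p openIM_v2  > openIM_v2.sql
mysqldump -h 127.0.0.1 -P 13306 -u root -p admin_chat > admin_chat.sql
```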
### 2. Migrate OpenIM MySQL Data
+ Location: `open-im-server/v3/tools/data-conversion/openim/mysql.go`
+ Configure the database information in `mysql.go`.
+ Manually create the V3 database and make sure its character set is `utf8mb4`.
```go
// V2 database configuration
var (
	usernameV2 = "root"
	passwordV2 = "openIM"
	addrV2     = "127.0.0.1:13306"
	databaseV2 = "openIM_v2"
)

// V3 database configuration
var (
	usernameV3 = "root"
	passwordV3 = "openIM123"
	addrV3     = "127.0.0.1:13306"
	databaseV3 = "openIM_v3"
)
```
**Run the data migration command:**
```bash
make build BINS="conversion-mysql"
```
The built binary is located in `_output/bin/tools`.
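As a hedged example, the tool is then run from the platform-specific subdirectory created by the build rules (the `linux/amd64` segment is an assumption; substitute your own OS and architecture). The same pattern applies to the `conversion-msg` and `conversion-chat` binaries built in the next sections:
```bash
# Run the MySQL conversion tool built above; adjust linux/amd64 to your platform
_output/bin/tools/linux/amd64/conversion-mysql
```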
### 3. Convert Chat Messages (Optional)
+ Only messages stored in Kafka can be converted.
+ Location: `open-im-server/v3/tools/data-conversion/openim/msg.go`
+ Configure the message and server information in `msg.go`.
```go
var (
	topic       = "ws2ms_chat"      // V2 Kafka topic
	kafkaAddr   = "127.0.0.1:9092"  // V2 Kafka address
	rpcAddr     = "127.0.0.1:10130" // V3 RPC address
	adminUserID = "openIM123456"    // V3 admin user ID
	concurrency = 4                 // concurrency level
)
```
**Run the data migration command:**
```bash
make build BINS="conversion-msg"
```
### 4. Convert Business Server Data
+ Only messages stored in Kafka can be converted.
+ Location: `open-im-server/v3/tools/data-conversion/chat/chat.go`
+ The V3 database must be created manually; make sure its character set is `utf8mb4`.
+ Configure the database information in `main.go`.
```go
// V2 database configuration
var (
	usernameV2 = "root"
	passwordV2 = "openIM"
	addrV2     = "127.0.0.1:13306"
	databaseV2 = "admin_chat"
)

// V3 database configuration
var (
	usernameV3 = "root"
	passwordV3 = "openIM123"
	addrV3     = "127.0.0.1:13306"
	databaseV3 = "openim_enterprise"
)
```
**Run the data migration command:**
```bash
make build BINS="conversion-chat"
```
