diff --git a/.env b/.env
index 1e7b1e11a..3199b3714 100644
--- a/.env
+++ b/.env
@@ -5,6 +5,9 @@ ZOOKEEPER_IMAGE=bitnami/zookeeper:3.8
KAFKA_IMAGE=bitnami/kafka:3.5.1
MINIO_IMAGE=minio/minio:RELEASE.2024-01-11T07-46-16Z
ETCD_IMAGE=quay.io/coreos/etcd:v3.5.13
+PROMETHEUS_IMAGE=prom/prometheus:v2.45.6
+ALERTMANAGER_IMAGE=prom/alertmanager:v0.27.0
+GRAFANA_IMAGE=grafana/grafana:11.0.1
OPENIM_WEB_FRONT_IMAGE=openim/openim-web-front:release-v3.5.1
OPENIM_ADMIN_FRONT_IMAGE=openim/openim-admin-front:release-v1.7
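The three monitoring image variables added above are meant to be consumed by the compose stack. A minimal sketch of how they would typically be wired up — the service names, host ports, and mount paths here are illustrative assumptions, not part of this diff:

    services:
      prometheus:
        image: ${PROMETHEUS_IMAGE}        # from .env: prom/prometheus:v2.45.6
        volumes:
          - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
      alertmanager:
        image: ${ALERTMANAGER_IMAGE}      # from .env: prom/alertmanager:v0.27.0
        volumes:
          - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
          - ./config/email.tmpl:/etc/alertmanager/email.tmpl
      grafana:
        image: ${GRAFANA_IMAGE}           # from .env: grafana/grafana:11.0.1
        ports:
          - "13000:3000"                  # hypothetical host port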
diff --git a/.gitignore b/.gitignore
index fb8d428d2..77cf855b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,11 +34,7 @@ deployments/charts/generated-configs/
### OpenIM Config ###
.env
config/config.yaml
-config/alertmanager.yml
-config/prometheus.yml
-config/email.tmpl
config/notification.yaml
-config/instance-down-rules.yml
### OpenIM deploy ###
deployments/openim-server/charts
diff --git a/Dockerfile b/Dockerfile
index 3f765805c..e082dd64c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -43,7 +43,7 @@ COPY --from=builder $SERVER_DIR/start-config.yml $SERVER_DIR/
COPY --from=builder $SERVER_DIR/go.mod $SERVER_DIR/
COPY --from=builder $SERVER_DIR/go.sum $SERVER_DIR/
-RUN go get github.com/openimsdk/gomake@v0.0.13
+RUN go get github.com/openimsdk/gomake@v0.0.14-alpha.5
# Set the command to run when the container starts
ENTRYPOINT ["sh", "-c", "mage start && tail -f /dev/null"]
diff --git a/README.md b/README.md
index d73d5749a..a99559cdb 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/README_zh_CN.md b/README_zh_CN.md
index 65aac9ebc..59198eafb 100644
--- a/README_zh_CN.md
+++ b/README_zh_CN.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/cmd/openim-api/main.go b/cmd/openim-api/main.go
index 58e540c05..e29ed2a59 100644
--- a/cmd/openim-api/main.go
+++ b/cmd/openim-api/main.go
@@ -25,5 +25,4 @@ func main() {
if err := cmd.NewApiCmd().Exec(); err != nil {
program.ExitWithError(err)
}
-
}
diff --git a/config/alertmanager.yml b/config/alertmanager.yml
new file mode 100644
index 000000000..6c675ab6f
--- /dev/null
+++ b/config/alertmanager.yml
@@ -0,0 +1,34 @@
+global:
+ resolve_timeout: 5m
+ smtp_from: alert@openim.io
+ smtp_smarthost: smtp.163.com:465
+ smtp_auth_username: alert@openim.io
+ smtp_auth_password: YOURAUTHPASSWORD
+ smtp_require_tls: false
+ smtp_hello: xxx
+
+templates:
+ - /etc/alertmanager/email.tmpl
+
+route:
+ group_by: [ 'alertname' ]
+ group_wait: 5s
+ group_interval: 5s
+ repeat_interval: 5m
+ receiver: email
+ routes:
+ - matchers:
+ - alertname = "XXX"
+ group_by: [ 'instance' ]
+ group_wait: 5s
+ group_interval: 5s
+ repeat_interval: 5m
+ receiver: email
+
+receivers:
+ - name: email
+ email_configs:
+ - to: 'alert@example.com'
+ html: '{{ template "email.to.html" . }}'
+ headers: { Subject: "[OPENIM-SERVER]Alarm" }
+ send_resolved: true
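The email receiver above only fires if Prometheus evaluates alerting rules and forwards the results to this Alertmanager. A minimal prometheus.yml sketch of that wiring, assuming an Alertmanager target address and rule-file path (the file names come from this change, the rest is illustrative):

    alerting:
      alertmanagers:
        - static_configs:
            - targets: ["alertmanager:9093"]   # assumed service name and default port
    rule_files:
      - /etc/prometheus/instance-down-rules.yml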
diff --git a/config/email.tmpl b/config/email.tmpl
new file mode 100644
index 000000000..824144e9d
--- /dev/null
+++ b/config/email.tmpl
@@ -0,0 +1,36 @@
+{{ define "email.to.html" }}
+{{ if eq .Status "firing" }}
+ {{ range .Alerts }}
+
+
+
OpenIM Alert
+
Alert Status: firing
+
Alert Program: Prometheus Alert
+
Severity Level: {{ .Labels.severity }}
+
Alert Type: {{ .Labels.alertname }}
+
Affected Host: {{ .Labels.instance }}
+
Affected Service: {{ .Labels.job }}
+
Alert Subject: {{ .Annotations.summary }}
+
Trigger Time: {{ .StartsAt.Format "2006-01-02 15:04:05" }}
+
+ {{ end }}
+
+
+{{ else if eq .Status "resolved" }}
+ {{ range .Alerts }}
+
+
+
OpenIM Alert
+
Alert Status: resolved
+
Alert Program: Prometheus Alert
+
Severity Level: {{ .Labels.severity }}
+
Alert Type: {{ .Labels.alertname }}
+
Affected Host: {{ .Labels.instance }}
+
Affected Service: {{ .Labels.job }}
+
Alert Subject: {{ .Annotations.summary }}
+
Trigger Time: {{ .StartsAt.Format "2006-01-02 15:04:05" }}
+
+ {{ end }}
+
+{{ end }}
+{{ end }}
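The template above is filled from the labels and annotations of whichever Prometheus rule fires (severity, alertname, instance, job, summary). A hedged sketch of what config/instance-down-rules.yml, un-ignored earlier in this change, might contain — the alert name, threshold, and wording are assumptions:

    groups:
      - name: instance-down
        rules:
          - alert: InstanceDown
            expr: up == 0        # the same `up` series the Grafana "UP" panel below plots
            for: 1m
            labels:
              severity: critical
            annotations:
              summary: "Instance {{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minute."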
diff --git a/config/grafana-template/Demo.json b/config/grafana-template/Demo.json
new file mode 100644
index 000000000..dbb11fbf3
--- /dev/null
+++ b/config/grafana-template/Demo.json
@@ -0,0 +1,5356 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 3,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 35,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "Is the service up.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "stepBefore",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 2,
+ "pointSize": 9,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bool_on_off"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 6,
+ "y": 1
+ },
+ "id": 1,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "up",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "UP",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of online users and login users within the time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "online users"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#37bbff",
+ "mode": "fixed",
+ "seriesBy": "last"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 12
+ },
+ "id": 37,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "online_user_num",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "online users",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "expr": "increase(user_login_total[$time])",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "login num",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Login Information",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of register users within the time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "register users"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7437ff",
+ "mode": "fixed",
+ "seriesBy": "last"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 12
+ },
+ "id": 59,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "user_register_total",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "register users",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Register num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of chat msg success.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 23
+ },
+ "id": 38,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "increase(single_chat_msg_process_success_total[$time])",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "single msgs",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "expr": "increase(group_chat_msg_process_success_total[$time])",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "group msgs",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Chat Msg Success Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of chat msg failed .",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "single msgs"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#ff00dc",
+ "mode": "fixed",
+ "seriesBy": "last"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "group msgs"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#0cffef",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 12,
+ "y": 23
+ },
+ "id": 39,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "increase(single_chat_msg_process_failed_total[$time])",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "single msgs",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "expr": "increase(group_chat_msg_process_failed_total[$time])",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "group msgs",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Chat Msg Failed Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of msg failed offline pushed.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed msgs"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-red",
+ "mode": "fixed",
+ "seriesBy": "last"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 6,
+ "x": 4,
+ "y": 33
+ },
+ "id": 42,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "increase(msg_offline_push_failed_total[$time])",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "failed msgs",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Msg Offline Push Failed Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of failed set seq.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed msgs"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "semi-dark-green",
+ "mode": "fixed",
+ "seriesBy": "last"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 6,
+ "x": 14,
+ "y": 33
+ },
+ "id": 43,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "increase(seq_set_failed_total[$time])",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "failed addr: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Seq Set Failed Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of successfully inserted messages.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 44
+ },
+ "id": 44,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "increase(msg_insert_redis_success_total[$time])",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "redis: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "expr": "increase(msg_insert_mongo_success_total[$time])",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "mongo: {{instance}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Msg Success Insert Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of failed insertion messages.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 12,
+ "y": 44
+ },
+ "id": 45,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "increase(msg_insert_redis_failed_total[$time])",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "redis: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "expr": "increase(msg_insert_mongo_failed_total[$time])",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "mongo: {{instance}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Msg Failed Insert Num",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Server",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 1
+ },
+ "id": 22,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of call of all API.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 13
+ },
+ "id": 29,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path) (api_count)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "API Requests Total",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of call of all API within the time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "/friend/get_friend_list"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 13
+ },
+ "id": 48,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path) (increase(api_count[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "API Requests Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of err return of API.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 12,
+ "x": 0,
+ "y": 22
+ },
+ "id": 24,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path) (api_count{code != \"0\"})",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "API Error Total",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of err return of API with err code.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 12,
+ "x": 12,
+ "y": 22
+ },
+ "id": 23,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path, code) (api_count{code != \"0\"})",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{path}}: code={{code}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "API Error Total With Code",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the qps of API.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "reqps"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#1ed9d4",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 36
+ },
+ "id": 51,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(api_count[1m]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "API QPS",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of err return of API within the time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 0,
+ "y": 45
+ },
+ "id": 49,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path) (increase(api_count{code != \"0\"}[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "API Error Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of err return of API with err code within the time frame..",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 12,
+ "y": 45
+ },
+ "id": 50,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path, code) (increase(api_count{code != \"0\"}[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{path}}: code={{code}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "API Error Num With Code",
+ "type": "timeseries"
+ }
+ ],
+ "title": "API",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 2
+ },
+ "id": 28,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of call of all RPC.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 14
+ },
+ "id": 21,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path) (rpc_count)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "RPC Total Count",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the error return of RPC.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 24
+ },
+ "id": 31,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path) (rpc_count{code!=\"0\"})",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "RPC Error Count",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the error return of RPC with code.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 12,
+ "y": 24
+ },
+ "id": 33,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path, code) (rpc_count{code!=\"0\"})",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{path}}: code={{code}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "RPC Error Count With Code",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of call of all RPC within the time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 34
+ },
+ "id": 52,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path) (increase(rpc_count[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "RPC Total Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of RPC calls within the time frame, aggregated by name.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 12,
+ "x": 0,
+ "y": 43
+ },
+ "id": 30,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (name) (increase(rpc_count[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "RPC Num by Name",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of call of RPC within the time frame, aggregated by address.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 12,
+ "x": 12,
+ "y": 43
+ },
+ "id": 32,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (instance) (increase(rpc_count[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "RPC Num by Address",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the error return of RPC within the time frame within the time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 56
+ },
+ "id": 54,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path) (increase(rpc_count{code!=\"0\"}[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "RPC Error Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the error return of RPC with code within the time frame within the time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 12,
+ "y": 56
+ },
+ "id": 53,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (path, code) (increase(rpc_count{code!=\"0\"}[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{path}}: code={{code}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "RPC Error Num With Code",
+ "type": "timeseries"
+ }
+ ],
+ "title": "RPC",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 3
+ },
+ "id": 25,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of HTTP requests.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 15
+ },
+ "id": 27,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (method, path) (http_count)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{method}}: {{path}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP Total Count",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of HTTP requests with status.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 15
+ },
+ "id": 26,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (method, path, status) (http_count)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{method}}: {{path}}: {{status}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP Total Count With Status",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of HTTP requests within the time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 26
+ },
+ "id": 55,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (method, path) (increase(http_count[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{method}}: {{path}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP Total Num",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+          "description": "This metric represents the number of HTTP requests, broken down by status, within the selected time frame.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 26
+ },
+ "id": 56,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (method, path, status) (increase(http_count[$time]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{method}}: {{path}}: {{status}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP Total Num With Status",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+          "description": "This metric represents the HTTP requests per second (QPS).",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "reqps"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#1ed9d4",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 37
+ },
+ "id": 57,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(http_count[1m]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP QPS",
+ "type": "timeseries"
+ }
+ ],
+ "title": "HTTP",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 4
+ },
+ "id": 6,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+          "description": "This metric represents the percentage of CPU time used per second, averaged over the last minute.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 16
+ },
+ "id": 5,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.3.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n rate(process_cpu_seconds_total{job=~\"$rpcNameFilter\"}[1m])*100,\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{job}}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "CPU Usage Percentage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+          "description": "This metric represents the percentage of CPU time used per second, averaged over the last minute.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 16
+ },
+ "id": 4,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.3.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n rate(process_cpu_seconds_total{job!~\"$rpcNameFilter\"}[1m])*100,\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{job}}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "CPU Usage Percentage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of open file descriptors.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 27
+ },
+ "id": 7,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.3.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n process_open_fds{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{job}}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Open File Descriptors",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of open file descriptors.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 27
+ },
+ "id": 8,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.3.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n process_open_fds{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{job}}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Open File Descriptors",
+ "type": "timeseries"
+ },
+ {
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 38
+ },
+ "id": 9,
+ "libraryPanel": {
+ "name": "Virtual Memory bytes",
+ "uid": "fdriqgnk5lnnke"
+ },
+ "title": "Virtual Memory bytes"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+          "description": "This metric represents the process's virtual memory size in bytes.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 38
+ },
+ "id": 10,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.3.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n process_virtual_memory_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{job}}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Virtual Memory bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+          "description": "This metric represents the process's resident memory size in bytes.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 49
+ },
+ "id": 11,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.3.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n process_resident_memory_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{job}}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Resident Memory bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+          "description": "This metric represents the process's resident memory size in bytes.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 49
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.3.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n process_resident_memory_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{job}}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Resident Memory bytes",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Process",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 5
+ },
+ "id": 3,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "Measures the frequency of garbage collection operations in the Go environment, averaged over the last five minutes.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 6
+ },
+ "id": 58,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n rate(go_gc_duration_seconds_count{job=~\"$rpcNameFilter\"}[5m]),\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "GC Rate Per Second",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "Measures the frequency of garbage collection operations in the Go environment, averaged over the last five minutes.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 6
+ },
+ "id": 2,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "expr": "label_replace(\r\n rate(go_gc_duration_seconds_count{job!~\"$rpcNameFilter\"}[5m]),\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "GC Rate Per Second",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of goroutines.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "id": 13,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n go_goroutines{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Goroutines",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of goroutines.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 14,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n go_goroutines{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Goroutines",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of bytes allocated and still in use.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 28
+ },
+ "id": 15,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n go_memstats_alloc_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+          "title": "Go Alloc Bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of bytes allocated and still in use.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 28
+ },
+ "id": 16,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n go_memstats_alloc_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+          "title": "Go Alloc Bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of bytes used by the profiling bucket hash table.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 39
+ },
+ "id": 17,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n go_memstats_buck_hash_sys_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+          "title": "Go Buck Hash Sys Bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of bytes used by the profiling bucket hash table.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 39
+ },
+ "id": 18,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n go_memstats_buck_hash_sys_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+          "title": "Go Buck Hash Sys Bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of bytes in use by mcache structures.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 50
+ },
+ "id": 19,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n go_memstats_mcache_inuse_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Go Mcache Bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "description": "This metric represents the number of bytes in use by mcache structures.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "fieldMinMax": false,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 50
+ },
+ "id": 20,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "label_replace(\r\n go_memstats_mcache_inuse_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "$legendName",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Go Mcache Bytes",
+ "type": "timeseries"
+ }
+ ],
+      "title": "Go Information",
+ "type": "row"
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 39,
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "openimserver-openim-rpc.*",
+ "value": "openimserver-openim-rpc.*"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": "filter",
+ "multi": false,
+ "name": "rpcNameFilter",
+ "options": [
+ {
+ "selected": true,
+ "text": "openimserver-openim-rpc.*",
+ "value": "openimserver-openim-rpc.*"
+ }
+ ],
+ "query": "openimserver-openim-rpc.*",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "{{job}}: {{instance}}",
+ "value": "{{job}}: {{instance}}"
+ },
+ "description": "common legend name",
+ "hide": 0,
+ "includeAll": false,
+ "label": "legend",
+ "multi": false,
+ "name": "legendName",
+ "options": [
+ {
+ "selected": true,
+ "text": "{{job}}: {{instance}}",
+ "value": "{{job}}: {{instance}}"
+ }
+ ],
+ "query": "{{job}}: {{instance}}",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "current": {
+ "selected": true,
+ "text": "1h",
+ "value": "1h"
+ },
+        "description": "Global PromQL time range.",
+ "hide": 0,
+ "includeAll": false,
+ "label": "time",
+ "multi": false,
+ "name": "time",
+ "options": [
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "5m",
+ "value": "5m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": true,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "3h",
+ "value": "3h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "24h",
+ "value": "24h"
+ },
+ {
+ "selected": false,
+ "text": "1w",
+ "value": "1w"
+ },
+ {
+ "selected": false,
+ "text": "4w",
+ "value": "4w"
+ },
+ {
+ "selected": false,
+ "text": "12w",
+ "value": "12w"
+ },
+ {
+ "selected": false,
+ "text": "24w",
+ "value": "24w"
+ },
+ {
+ "selected": false,
+ "text": "1y",
+ "value": "1y"
+ },
+ {
+ "selected": false,
+ "text": "2y",
+ "value": "2y"
+ },
+ {
+ "selected": false,
+ "text": "4y",
+ "value": "4y"
+ },
+ {
+ "selected": false,
+ "text": "10y",
+ "value": "10y"
+ }
+ ],
+ "query": "1m,5m,30m,1h,3h,6h,12h,24h,1w,4w,12w,24w,1y,2y,4y,10y",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timeRangeUpdatedDuringEditOrView": false,
+ "timepicker": {},
+ "timezone": "",
+ "title": "Demo",
+ "uid": "a506d250-b606-4702-86a7-ac6aa1d069a1",
+ "version": 22,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/config/instance-down-rules.yml b/config/instance-down-rules.yml
new file mode 100644
index 000000000..bcac7ba60
--- /dev/null
+++ b/config/instance-down-rules.yml
@@ -0,0 +1,44 @@
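+# These rules are loaded by Prometheus via the rule_files entry in config/prometheus.yml and are
+# mounted into the container at /etc/prometheus/instance-down-rules.yml (see docker-compose.yml).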
+groups:
+ - name: instance_down
+ rules:
+ - alert: InstanceDown
+ expr: up == 0
+ for: 1m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Instance {{ $labels.instance }} down"
+          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minute."
+
+ - name: database_insert_failure_alerts
+ rules:
+ - alert: DatabaseInsertFailed
+ expr: (increase(msg_insert_redis_failed_total[5m]) > 0) or (increase(msg_insert_mongo_failed_total[5m]) > 0)
+ for: 1m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Increase in MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter detected"
+          description: "Either MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter has increased in the last 5 minutes, indicating failed message insert operations to Redis or MongoDB; Redis or MongoDB may have crashed."
+
+ - name: registrations_few
+ rules:
+ - alert: RegistrationsFew
+ expr: increase(user_login_total[1h]) == 0
+ for: 1m
+ labels:
+ severity: info
+ annotations:
+ summary: "Too few registrations within the time frame"
+ description: "The number of registrations in the last hour is 0. There might be some issues."
+
+ - name: messages_few
+ rules:
+ - alert: MessagesFew
+ expr: (increase(single_chat_msg_process_success_total[1h])+increase(group_chat_msg_process_success_total[1h])) == 0
+ for: 1m
+ labels:
+ severity: info
+ annotations:
+ summary: "Too few messages within the time frame"
+ description: "The number of messages sent in the last hour is 0. There might be some issues."
diff --git a/config/log.yml b/config/log.yml
index 2194d8917..8620af611 100644
--- a/config/log.yml
+++ b/config/log.yml
@@ -10,4 +10,5 @@ remainLogLevel: 6
isStdout: false
# Whether to log in JSON format, default is acceptable
isJson: false
-
+# Output simplified logs when a KeyAndValues value's length exceeds 50 in RPC method logs.
+isSimplify: true
\ No newline at end of file
diff --git a/config/openim-crontask.yml b/config/openim-crontask.yml
index 9bbccfd25..3839104a4 100644
--- a/config/openim-crontask.yml
+++ b/config/openim-crontask.yml
@@ -1,2 +1,3 @@
-chatRecordsClearTime: "0 2 * * *"
+cronExecuteTime: "0 2 * * *"
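+# cronExecuteTime uses standard 5-field cron syntax (minute hour day-of-month month day-of-week);
+# "0 2 * * *" runs the task every day at 02:00. As an illustrative (non-default) value, "30 3 * * 1"
+# would run it every Monday at 03:30.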
retainChatRecords: 365
+fileExpireTime: 90
diff --git a/config/openim-push.yml b/config/openim-push.yml
index a1abfcf88..9384008a0 100644
--- a/config/openim-push.yml
+++ b/config/openim-push.yml
@@ -23,7 +23,9 @@ geTui:
channelID: ''
channelName: ''
fcm:
- serviceAccount: "x.json"
+  # The file path takes priority; if it is empty, the URL is used instead.
+  filePath: "" # The full path is the config directory passed via -c (mage passes `config/` by default) joined with filePath.
+ authURL: "" # Must start with https or http.
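+  # Illustrative (hypothetical) example: filePath: "fcm-service-account.json" would resolve to
+  # config/fcm-service-account.json under the default mage config directory, while
+  # authURL: "https://example.com/fcm-service-account.json" would be fetched over HTTP(S).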
jpns:
appKey: ''
masterSecret: ''
diff --git a/config/openim-rpc-third.yml b/config/openim-rpc-third.yml
index bde38ccc4..6fb60f47f 100644
--- a/config/openim-rpc-third.yml
+++ b/config/openim-rpc-third.yml
@@ -29,4 +29,12 @@ object:
accessKeyID: ''
accessKeySecret: ''
sessionToken: ''
+ publicRead: false
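+  # kodo is Qiniu Kodo object storage; the endpoint and bucketURL below are sample values and are
+  # expected to be replaced with your own bucket's S3-compatible endpoint and domain.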
+ kodo:
+ endpoint: "http://s3.cn-south-1.qiniucs.com"
+ bucket: "kodo-bucket-test"
+ bucketURL: "http://kodo-bucket-test-oetobfb.qiniudns.com"
+ accessKeyID: ''
+ accessKeySecret: ''
+ sessionToken: ''
publicRead: false
\ No newline at end of file
diff --git a/config/prometheus.yml b/config/prometheus.yml
new file mode 100644
index 000000000..5db41679f
--- /dev/null
+++ b/config/prometheus.yml
@@ -0,0 +1,83 @@
+# my global config
+global:
+ scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+ evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+ # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+ alertmanagers:
+ - static_configs:
+ - targets: ['internal_ip:19093']
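+        # 'internal_ip' is a placeholder and is expected to be replaced with the host's internal IP
+        # address; the same placeholder is used for every scrape target below.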
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+ - "instance-down-rules.yml"
+# - "first_rules.yml"
+# - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  # Monitoring targets captured by Prometheus
+
+  # Prometheus scrapes the application services
+ - job_name: 'node_exporter'
+ static_configs:
+ - targets: [ 'internal_ip:20114' ]
+ - job_name: 'openimserver-openim-api'
+ static_configs:
+ - targets: [ 'internal_ip:20113' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-msggateway'
+ static_configs:
+ - targets: [ 'internal_ip:20112' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-msgtransfer'
+ static_configs:
+ - targets: [ 'internal_ip:20111', 'internal_ip:20110', 'internal_ip:20109', 'internal_ip:20108' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-push'
+ static_configs:
+ - targets: [ 'internal_ip:20107' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-rpc-auth'
+ static_configs:
+ - targets: [ 'internal_ip:20106' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-rpc-conversation'
+ static_configs:
+ - targets: [ 'internal_ip:20105' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-rpc-friend'
+ static_configs:
+ - targets: [ 'internal_ip:20104' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-rpc-group'
+ static_configs:
+ - targets: [ 'internal_ip:20103' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-rpc-msg'
+ static_configs:
+ - targets: [ 'internal_ip:20102' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-rpc-third'
+ static_configs:
+ - targets: [ 'internal_ip:20101' ]
+ labels:
+ namespace: 'default'
+ - job_name: 'openimserver-openim-rpc-user'
+ static_configs:
+ - targets: [ 'internal_ip:20100' ]
+ labels:
+ namespace: 'default'
\ No newline at end of file
diff --git a/config/redis.yml b/config/redis.yml
index 6fe0dd02d..87abed0e1 100644
--- a/config/redis.yml
+++ b/config/redis.yml
@@ -3,4 +3,4 @@ username: ''
password: openIM123
clusterMode: false
db: 0
-maxRetry: 10
\ No newline at end of file
+maxRetry: 10
diff --git a/config/webhooks.yml b/config/webhooks.yml
index c7839d4f2..11a85ba0c 100644
--- a/config/webhooks.yml
+++ b/config/webhooks.yml
@@ -13,6 +13,9 @@ afterUpdateUserInfoEx:
afterSendSingleMsg:
enable: false
timeout: 5
+  # Only messages whose sendID or recvID is listed in attentionIds will trigger the callback.
+  # If not set, all user messages will trigger the callback.
+ attentionIds: []
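+  # Example (hypothetical user IDs): attentionIds: [ "user_1001", "user_1002" ] would limit this
+  # callback to single-chat messages whose sendID or recvID is one of those users.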
beforeSendGroupMsg:
enable: false
timeout: 5
diff --git a/docker-compose.yml b/docker-compose.yml
index d72c1a2fa..8cc1f24b2 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -140,5 +140,50 @@ services:
networks:
- openim
+ prometheus:
+ image: ${PROMETHEUS_IMAGE}
+ container_name: prometheus
+ restart: always
+ volumes:
+ - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
+ - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
+ - ${DATA_DIR}/components/prometheus/data:/prometheus
+ command:
+ - '--config.file=/etc/prometheus/prometheus.yml'
+ - '--storage.tsdb.path=/prometheus'
+ ports:
+ - "19091:9090"
+ networks:
+ - openim
+
+ alertmanager:
+ image: ${ALERTMANAGER_IMAGE}
+ container_name: alertmanager
+ restart: always
+ volumes:
+ - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
+ - ./config/email.tmpl:/etc/alertmanager/email.tmpl
+ ports:
+ - "19093:9093"
+ networks:
+ - openim
+
+ grafana:
+ image: ${GRAFANA_IMAGE}
+ container_name: grafana
+ user: root
+ restart: always
+ environment:
+ - GF_SECURITY_ALLOW_EMBEDDING=true
+ - GF_SESSION_COOKIE_SAMESITE=none
+ - GF_SESSION_COOKIE_SECURE=true
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
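+      # Note: anonymous Admin access and embedding are enabled above for convenience; these settings
+      # are worth tightening in production deployments.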
+ ports:
+ - "13000:3000"
+ volumes:
+ - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
+ networks:
+ - openim
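+
+# With the services above, Prometheus is published on host port 19091, Alertmanager on 19093 and
+# Grafana on 13000 (per the port mappings), e.g. http://<host>:13000 for the Grafana UI.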
diff --git a/docs/contrib/util-scripts.md b/docs/contrib/util-scripts.md
index 0bf6f23e5..30da871a4 100644
--- a/docs/contrib/util-scripts.md
+++ b/docs/contrib/util-scripts.md
@@ -32,7 +32,7 @@ This script offers a variety of utilities and helpers to enhance and simplify op
## brief descriptions of each function
-**Englist:**
+**English:**
1. `openim::util::ensure-gnu-sed` - Determines if GNU version of `sed` exists on the system and sets its name.
2. `openim::util::ensure-gnu-date` - Determines if GNU version of `date` exists on the system and sets its name.
3. `openim::util::check-file-in-alphabetical-order` - Checks if a file is sorted in alphabetical order.
diff --git a/docs/contributing/CONTRIBUTING-JP.md b/docs/contributing/CONTRIBUTING-JP.md
index 86bbfefcd..1798d4e3d 100644
--- a/docs/contributing/CONTRIBUTING-JP.md
+++ b/docs/contributing/CONTRIBUTING-JP.md
@@ -1,7 +1,7 @@
# How do I contribute code to OpenIM
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/contributing/CONTRIBUTING-PL.md b/docs/contributing/CONTRIBUTING-PL.md
index 86bbfefcd..1798d4e3d 100644
--- a/docs/contributing/CONTRIBUTING-PL.md
+++ b/docs/contributing/CONTRIBUTING-PL.md
@@ -1,7 +1,7 @@
# How do I contribute code to OpenIM
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_cs.md b/docs/readme/README_cs.md
index 5a9eeb232..63f730a51 100644
--- a/docs/readme/README_cs.md
+++ b/docs/readme/README_cs.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_da.md b/docs/readme/README_da.md
index 1b776ddb8..60d97348a 100644
--- a/docs/readme/README_da.md
+++ b/docs/readme/README_da.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_el.md b/docs/readme/README_el.md
index 252521f35..da01fcb47 100644
--- a/docs/readme/README_el.md
+++ b/docs/readme/README_el.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_es.md b/docs/readme/README_es.md
index cd1b7290e..f123b85c3 100644
--- a/docs/readme/README_es.md
+++ b/docs/readme/README_es.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_fa.md b/docs/readme/README_fa.md
index 49f05cd4c..7a1512e84 100644
--- a/docs/readme/README_fa.md
+++ b/docs/readme/README_fa.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_fr.md b/docs/readme/README_fr.md
index e707fc59b..aaf7a9bd4 100644
--- a/docs/readme/README_fr.md
+++ b/docs/readme/README_fr.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_hu.md b/docs/readme/README_hu.md
index 57f006692..61013c334 100644
--- a/docs/readme/README_hu.md
+++ b/docs/readme/README_hu.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_ja.md b/docs/readme/README_ja.md
index bd94b1153..5a083c1bf 100644
--- a/docs/readme/README_ja.md
+++ b/docs/readme/README_ja.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_ko.md b/docs/readme/README_ko.md
index bd7a1aed3..ebcdd71ee 100644
--- a/docs/readme/README_ko.md
+++ b/docs/readme/README_ko.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_tr.md b/docs/readme/README_tr.md
index ca2a816db..3cf19f537 100644
--- a/docs/readme/README_tr.md
+++ b/docs/readme/README_tr.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_uk.md b/docs/readme/README_uk.md
index 30bc76730..81820590b 100644
--- a/docs/readme/README_uk.md
+++ b/docs/readme/README_uk.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/docs/readme/README_vi.md b/docs/readme/README_vi.md
index e500da6d2..a6ab39253 100644
--- a/docs/readme/README_vi.md
+++ b/docs/readme/README_vi.md
@@ -19,7 +19,7 @@
- Englist ·
+ English ·
中文 ·
Українська ·
Česky ·
diff --git a/go.mod b/go.mod
index e34e3e4bd..71301d290 100644
--- a/go.mod
+++ b/go.mod
@@ -11,10 +11,9 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/gorilla/websocket v1.5.1
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible // indirect
github.com/mitchellh/mapstructure v1.5.0
- github.com/openimsdk/protocol v0.0.65
- github.com/openimsdk/tools v0.0.49-alpha.19
+ github.com/openimsdk/protocol v0.0.69-alpha.42
+ github.com/openimsdk/tools v0.0.49-alpha.55
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.18.0
github.com/stretchr/testify v1.9.0
@@ -35,7 +34,7 @@ require (
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/kelindar/bitmap v1.5.2
github.com/likexian/gokit v0.25.13
- github.com/openimsdk/gomake v0.0.13
+ github.com/openimsdk/gomake v0.0.14-alpha.5
github.com/redis/go-redis/v9 v9.4.0
github.com/robfig/cron/v3 v3.0.1
github.com/shirou/gopsutil v3.21.11+incompatible
@@ -54,6 +53,24 @@ require (
cloud.google.com/go/longrunning v0.5.4 // indirect
cloud.google.com/go/storage v1.36.0 // indirect
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect
+ github.com/aws/aws-sdk-go-v2 v1.23.1 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.25.4 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.16.3 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.43.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.17.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 // indirect
+ github.com/aws/smithy-go v1.17.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bytedance/sonic v1.9.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -101,6 +118,7 @@ require (
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
+ github.com/lestrrat-go/strftime v1.0.6 // indirect
github.com/lithammer/shortuuid v3.0.0+incompatible // indirect
github.com/magefile/mage v1.15.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
@@ -119,6 +137,7 @@ require (
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/qiniu/go-sdk/v7 v7.18.2 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rs/xid v1.5.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
@@ -168,7 +187,6 @@ require (
require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
- github.com/lestrrat-go/strftime v1.0.6 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/spf13/cobra v1.8.0
github.com/ugorji/go/codec v1.2.11 // indirect
diff --git a/go.sum b/go.sum
index b2fa7f318..53060b198 100644
--- a/go.sum
+++ b/go.sum
@@ -21,6 +21,42 @@ github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/aws/aws-sdk-go-v2 v1.23.1 h1:qXaFsOOMA+HsZtX8WoCa+gJnbyW7qyFFBlPqvTSzbaI=
+github.com/aws/aws-sdk-go-v2 v1.23.1/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 h1:ZY3108YtBNq96jNZTICHxN1gSBSbnvIdYwwqnvCV4Mc=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1/go.mod h1:t8PYl/6LzdAqsU4/9tz28V/kU+asFePvpOMkdul0gEQ=
+github.com/aws/aws-sdk-go-v2/config v1.25.4 h1:r+X1x8QI6FEPdJDWCNBDZHyAcyFwSjHN8q8uuus+Axs=
+github.com/aws/aws-sdk-go-v2/config v1.25.4/go.mod h1:8GTjImECskr7D88P/Nn9uM4M4rLY9i77hLJZgkZEWV8=
+github.com/aws/aws-sdk-go-v2/credentials v1.16.3 h1:8PeI2krzzjDJ5etmgaMiD1JswsrLrWvKKu/uBUtNy1g=
+github.com/aws/aws-sdk-go-v2/credentials v1.16.3/go.mod h1:Kdh/okh+//vQ/AjEt81CjvkTo64+/zIE4OewP7RpfXk=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5 h1:KehRNiVzIfAcj6gw98zotVbb/K67taJE0fkfgM6vzqU=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5/go.mod h1:VhnExhw6uXy9QzetvpXDolo1/hjhx4u9qukBGkuUwjs=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4 h1:LAm3Ycm9HJfbSCd5I+wqC2S9Ej7FPrgr5CQoOljJZcE=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4/go.mod h1:xEhvbJcyUf/31yfGSQBe01fukXwXJ0gxDp7rLfymWE0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4 h1:4GV0kKZzUxiWxSVpn/9gwR0g21NF1Jsyduzo9rHgC/Q=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4/go.mod h1:dYvTNAggxDZy6y1AF7YDwXsPuHFy/VNEpEI/2dWK9IU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4 h1:40Q4X5ebZruRtknEZH/bg91sT5pR853F7/1X9QRbI54=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4/go.mod h1:u77N7eEECzUv7F0xl2gcfK/vzc8wcjWobpy+DcrLJ5E=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 h1:rpkF4n0CyFcrJUG/rNNohoTmhtWlFTRI4BsZOh9PvLs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4 h1:6DRKQc+9cChgzL5gplRGusI5dBGeiEod4m/pmGbcX48=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4/go.mod h1:s8ORvrW4g4v7IvYKIAoBg17w3GQ+XuwXDXYrQ5SkzU0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4 h1:rdovz3rEu0vZKbzoMYPTehp0E8veoE9AyfzqCr5Eeao=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4/go.mod h1:aYCGNjyUCUelhofxlZyj63srdxWUSsBSGg5l6MCuXuE=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4 h1:o3DcfCxGDIT20pTbVKVhp3vWXOj/VvgazNJvumWeYW0=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4/go.mod h1:Uy0KVOxuTK2ne+/PKQ+VvEeWmjMMksE17k/2RK/r5oM=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.43.1 h1:1w11lfXOa8HoHoSlNtt4mqv/N3HmDOa+OnUH3Y9DHm8=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.43.1/go.mod h1:dqJ5JBL0clzgHriH35Amx3LRFY6wNIPUX7QO/BerSBo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.17.3 h1:CdsSOGlFF3Pn+koXOIpTtvX7st0IuGsZ8kJqcWMlX54=
+github.com/aws/aws-sdk-go-v2/service/sso v1.17.3/go.mod h1:oA6VjNsLll2eVuUoF2D+CMyORgNzPEW/3PyUdq6WQjI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1 h1:cbRqFTVnJV+KRpwFl76GJdIZJKKCdTPnjUZ7uWh3pIU=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1/go.mod h1:hHL974p5auvXlZPIjJTblXJpbkfK4klBczlsEaMCGVY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 h1:yEvZ4neOQ/KpUqyR+X0ycUTW/kVRNR4nDZ38wStHGAA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.25.4/go.mod h1:feTnm2Tk/pJxdX+eooEsxvlvTWBvDm6CasRZ+JOs2IY=
+github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI=
+github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -49,6 +85,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -95,12 +132,18 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U=
github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
@@ -216,10 +259,16 @@ github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8=
@@ -268,16 +317,17 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
-github.com/openimsdk/gomake v0.0.13 h1:xLDe/moqgWpRoptHzI4packAWzs4C16b+sVY+txNJp0=
-github.com/openimsdk/gomake v0.0.13/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
-github.com/openimsdk/protocol v0.0.65 h1:SPT9qyUsFRTTKSKb/FjpS+xr6sxz/Kbnu+su1bxYagc=
-github.com/openimsdk/protocol v0.0.65/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
-github.com/openimsdk/tools v0.0.49-alpha.19 h1:CbASL0yefRSVAmWPVeRnhF7wZKd6umLfz31CIhEgrBs=
-github.com/openimsdk/tools v0.0.49-alpha.19/go.mod h1:g7mkHXYUPi0/8aAX8VPMHpnb3hqdV69Jph+bXOGvvNM=
+github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
+github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
+github.com/openimsdk/protocol v0.0.69-alpha.42 h1:Vwuru2NtyTHuqaM+1JGxcoGvP25QWjS92oI0zGJp+lM=
+github.com/openimsdk/protocol v0.0.69-alpha.42/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
+github.com/openimsdk/tools v0.0.49-alpha.55 h1:KPgC53oqiwZYssLKljhtXbWXifMlTj2SSQEusj4Uf4k=
+github.com/openimsdk/tools v0.0.49-alpha.55/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -294,12 +344,18 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdkk=
+github.com/qiniu/go-sdk/v7 v7.18.2 h1:vk9eo5OO7aqgAOPF0Ytik/gt7CMKuNgzC/IPkhda6rk=
+github.com/qiniu/go-sdk/v7 v7.18.2/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w=
+github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.4.0 h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk=
github.com/redis/go-redis/v9 v9.4.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
@@ -332,6 +388,7 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -404,7 +461,9 @@ golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
@@ -431,6 +490,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
@@ -453,20 +513,26 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
@@ -523,8 +589,10 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@@ -533,6 +601,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo=
diff --git a/internal/api/conversation.go b/internal/api/conversation.go
index f273eaa4a..360313ea8 100644
--- a/internal/api/conversation.go
+++ b/internal/api/conversation.go
@@ -50,3 +50,15 @@ func (o *ConversationApi) SetConversations(c *gin.Context) {
func (o *ConversationApi) GetConversationOfflinePushUserIDs(c *gin.Context) {
a2r.Call(conversation.ConversationClient.GetConversationOfflinePushUserIDs, o.Client, c)
}
+
+func (o *ConversationApi) GetFullOwnerConversationIDs(c *gin.Context) {
+ a2r.Call(conversation.ConversationClient.GetFullOwnerConversationIDs, o.Client, c)
+}
+
+func (o *ConversationApi) GetIncrementalConversation(c *gin.Context) {
+ a2r.Call(conversation.ConversationClient.GetIncrementalConversation, o.Client, c)
+}
+
+func (o *ConversationApi) GetOwnerConversation(c *gin.Context) {
+ a2r.Call(conversation.ConversationClient.GetOwnerConversation, o.Client, c)
+}
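
These conversation handlers, like the rest of the API layer touched in this diff, are one-liners that delegate to a gRPC client method through `a2r.Call`. Below is a minimal, hypothetical sketch of what such an adapter does (bind the JSON body, call the RPC, serialize the result); it is not the real `a2r` implementation, and the endpoint, types, and error codes are invented for illustration.

```go
package main

import (
	"context"
	"net/http"

	"github.com/gin-gonic/gin"
)

// call binds the request body, invokes the RPC-style function, and writes the
// result as JSON. This mirrors the idea behind a2r.Call; the real helper also
// handles operation IDs, error-code mapping, and response options.
func call[Req any, Resp any](rpc func(ctx context.Context, req *Req) (*Resp, error)) gin.HandlerFunc {
	return func(c *gin.Context) {
		var req Req
		if err := c.ShouldBindJSON(&req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"errCode": 1001, "errMsg": err.Error()})
			return
		}
		resp, err := rpc(c.Request.Context(), &req)
		if err != nil {
			c.JSON(http.StatusInternalServerError, gin.H{"errCode": 1002, "errMsg": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{"errCode": 0, "data": resp})
	}
}

type echoReq struct {
	Msg string `json:"msg"`
}

type echoResp struct {
	Echo string `json:"echo"`
}

func main() {
	r := gin.Default()
	// Hypothetical endpoint wired the same way the conversation routes are.
	r.POST("/demo/echo", call(func(_ context.Context, req *echoReq) (*echoResp, error) {
		return &echoResp{Echo: req.Msg}, nil
	}))
	_ = r.Run(":8080")
}
```
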
diff --git a/internal/api/friend.go b/internal/api/friend.go
index 1fea38b31..f9f15fb24 100644
--- a/internal/api/friend.go
+++ b/internal/api/friend.go
@@ -16,8 +16,9 @@ package api
import (
"github.com/gin-gonic/gin"
+
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
- "github.com/openimsdk/protocol/friend"
+ "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/tools/a2r"
)
@@ -28,68 +29,83 @@ func NewFriendApi(client rpcclient.Friend) FriendApi {
}
func (o *FriendApi) ApplyToAddFriend(c *gin.Context) {
- a2r.Call(friend.FriendClient.ApplyToAddFriend, o.Client, c)
+ a2r.Call(relation.FriendClient.ApplyToAddFriend, o.Client, c)
}
func (o *FriendApi) RespondFriendApply(c *gin.Context) {
- a2r.Call(friend.FriendClient.RespondFriendApply, o.Client, c)
+ a2r.Call(relation.FriendClient.RespondFriendApply, o.Client, c)
}
func (o *FriendApi) DeleteFriend(c *gin.Context) {
- a2r.Call(friend.FriendClient.DeleteFriend, o.Client, c)
+ a2r.Call(relation.FriendClient.DeleteFriend, o.Client, c)
}
func (o *FriendApi) GetFriendApplyList(c *gin.Context) {
- a2r.Call(friend.FriendClient.GetPaginationFriendsApplyTo, o.Client, c)
+ a2r.Call(relation.FriendClient.GetPaginationFriendsApplyTo, o.Client, c)
}
func (o *FriendApi) GetDesignatedFriendsApply(c *gin.Context) {
- a2r.Call(friend.FriendClient.GetDesignatedFriendsApply, o.Client, c)
+ a2r.Call(relation.FriendClient.GetDesignatedFriendsApply, o.Client, c)
}
func (o *FriendApi) GetSelfApplyList(c *gin.Context) {
- a2r.Call(friend.FriendClient.GetPaginationFriendsApplyFrom, o.Client, c)
+ a2r.Call(relation.FriendClient.GetPaginationFriendsApplyFrom, o.Client, c)
}
func (o *FriendApi) GetFriendList(c *gin.Context) {
- a2r.Call(friend.FriendClient.GetPaginationFriends, o.Client, c)
+ a2r.Call(relation.FriendClient.GetPaginationFriends, o.Client, c)
}
func (o *FriendApi) GetDesignatedFriends(c *gin.Context) {
- a2r.Call(friend.FriendClient.GetDesignatedFriends, o.Client, c)
+ a2r.Call(relation.FriendClient.GetDesignatedFriends, o.Client, c)
}
func (o *FriendApi) SetFriendRemark(c *gin.Context) {
- a2r.Call(friend.FriendClient.SetFriendRemark, o.Client, c)
+ a2r.Call(relation.FriendClient.SetFriendRemark, o.Client, c)
}
func (o *FriendApi) AddBlack(c *gin.Context) {
- a2r.Call(friend.FriendClient.AddBlack, o.Client, c)
+ a2r.Call(relation.FriendClient.AddBlack, o.Client, c)
}
func (o *FriendApi) GetPaginationBlacks(c *gin.Context) {
- a2r.Call(friend.FriendClient.GetPaginationBlacks, o.Client, c)
+ a2r.Call(relation.FriendClient.GetPaginationBlacks, o.Client, c)
}
func (o *FriendApi) RemoveBlack(c *gin.Context) {
- a2r.Call(friend.FriendClient.RemoveBlack, o.Client, c)
+ a2r.Call(relation.FriendClient.RemoveBlack, o.Client, c)
}
func (o *FriendApi) ImportFriends(c *gin.Context) {
- a2r.Call(friend.FriendClient.ImportFriends, o.Client, c)
+ a2r.Call(relation.FriendClient.ImportFriends, o.Client, c)
}
func (o *FriendApi) IsFriend(c *gin.Context) {
- a2r.Call(friend.FriendClient.IsFriend, o.Client, c)
+ a2r.Call(relation.FriendClient.IsFriend, o.Client, c)
}
func (o *FriendApi) GetFriendIDs(c *gin.Context) {
- a2r.Call(friend.FriendClient.GetFriendIDs, o.Client, c)
+ a2r.Call(relation.FriendClient.GetFriendIDs, o.Client, c)
}
func (o *FriendApi) GetSpecifiedFriendsInfo(c *gin.Context) {
- a2r.Call(friend.FriendClient.GetSpecifiedFriendsInfo, o.Client, c)
+ a2r.Call(relation.FriendClient.GetSpecifiedFriendsInfo, o.Client, c)
}
+
func (o *FriendApi) UpdateFriends(c *gin.Context) {
- a2r.Call(friend.FriendClient.UpdateFriends, o.Client, c)
+ a2r.Call(relation.FriendClient.UpdateFriends, o.Client, c)
+}
+
+func (o *FriendApi) GetIncrementalFriends(c *gin.Context) {
+ a2r.Call(relation.FriendClient.GetIncrementalFriends, o.Client, c)
+}
+
+// GetIncrementalBlacks is currently unused.
+// Deprecated: this method may be removed in a future version.
+func (o *FriendApi) GetIncrementalBlacks(c *gin.Context) {
+ a2r.Call(relation.FriendClient.GetIncrementalBlacks, o.Client, c)
+}
+
+func (o *FriendApi) GetFullFriendUserIDs(c *gin.Context) {
+ a2r.Call(relation.FriendClient.GetFullFriendUserIDs, o.Client, c)
}
diff --git a/internal/api/group.go b/internal/api/group.go
index 6079c5343..bff008974 100644
--- a/internal/api/group.go
+++ b/internal/api/group.go
@@ -65,6 +65,7 @@ func (o *GroupApi) GetGroupUsersReqApplicationList(c *gin.Context) {
func (o *GroupApi) GetGroupsInfo(c *gin.Context) {
a2r.Call(group.GroupClient.GetGroupsInfo, o.Client, c)
+ //a2r.Call(group.GroupClient.GetGroupsInfo, o.Client, c, a2r.NewNilReplaceOption(group.GroupClient.GetGroupsInfo))
}
func (o *GroupApi) KickGroupMember(c *gin.Context) {
@@ -73,6 +74,7 @@ func (o *GroupApi) KickGroupMember(c *gin.Context) {
func (o *GroupApi) GetGroupMembersInfo(c *gin.Context) {
a2r.Call(group.GroupClient.GetGroupMembersInfo, o.Client, c)
+ //a2r.Call(group.GroupClient.GetGroupMembersInfo, o.Client, c, a2r.NewNilReplaceOption(group.GroupClient.GetGroupMembersInfo))
}
func (o *GroupApi) GetGroupMemberList(c *gin.Context) {
@@ -134,3 +136,23 @@ func (o *GroupApi) GetGroups(c *gin.Context) {
func (o *GroupApi) GetGroupMemberUserIDs(c *gin.Context) {
a2r.Call(group.GroupClient.GetGroupMemberUserIDs, o.Client, c)
}
+
+func (o *GroupApi) GetIncrementalJoinGroup(c *gin.Context) {
+ a2r.Call(group.GroupClient.GetIncrementalJoinGroup, o.Client, c)
+}
+
+func (o *GroupApi) GetIncrementalGroupMember(c *gin.Context) {
+ a2r.Call(group.GroupClient.GetIncrementalGroupMember, o.Client, c)
+}
+
+func (o *GroupApi) GetIncrementalGroupMemberBatch(c *gin.Context) {
+ a2r.Call(group.GroupClient.BatchGetIncrementalGroupMember, o.Client, c)
+}
+
+func (o *GroupApi) GetFullGroupMemberUserIDs(c *gin.Context) {
+ a2r.Call(group.GroupClient.GetFullGroupMemberUserIDs, o.Client, c)
+}
+
+func (o *GroupApi) GetFullJoinGroupIDs(c *gin.Context) {
+ a2r.Call(group.GroupClient.GetFullJoinGroupIDs, o.Client, c)
+}
diff --git a/internal/api/init.go b/internal/api/init.go
index 23866c4a0..e83dfc2ea 100644
--- a/internal/api/init.go
+++ b/internal/api/init.go
@@ -29,7 +29,6 @@ import (
"time"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
- ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
@@ -72,10 +71,8 @@ func Start(ctx context.Context, index int, config *Config) error {
netDone <- struct{}{}
return
}
- p := ginprom.NewPrometheus("app", prommetrics.GetGinCusMetrics("Api"))
- p.SetListenAddress(fmt.Sprintf(":%d", prometheusPort))
- if err = p.Use(router); err != nil && err != http.ErrServerClosed {
- netErr = errs.WrapMsg(err, fmt.Sprintf("prometheus start err: %d", prometheusPort))
+ if err := prommetrics.ApiInit(prometheusPort); err != nil && err != http.ErrServerClosed {
+ netErr = errs.WrapMsg(err, fmt.Sprintf("api prometheus start err: %d", prometheusPort))
netDone <- struct{}{}
}
}()
diff --git a/internal/api/msg.go b/internal/api/msg.go
index 180342e59..ba63fbb66 100644
--- a/internal/api/msg.go
+++ b/internal/api/msg.go
@@ -101,6 +101,7 @@ func (m MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg)
SendTime: params.SendTime,
Options: options,
OfflinePushInfo: params.OfflinePushInfo,
+ Ex: params.Ex,
},
}
return &pbData
diff --git a/internal/api/router.go b/internal/api/router.go
index 600567178..0667c3e75 100644
--- a/internal/api/router.go
+++ b/internal/api/router.go
@@ -5,6 +5,10 @@ import (
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/go-playground/validator/v10"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+
+ "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
@@ -12,12 +16,25 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
"net/http"
"strings"
)
+func prommetricsGin() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ c.Next()
+ path := c.FullPath()
+ if c.Writer.Status() == http.StatusNotFound {
+ prommetrics.HttpCall("<404>", c.Request.Method, c.Writer.Status())
+ } else {
+ prommetrics.HttpCall(path, c.Request.Method, c.Writer.Status())
+ }
+ if resp := apiresp.GetGinApiResponse(c); resp != nil {
+ prommetrics.APICall(path, c.Request.Method, resp.ErrCode)
+ }
+ }
+}
+
func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.Engine {
disCov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
@@ -36,7 +53,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth)
thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL)
- r.Use(gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
+ r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
u := NewUserApi(*userRpc)
m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID)
userRouterGroup := r.Group("/user")
@@ -81,11 +98,14 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
friendRouterGroup.POST("/add_black", f.AddBlack)
friendRouterGroup.POST("/get_black_list", f.GetPaginationBlacks)
friendRouterGroup.POST("/remove_black", f.RemoveBlack)
+ friendRouterGroup.POST("/get_incremental_blacks", f.GetIncrementalBlacks)
friendRouterGroup.POST("/import_friend", f.ImportFriends)
friendRouterGroup.POST("/is_friend", f.IsFriend)
friendRouterGroup.POST("/get_friend_id", f.GetFriendIDs)
friendRouterGroup.POST("/get_specified_friends_info", f.GetSpecifiedFriendsInfo)
friendRouterGroup.POST("/update_friends", f.UpdateFriends)
+ friendRouterGroup.POST("/get_incremental_friends", f.GetIncrementalFriends)
+ friendRouterGroup.POST("/get_full_friend_user_ids", f.GetFullFriendUserIDs)
}
g := NewGroupApi(*groupRpc)
groupRouterGroup := r.Group("/group")
@@ -114,6 +134,11 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
groupRouterGroup.POST("/get_group_abstract_info", g.GetGroupAbstractInfo)
groupRouterGroup.POST("/get_groups", g.GetGroups)
groupRouterGroup.POST("/get_group_member_user_id", g.GetGroupMemberUserIDs)
+ groupRouterGroup.POST("/get_incremental_join_groups", g.GetIncrementalJoinGroup)
+ groupRouterGroup.POST("/get_incremental_group_members", g.GetIncrementalGroupMember)
+ groupRouterGroup.POST("/get_incremental_group_members_batch", g.GetIncrementalGroupMemberBatch)
+ groupRouterGroup.POST("/get_full_group_member_user_ids", g.GetFullGroupMemberUserIDs)
+ groupRouterGroup.POST("/get_full_join_group_ids", g.GetFullJoinGroupIDs)
}
// certificate
authRouterGroup := r.Group("/auth")
@@ -183,6 +208,9 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
conversationGroup.POST("/get_conversations", c.GetConversations)
conversationGroup.POST("/set_conversations", c.SetConversations)
conversationGroup.POST("/get_conversation_offline_push_user_ids", c.GetConversationOfflinePushUserIDs)
+ conversationGroup.POST("/get_full_conversation_ids", c.GetFullOwnerConversationIDs)
+ conversationGroup.POST("/get_incremental_conversations", c.GetIncrementalConversation)
+ conversationGroup.POST("/get_owner_conversation", c.GetOwnerConversation)
}
statisticsGroup := r.Group("/statistics")
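
The prommetricsGin middleware added in this file runs the handler chain first (c.Next()) and only then labels the call by its matched route, method, and status, falling back to "<404>" when no route matched. Below is a minimal, dependency-free sketch of that post-handler pattern; the in-memory counter stands in for the prommetrics counters, whose exact API is not shown in this diff.

```go
package main

import (
	"net/http"
	"sync"

	"github.com/gin-gonic/gin"
)

// counters is a stand-in for Prometheus counters: label key -> call count.
var (
	mu       sync.Mutex
	counters = map[string]int{}
)

func record(path, method string, status int) {
	mu.Lock()
	defer mu.Unlock()
	counters[method+" "+path+" "+http.StatusText(status)]++
}

// metricsMiddleware mirrors the prommetricsGin pattern: let the handlers run,
// then label the call by the matched route (or "<404>" when none matched).
func metricsMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next()
		path := c.FullPath()
		if c.Writer.Status() == http.StatusNotFound {
			path = "<404>"
		}
		record(path, c.Request.Method, c.Writer.Status())
	}
}

func main() {
	r := gin.New()
	r.Use(metricsMiddleware(), gin.Recovery())
	r.GET("/healthz", func(c *gin.Context) { c.String(http.StatusOK, "ok") })
	_ = r.Run(":8080")
}
```
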
diff --git a/internal/msggateway/client.go b/internal/msggateway/client.go
index 0581a025b..ded830c43 100644
--- a/internal/msggateway/client.go
+++ b/internal/msggateway/client.go
@@ -20,6 +20,7 @@ import (
"runtime/debug"
"sync"
"sync/atomic"
+ "time"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/protocol/constant"
@@ -72,6 +73,10 @@ type Client struct {
closed atomic.Bool
closedErr error
token string
+ hbCtx context.Context
+ hbCancel context.CancelFunc
+ subLock *sync.Mutex
+ subUserIDs map[string]struct{} // client conn subscription list
}
// ResetClient updates the client's state with new connection and context information.
@@ -88,14 +93,28 @@ func (c *Client) ResetClient(ctx *UserConnContext, conn LongConn, longConnServer
c.closed.Store(false)
c.closedErr = nil
c.token = ctx.GetToken()
+ c.hbCtx, c.hbCancel = context.WithCancel(c.ctx)
+ c.subLock = new(sync.Mutex)
+ if c.subUserIDs != nil {
+ clear(c.subUserIDs)
+ }
+ c.subUserIDs = make(map[string]struct{})
}
-func (c *Client) pingHandler(_ string) error {
+func (c *Client) pingHandler(appData string) error {
if err := c.conn.SetReadDeadline(pongWait); err != nil {
return err
}
- return c.writePongMsg()
+ log.ZDebug(c.ctx, "ping Handler Success.", "appData", appData)
+ return c.writePongMsg(appData)
+}
+
+func (c *Client) pongHandler(_ string) error {
+ if err := c.conn.SetReadDeadline(pongWait); err != nil {
+ return err
+ }
+ return nil
}
// readMessage continuously reads messages from the connection.
@@ -110,7 +129,9 @@ func (c *Client) readMessage() {
c.conn.SetReadLimit(maxMessageSize)
_ = c.conn.SetReadDeadline(pongWait)
+ c.conn.SetPongHandler(c.pongHandler)
c.conn.SetPingHandler(c.pingHandler)
+ c.activeHeartbeat(c.hbCtx)
for {
log.ZDebug(c.ctx, "readMessage")
@@ -141,12 +162,13 @@ func (c *Client) readMessage() {
return
case PingMessage:
- err := c.writePongMsg()
+ err := c.writePongMsg("")
log.ZError(c.ctx, "writePongMsg", err)
case CloseMessage:
c.closedErr = ErrClientClosed
return
+
default:
}
}
@@ -202,6 +224,8 @@ func (c *Client) handleMessage(message []byte) error {
resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq)
case WsSetBackgroundStatus:
resp, messageErr = c.setAppBackgroundStatus(ctx, binaryReq)
+ case WsSubUserOnlineStatus:
+ resp, messageErr = c.longConnServer.SubUserOnlineStatus(ctx, c, binaryReq)
default:
return fmt.Errorf(
"ReqIdentifier failed,sendID:%s,msgIncr:%s,reqIdentifier:%d",
@@ -226,15 +250,14 @@ func (c *Client) setAppBackgroundStatus(ctx context.Context, req *Req) ([]byte,
}
func (c *Client) close() {
+ c.w.Lock()
+ defer c.w.Unlock()
if c.closed.Load() {
return
}
-
- c.w.Lock()
- defer c.w.Unlock()
-
c.closed.Store(true)
c.conn.Close()
+ c.hbCancel() // Close server-initiated heartbeat.
c.longConnServer.UnRegister(c)
}
@@ -292,6 +315,14 @@ func (c *Client) KickOnlineMessage() error {
return err
}
+func (c *Client) PushUserOnlineStatus(data []byte) error {
+ resp := Resp{
+ ReqIdentifier: WsSubUserOnlineStatus,
+ Data: data,
+ }
+ return c.writeBinaryMsg(resp)
+}
+
func (c *Client) writeBinaryMsg(resp Resp) error {
if c.closed.Load() {
return nil
@@ -321,7 +352,29 @@ func (c *Client) writeBinaryMsg(resp Resp) error {
return c.conn.WriteMessage(MessageBinary, encodedBuf)
}
-func (c *Client) writePongMsg() error {
+// Actively initiate the heartbeat when the client platform is Web.
+func (c *Client) activeHeartbeat(ctx context.Context) {
+ if c.PlatformID == constant.WebPlatformID {
+ go func() {
+			log.ZDebug(ctx, "server-initiated heartbeat started.")
+ ticker := time.NewTicker(pingPeriod)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := c.writePingMsg(); err != nil {
+ log.ZWarn(c.ctx, "send Ping Message error.", err)
+ return
+ }
+ case <-c.hbCtx.Done():
+ return
+ }
+ }
+ }()
+ }
+}
+func (c *Client) writePingMsg() error {
if c.closed.Load() {
return nil
}
@@ -334,5 +387,28 @@ func (c *Client) writePongMsg() error {
return err
}
- return c.conn.WriteMessage(PongMessage, nil)
+ return c.conn.WriteMessage(PingMessage, nil)
+}
+
+func (c *Client) writePongMsg(appData string) error {
+	log.ZDebug(c.ctx, "write Pong Msg in Server", "appData", appData)
+	if c.closed.Load() {
+		return nil
+	}
+
+	c.w.Lock()
+	defer c.w.Unlock()
+
+ err := c.conn.SetWriteDeadline(writeWait)
+ if err != nil {
+ return errs.Wrap(err)
+ }
+ err = c.conn.WriteMessage(PongMessage, []byte(appData))
+ if err != nil {
+		log.ZWarn(c.ctx, "Write Pong Message error", errs.Wrap(err), "Pong msg", PongMessage)
+ }
+
+ return errs.Wrap(err)
}
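
The client changes implement both halves of the WebSocket keepalive: incoming pings are answered with pongs carrying the same appData and refresh the read deadline, while web clients additionally receive server-initiated pings on a pingPeriod ticker that stays below pongWait. The sketch below shows that heartbeat loop against a raw gorilla/websocket connection; it uses the library's time.Time deadlines directly rather than the project's LongConn wrapper, so the details differ slightly.

```go
package heartbeat

import (
	"context"
	"time"

	"github.com/gorilla/websocket"
)

const (
	writeWait  = 10 * time.Second
	pongWait   = 30 * time.Second
	pingPeriod = (pongWait * 9) / 10 // ping a bit faster than the read deadline expires
)

// keepAlive refreshes the read deadline whenever a ping or pong arrives and
// actively pings the peer so an idle but healthy connection never times out.
func keepAlive(ctx context.Context, conn *websocket.Conn) {
	_ = conn.SetReadDeadline(time.Now().Add(pongWait))
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(pongWait))
	})
	conn.SetPingHandler(func(appData string) error {
		// Answer the peer's ping and refresh the read deadline, as pingHandler does above.
		_ = conn.SetReadDeadline(time.Now().Add(pongWait))
		return conn.WriteControl(websocket.PongMessage, []byte(appData), time.Now().Add(writeWait))
	})

	go func() {
		ticker := time.NewTicker(pingPeriod)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(writeWait)); err != nil {
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()
}
```
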
diff --git a/internal/msggateway/compressor_test.go b/internal/msggateway/compressor_test.go
index 173c9bb20..952bd4d95 100644
--- a/internal/msggateway/compressor_test.go
+++ b/internal/msggateway/compressor_test.go
@@ -16,10 +16,10 @@ package msggateway
import (
"crypto/rand"
+ "github.com/stretchr/testify/assert"
"sync"
"testing"
-
- "github.com/stretchr/testify/assert"
+ "unsafe"
)
func mockRandom() []byte {
@@ -132,3 +132,8 @@ func BenchmarkDecompressWithSyncPool(b *testing.B) {
assert.Equal(b, nil, err)
}
}
+
+func TestName(t *testing.T) {
+ t.Log(unsafe.Sizeof(Client{}))
+
+}
diff --git a/internal/msggateway/constant.go b/internal/msggateway/constant.go
index 64664ac0a..dc5ad7786 100644
--- a/internal/msggateway/constant.go
+++ b/internal/msggateway/constant.go
@@ -43,6 +43,7 @@ const (
WSKickOnlineMsg = 2002
WsLogoutMsg = 2003
WsSetBackgroundStatus = 2004
+ WsSubUserOnlineStatus = 2005
WSDataError = 3001
)
@@ -53,6 +54,9 @@ const (
// Time allowed to read the next pong message from the peer.
pongWait = 30 * time.Second
+ // Send pings to peer with this period. Must be less than pongWait.
+ pingPeriod = (pongWait * 9) / 10
+
// Maximum message size allowed from peer.
maxMessageSize = 51200
)
diff --git a/internal/msggateway/hub_server.go b/internal/msggateway/hub_server.go
index 8ff6d1001..28c227162 100644
--- a/internal/msggateway/hub_server.go
+++ b/internal/msggateway/hub_server.go
@@ -19,18 +19,27 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
+ "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway"
+ "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
+ "github.com/openimsdk/tools/mq/memamq"
+ "github.com/openimsdk/tools/utils/datautil"
"google.golang.org/grpc"
+ "sync/atomic"
)
func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
s.LongConnServer.SetDiscoveryRegistry(disCov, config)
msggateway.RegisterMsgGatewayServer(server, s)
+ s.userRcp = rpcclient.NewUserRpcClient(disCov, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
+ if s.ready != nil {
+ return s.ready(s)
+ }
return nil
}
@@ -50,18 +59,23 @@ type Server struct {
LongConnServer LongConnServer
config *Config
pushTerminal map[int]struct{}
+ ready func(srv *Server) error
+ userRcp rpcclient.UserRpcClient
+ queue *memamq.MemoryQueue
}
func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
s.LongConnServer = LongConnServer
}
-func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config) *Server {
+func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config, ready func(srv *Server) error) *Server {
s := &Server{
rpcPort: rpcPort,
LongConnServer: longConnServer,
pushTerminal: make(map[int]struct{}),
config: conf,
+ ready: ready,
+ queue: memamq.NewMemoryQueue(512, 1024*16),
}
s.pushTerminal[constant.IOSPlatformID] = struct{}{}
s.pushTerminal[constant.AndroidPlatformID] = struct{}{}
@@ -117,55 +131,93 @@ func (s *Server) OnlineBatchPushOneMsg(ctx context.Context, req *msggateway.Onli
return nil, nil
}
-func (s *Server) SuperGroupOnlineBatchPushOneMsg(ctx context.Context, req *msggateway.OnlineBatchPushOneMsgReq,
-) (*msggateway.OnlineBatchPushOneMsgResp, error) {
- var singleUserResults []*msggateway.SingleMsgToUserResults
- for _, v := range req.PushToUserIDs {
- var resp []*msggateway.SingleMsgToUserPlatform
- results := &msggateway.SingleMsgToUserResults{
- UserID: v,
+func (s *Server) pushToUser(ctx context.Context, userID string, msgData *sdkws.MsgData) *msggateway.SingleMsgToUserResults {
+ clients, ok := s.LongConnServer.GetUserAllCons(userID)
+ if !ok {
+ log.ZDebug(ctx, "push user not online", "userID", userID)
+ return &msggateway.SingleMsgToUserResults{
+ UserID: userID,
}
- clients, ok := s.LongConnServer.GetUserAllCons(v)
- if !ok {
- log.ZDebug(ctx, "push user not online", "userID", v)
- results.Resp = resp
- singleUserResults = append(singleUserResults, results)
+ }
+ log.ZDebug(ctx, "push user online", "clients", clients, "userID", userID)
+ result := &msggateway.SingleMsgToUserResults{
+ UserID: userID,
+ Resp: make([]*msggateway.SingleMsgToUserPlatform, 0, len(clients)),
+ }
+ for _, client := range clients {
+ if client == nil {
continue
}
-
- log.ZDebug(ctx, "push user online", "clients", clients, "userID", v)
- for _, client := range clients {
- if client == nil {
- continue
+ userPlatform := &msggateway.SingleMsgToUserPlatform{
+ RecvPlatFormID: int32(client.PlatformID),
+ }
+ if !client.IsBackground ||
+ (client.IsBackground && client.PlatformID != constant.IOSPlatformID) {
+ err := client.PushMessage(ctx, msgData)
+ if err != nil {
+ userPlatform.ResultCode = int64(servererrs.ErrPushMsgErr.Code())
+ } else {
+ if _, ok := s.pushTerminal[client.PlatformID]; ok {
+ result.OnlinePush = true
+ }
}
+ } else {
+ userPlatform.ResultCode = int64(servererrs.ErrIOSBackgroundPushErr.Code())
+ }
+ result.Resp = append(result.Resp, userPlatform)
+ }
+ return result
+}
- userPlatform := &msggateway.SingleMsgToUserPlatform{
- RecvPlatFormID: int32(client.PlatformID),
+func (s *Server) SuperGroupOnlineBatchPushOneMsg(ctx context.Context, req *msggateway.OnlineBatchPushOneMsgReq) (*msggateway.OnlineBatchPushOneMsgResp, error) {
+ if len(req.PushToUserIDs) == 0 {
+ return &msggateway.OnlineBatchPushOneMsgResp{}, nil
+ }
+ ch := make(chan *msggateway.SingleMsgToUserResults, len(req.PushToUserIDs))
+ var count atomic.Int64
+ count.Add(int64(len(req.PushToUserIDs)))
+ for i := range req.PushToUserIDs {
+ userID := req.PushToUserIDs[i]
+ err := s.queue.PushCtx(ctx, func() {
+ ch <- s.pushToUser(ctx, userID, req.MsgData)
+ if count.Add(-1) == 0 {
+ close(ch)
}
- if !client.IsBackground ||
- (client.IsBackground && client.PlatformID != constant.IOSPlatformID) {
- err := client.PushMessage(ctx, req.MsgData)
- if err != nil {
- userPlatform.ResultCode = int64(servererrs.ErrPushMsgErr.Code())
- resp = append(resp, userPlatform)
- } else {
- if _, ok := s.pushTerminal[client.PlatformID]; ok {
- results.OnlinePush = true
- resp = append(resp, userPlatform)
- }
- }
- } else {
- userPlatform.ResultCode = int64(servererrs.ErrIOSBackgroundPushErr.Code())
- resp = append(resp, userPlatform)
+ })
+ if err != nil {
+			log.ZError(ctx, "pushToUser MemoryQueue failed", err, "userID", userID)
+			// Send the placeholder result before decrementing the counter so the
+			// channel cannot be closed ahead of this send.
+			ch <- &msggateway.SingleMsgToUserResults{
+				UserID: userID,
+			}
+			if count.Add(-1) == 0 {
+				close(ch)
+			}
}
- results.Resp = resp
- singleUserResults = append(singleUserResults, results)
}
-
- return &msggateway.OnlineBatchPushOneMsgResp{
- SinglePushResult: singleUserResults,
- }, nil
+ resp := &msggateway.OnlineBatchPushOneMsgResp{
+ SinglePushResult: make([]*msggateway.SingleMsgToUserResults, 0, len(req.PushToUserIDs)),
+ }
+ for {
+ select {
+ case <-ctx.Done():
+ log.ZError(ctx, "SuperGroupOnlineBatchPushOneMsg ctx done", context.Cause(ctx))
+ userIDSet := datautil.SliceSet(req.PushToUserIDs)
+ for _, results := range resp.SinglePushResult {
+ delete(userIDSet, results.UserID)
+ }
+ for userID := range userIDSet {
+ resp.SinglePushResult = append(resp.SinglePushResult, &msggateway.SingleMsgToUserResults{
+ UserID: userID,
+ })
+ }
+ return resp, nil
+ case res, ok := <-ch:
+ if !ok {
+ return resp, nil
+ }
+ resp.SinglePushResult = append(resp.SinglePushResult, res)
+ }
+ }
}
func (s *Server) KickUserOffline(
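
The rewritten SuperGroupOnlineBatchPushOneMsg fans per-user pushes out through a memory queue and collects results on a channel buffered to the number of users; an atomic countdown closes the channel after the last result, and the ctx.Done branch backfills empty results for users that never completed. Here is a stdlib-only sketch of that fan-out/fan-in shape, with a plain worker pool standing in for memamq.MemoryQueue and a boolean standing in for the real push result.

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

type result struct {
	UserID string
	Online bool
}

// batchPush runs one task per user on `workers` goroutines and gathers results.
// The buffered channel never blocks senders, and the last finished task closes
// it, so the collector simply reads until close or until the context is done.
func batchPush(ctx context.Context, userIDs []string, push func(string) bool, workers int) []result {
	if len(userIDs) == 0 {
		return nil
	}
	ch := make(chan result, len(userIDs))
	jobs := make(chan string)
	var remaining atomic.Int64
	remaining.Add(int64(len(userIDs)))

	for i := 0; i < workers; i++ {
		go func() {
			for userID := range jobs {
				ch <- result{UserID: userID, Online: push(userID)}
				if remaining.Add(-1) == 0 {
					close(ch) // the last task closes the results channel
				}
			}
		}()
	}
	go func() {
		defer close(jobs)
		for _, userID := range userIDs {
			select {
			case jobs <- userID:
			case <-ctx.Done():
				return
			}
		}
	}()

	results := make([]result, 0, len(userIDs))
	for {
		select {
		case <-ctx.Done():
			return results // callers fill in defaults for missing users, as the gateway does
		case r, ok := <-ch:
			if !ok {
				return results
			}
			results = append(results, r)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(batchPush(ctx, []string{"u1", "u2", "u3"}, func(string) bool { return true }, 2))
}
```
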
diff --git a/internal/msggateway/init.go b/internal/msggateway/init.go
index f4d8b0381..44e79e412 100644
--- a/internal/msggateway/init.go
+++ b/internal/msggateway/init.go
@@ -17,6 +17,8 @@ package msggateway
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+ "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
+ "github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil"
"time"
@@ -26,6 +28,7 @@ import (
type Config struct {
MsgGateway config.MsgGateway
Share config.Share
+ RedisConfig config.Redis
WebhooksConfig config.Webhooks
Discovery config.Discovery
}
@@ -42,18 +45,25 @@ func Start(ctx context.Context, index int, conf *Config) error {
if err != nil {
return err
}
- longServer, err := NewWsServer(
+ rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
+ if err != nil {
+ return err
+ }
+ longServer := NewWsServer(
conf,
WithPort(wsPort),
WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)),
WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second),
WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen),
)
- if err != nil {
- return err
- }
- hubServer := NewServer(rpcPort, longServer, conf)
+ hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
+ longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges)
+ return nil
+ })
+
+ go longServer.ChangeOnlineStatus(4)
+
netDone := make(chan error)
go func() {
err = hubServer.Start(ctx, index, conf)
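
NewServer now takes a ready callback that InitServer invokes after the gRPC service is registered and the user RPC client is built; Start uses it to hand the online cache to the long-connection server without a construction-order cycle. A small illustrative sketch of that deferred-wiring pattern follows; the type names are invented for the example.

```go
package main

import "fmt"

type userClient struct{ addr string }

type server struct {
	user  *userClient
	ready func(*server) error // invoked once dependencies are available
}

func newServer(ready func(*server) error) *server {
	return &server{ready: ready}
}

// initServer mirrors InitServer: it first builds the dependencies the callback
// needs, then hands the fully constructed server back to the caller's hook.
func (s *server) initServer() error {
	s.user = &userClient{addr: "user-rpc:10320"}
	if s.ready != nil {
		return s.ready(s)
	}
	return nil
}

type longConnServer struct{ online *userClient }

func main() {
	long := &longConnServer{}
	srv := newServer(func(s *server) error {
		long.online = s.user // wiring deferred until the RPC client exists
		return nil
	})
	if err := srv.initServer(); err != nil {
		panic(err)
	}
	fmt.Println("online cache wired to", long.online.addr)
}
```
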
diff --git a/internal/msggateway/long_conn.go b/internal/msggateway/long_conn.go
index 7d5bef4c3..c1b3e27c9 100644
--- a/internal/msggateway/long_conn.go
+++ b/internal/msggateway/long_conn.go
@@ -16,10 +16,11 @@ package msggateway
import (
"encoding/json"
- "github.com/openimsdk/tools/apiresp"
"net/http"
"time"
+ "github.com/openimsdk/tools/apiresp"
+
"github.com/gorilla/websocket"
"github.com/openimsdk/tools/errs"
)
diff --git a/internal/msggateway/online.go b/internal/msggateway/online.go
new file mode 100644
index 000000000..b50608f93
--- /dev/null
+++ b/internal/msggateway/online.go
@@ -0,0 +1,112 @@
+package msggateway
+
+import (
+ "context"
+ "crypto/md5"
+ "encoding/binary"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+ pbuser "github.com/openimsdk/protocol/user"
+ "github.com/openimsdk/tools/log"
+ "github.com/openimsdk/tools/mcontext"
+ "github.com/openimsdk/tools/utils/datautil"
+ "math/rand"
+ "strconv"
+ "time"
+)
+
+func (ws *WsServer) ChangeOnlineStatus(concurrent int) {
+ if concurrent < 1 {
+ concurrent = 1
+ }
+ const renewalTime = cachekey.OnlineExpire / 3
+ //const renewalTime = time.Second * 10
+ renewalTicker := time.NewTicker(renewalTime)
+
+ requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent)
+ changeStatus := make([][]UserState, concurrent)
+
+ for i := 0; i < concurrent; i++ {
+ requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64)
+ changeStatus[i] = make([]UserState, 0, 100)
+ }
+
+ mergeTicker := time.NewTicker(time.Second)
+
+ local2pb := func(u UserState) *pbuser.UserOnlineStatus {
+ return &pbuser.UserOnlineStatus{
+ UserID: u.UserID,
+ Online: u.Online,
+ Offline: u.Offline,
+ }
+ }
+
+ rNum := rand.Uint64()
+ pushUserState := func(us ...UserState) {
+ for _, u := range us {
+ sum := md5.Sum([]byte(u.UserID))
+ i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent)
+ changeStatus[i] = append(changeStatus[i], u)
+ status := changeStatus[i]
+ if len(status) == cap(status) {
+ req := &pbuser.SetUserOnlineStatusReq{
+ Status: datautil.Slice(status, local2pb),
+ }
+ changeStatus[i] = status[:0]
+ select {
+ case requestChs[i] <- req:
+ default:
+ log.ZError(context.Background(), "user online processing is too slow", nil)
+ }
+ }
+ }
+ }
+
+ pushAllUserState := func() {
+ for i, status := range changeStatus {
+ if len(status) == 0 {
+ continue
+ }
+ req := &pbuser.SetUserOnlineStatusReq{
+ Status: datautil.Slice(status, local2pb),
+ }
+ changeStatus[i] = status[:0]
+ select {
+ case requestChs[i] <- req:
+ default:
+ log.ZError(context.Background(), "user online processing is too slow", nil)
+ }
+ }
+ }
+
+ opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10))
+ doRequest := func(req *pbuser.SetUserOnlineStatusReq) {
+ ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5)
+ defer cancel()
+ if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil {
+ log.ZError(ctx, "update user online status", err)
+ }
+ }
+
+ for i := 0; i < concurrent; i++ {
+ go func(ch <-chan *pbuser.SetUserOnlineStatusReq) {
+ for req := range ch {
+ doRequest(req)
+ }
+ }(requestChs[i])
+ }
+
+ for {
+ select {
+ case <-mergeTicker.C:
+ pushAllUserState()
+ case now := <-renewalTicker.C:
+ deadline := now.Add(-cachekey.OnlineExpire / 3)
+ users := ws.clients.GetAllUserStatus(deadline, now)
+ log.ZDebug(context.Background(), "renewal ticker", "deadline", deadline, "nowtime", now, "num", len(users))
+ pushUserState(users...)
+ case state := <-ws.clients.UserState():
+ log.ZDebug(context.Background(), "OnlineCache user online change", "userID", state.UserID, "online", state.Online, "offline", state.Offline)
+ pushUserState(state)
+ }
+ }
+}
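
ChangeOnlineStatus shards status changes by md5(userID) so a given user is always flushed by the same worker, and each shard is flushed either when its batch fills up or when the one-second merge ticker fires. The stdlib-only sketch below reproduces that shard-and-batch shape; the flush callback is a placeholder for the SetUserOnlineStatus RPC.

```go
package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
	"time"
)

type change struct {
	UserID string
	Online bool
}

// shardIndex keeps a given user on one worker so its updates stay ordered.
func shardIndex(userID string, shards int) int {
	sum := md5.Sum([]byte(userID))
	return int(binary.BigEndian.Uint64(sum[:8]) % uint64(shards))
}

func run(events <-chan change, shards, batchSize int, flush func(shard int, batch []change)) {
	buffers := make([][]change, shards)
	for i := range buffers {
		buffers[i] = make([]change, 0, batchSize)
	}
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	flushShard := func(i int) {
		if len(buffers[i]) == 0 {
			return
		}
		flush(i, buffers[i])
		buffers[i] = buffers[i][:0] // slice reused after flush returns, like changeStatus
	}

	for {
		select {
		case ev, ok := <-events:
			if !ok {
				for i := range buffers {
					flushShard(i)
				}
				return
			}
			i := shardIndex(ev.UserID, shards)
			buffers[i] = append(buffers[i], ev)
			if len(buffers[i]) == batchSize {
				flushShard(i) // size-triggered flush, like the cap(status) check above
			}
		case <-ticker.C:
			for i := range buffers { // time-triggered flush, like the merge ticker
				flushShard(i)
			}
		}
	}
}

func main() {
	events := make(chan change, 8)
	go func() {
		for _, u := range []string{"u1", "u2", "u1", "u3"} {
			events <- change{UserID: u, Online: true}
		}
		close(events)
	}()
	run(events, 4, 2, func(shard int, batch []change) {
		fmt.Printf("shard %d flushes %d change(s)\n", shard, len(batch))
	})
}
```
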
diff --git a/internal/msggateway/subscription.go b/internal/msggateway/subscription.go
new file mode 100644
index 000000000..9bb41e0df
--- /dev/null
+++ b/internal/msggateway/subscription.go
@@ -0,0 +1,166 @@
+package msggateway
+
+import (
+ "context"
+ "github.com/openimsdk/protocol/sdkws"
+ "github.com/openimsdk/tools/log"
+ "github.com/openimsdk/tools/utils/datautil"
+ "google.golang.org/protobuf/proto"
+ "sync"
+)
+
+func (ws *WsServer) subscriberUserOnlineStatusChanges(ctx context.Context, userID string, platformIDs []int32) {
+ if ws.clients.RecvSubChange(userID, platformIDs) {
+ log.ZDebug(ctx, "gateway receive subscription message and go back online", "userID", userID, "platformIDs", platformIDs)
+ } else {
+ log.ZDebug(ctx, "gateway ignore user online status changes", "userID", userID, "platformIDs", platformIDs)
+ }
+ ws.pushUserIDOnlineStatus(ctx, userID, platformIDs)
+}
+
+func (ws *WsServer) SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) {
+ var sub sdkws.SubUserOnlineStatus
+ if err := proto.Unmarshal(data.Data, &sub); err != nil {
+ return nil, err
+ }
+ ws.subscription.Sub(client, sub.SubscribeUserID, sub.UnsubscribeUserID)
+ var resp sdkws.SubUserOnlineStatusTips
+ if len(sub.SubscribeUserID) > 0 {
+ resp.Subscribers = make([]*sdkws.SubUserOnlineStatusElem, 0, len(sub.SubscribeUserID))
+ for _, userID := range sub.SubscribeUserID {
+ platformIDs, err := ws.online.GetUserOnlinePlatform(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ resp.Subscribers = append(resp.Subscribers, &sdkws.SubUserOnlineStatusElem{
+ UserID: userID,
+ OnlinePlatformIDs: platformIDs,
+ })
+ }
+ }
+ return proto.Marshal(&resp)
+}
+
+func newSubscription() *Subscription {
+ return &Subscription{
+ userIDs: make(map[string]*subClient),
+ }
+}
+
+type subClient struct {
+ clients map[string]*Client
+}
+
+type Subscription struct {
+ lock sync.RWMutex
+	userIDs map[string]*subClient // userID -> the clients subscribed to that user
+}
+
+func (s *Subscription) DelClient(client *Client) {
+ client.subLock.Lock()
+ userIDs := datautil.Keys(client.subUserIDs)
+ for _, userID := range userIDs {
+ delete(client.subUserIDs, userID)
+ }
+ client.subLock.Unlock()
+ if len(userIDs) == 0 {
+ return
+ }
+ addr := client.ctx.GetRemoteAddr()
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ for _, userID := range userIDs {
+ sub, ok := s.userIDs[userID]
+ if !ok {
+ continue
+ }
+ delete(sub.clients, addr)
+ if len(sub.clients) == 0 {
+ delete(s.userIDs, userID)
+ }
+ }
+}
+
+func (s *Subscription) GetClient(userID string) []*Client {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+ cs, ok := s.userIDs[userID]
+ if !ok {
+ return nil
+ }
+ clients := make([]*Client, 0, len(cs.clients))
+ for _, client := range cs.clients {
+ clients = append(clients, client)
+ }
+ return clients
+}
+
+func (s *Subscription) Sub(client *Client, addUserIDs, delUserIDs []string) {
+ if len(addUserIDs)+len(delUserIDs) == 0 {
+ return
+ }
+ var (
+ del = make(map[string]struct{})
+ add = make(map[string]struct{})
+ )
+ client.subLock.Lock()
+ for _, userID := range delUserIDs {
+ if _, ok := client.subUserIDs[userID]; !ok {
+ continue
+ }
+ del[userID] = struct{}{}
+ delete(client.subUserIDs, userID)
+ }
+ for _, userID := range addUserIDs {
+ delete(del, userID)
+ if _, ok := client.subUserIDs[userID]; ok {
+ continue
+ }
+ client.subUserIDs[userID] = struct{}{}
+ add[userID] = struct{}{}
+ }
+ client.subLock.Unlock()
+ if len(del)+len(add) == 0 {
+ return
+ }
+ addr := client.ctx.GetRemoteAddr()
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ for userID := range del {
+ sub, ok := s.userIDs[userID]
+ if !ok {
+ continue
+ }
+ delete(sub.clients, addr)
+ if len(sub.clients) == 0 {
+ delete(s.userIDs, userID)
+ }
+ }
+ for userID := range add {
+ sub, ok := s.userIDs[userID]
+ if !ok {
+ sub = &subClient{clients: make(map[string]*Client)}
+ s.userIDs[userID] = sub
+ }
+ sub.clients[addr] = client
+ }
+}
+
+func (ws *WsServer) pushUserIDOnlineStatus(ctx context.Context, userID string, platformIDs []int32) {
+ clients := ws.subscription.GetClient(userID)
+ if len(clients) == 0 {
+ return
+ }
+ onlineStatus, err := proto.Marshal(&sdkws.SubUserOnlineStatusTips{
+ Subscribers: []*sdkws.SubUserOnlineStatusElem{{UserID: userID, OnlinePlatformIDs: platformIDs}},
+ })
+ if err != nil {
+ log.ZError(ctx, "pushUserIDOnlineStatus json.Marshal", err)
+ return
+ }
+ for _, client := range clients {
+ if err := client.PushUserOnlineStatus(onlineStatus); err != nil {
+ log.ZError(ctx, "UserSubscribeOnlineStatusNotification push failed", err, "userID", client.UserID, "platformID", client.PlatformID, "changeUserID", userID, "changePlatformID", platformIDs)
+ }
+ }
+}
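
Subscription keeps a reverse index from each watched userID to the subscribing connections (keyed by remote address), while every client tracks its own subscription set under its own lock, so a status change is pushed only to the connections that asked for it. A reduced sketch of the reverse-index half, with plain address strings standing in for *Client values:

```go
package main

import (
	"fmt"
	"sync"
)

// registry maps a watched userID to the set of subscriber connections (by address).
type registry struct {
	mu   sync.RWMutex
	subs map[string]map[string]struct{} // userID -> set of subscriber addrs
}

func newRegistry() *registry {
	return &registry{subs: make(map[string]map[string]struct{})}
}

// Sub applies an add list and a delete list for one subscriber, like Subscription.Sub.
func (r *registry) Sub(addr string, addUserIDs, delUserIDs []string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, userID := range delUserIDs {
		if set, ok := r.subs[userID]; ok {
			delete(set, addr)
			if len(set) == 0 {
				delete(r.subs, userID) // drop empty entries so the map does not leak
			}
		}
	}
	for _, userID := range addUserIDs {
		set, ok := r.subs[userID]
		if !ok {
			set = make(map[string]struct{})
			r.subs[userID] = set
		}
		set[addr] = struct{}{}
	}
}

// Subscribers returns everyone to notify when userID's online status changes.
func (r *registry) Subscribers(userID string) []string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	addrs := make([]string, 0, len(r.subs[userID]))
	for addr := range r.subs[userID] {
		addrs = append(addrs, addr)
	}
	return addrs
}

func main() {
	r := newRegistry()
	r.Sub("10.0.0.1:50312", []string{"alice"}, nil)
	r.Sub("10.0.0.2:50313", []string{"alice", "bob"}, nil)
	fmt.Println(r.Subscribers("alice")) // both connections get alice's status changes
}
```
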
diff --git a/internal/msggateway/user_map.go b/internal/msggateway/user_map.go
index 79cc53d1b..bd1f19728 100644
--- a/internal/msggateway/user_map.go
+++ b/internal/msggateway/user_map.go
@@ -1,135 +1,185 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package msggateway
import (
- "context"
- "sync"
-
- "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
+ "sync"
+ "time"
)
-type UserMap struct {
- m sync.Map
+type UserMap interface {
+ GetAll(userID string) ([]*Client, bool)
+ Get(userID string, platformID int) ([]*Client, bool, bool)
+ Set(userID string, v *Client)
+ DeleteClients(userID string, clients []*Client) (isDeleteUser bool)
+ UserState() <-chan UserState
+ GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState
+ RecvSubChange(userID string, platformIDs []int32) bool
}
-func newUserMap() *UserMap {
- return &UserMap{}
+type UserState struct {
+ UserID string
+ Online []int32
+ Offline []int32
}
-func (u *UserMap) GetAll(key string) ([]*Client, bool) {
- allClients, ok := u.m.Load(key)
- if ok {
- return allClients.([]*Client), ok
- }
- return nil, ok
+type UserPlatform struct {
+ Time time.Time
+ Clients []*Client
}
-func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) {
- allClients, userExisted := u.m.Load(key)
- if userExisted {
- var clients []*Client
- for _, client := range allClients.([]*Client) {
- if client.PlatformID == platformID {
- clients = append(clients, client)
- }
- }
- if len(clients) > 0 {
- return clients, userExisted, true
- }
- return clients, userExisted, false
+func (u *UserPlatform) PlatformIDs() []int32 {
+ if len(u.Clients) == 0 {
+ return nil
+ }
+ platformIDs := make([]int32, 0, len(u.Clients))
+ for _, client := range u.Clients {
+ platformIDs = append(platformIDs, int32(client.PlatformID))
}
- return nil, userExisted, false
+ return platformIDs
}
-// Set adds a client to the map.
-func (u *UserMap) Set(key string, v *Client) {
- allClients, existed := u.m.Load(key)
- if existed {
- log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID)
- oldClients := allClients.([]*Client)
- oldClients = append(oldClients, v)
- u.m.Store(key, oldClients)
- } else {
- log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID)
+func (u *UserPlatform) PlatformIDSet() map[int32]struct{} {
+ if len(u.Clients) == 0 {
+ return nil
+ }
+ platformIDs := make(map[int32]struct{})
+ for _, client := range u.Clients {
+ platformIDs[int32(client.PlatformID)] = struct{}{}
+ }
+ return platformIDs
+}
- var clients []*Client
- clients = append(clients, v)
- u.m.Store(key, clients)
+func newUserMap() UserMap {
+ return &userMap{
+ data: make(map[string]*UserPlatform),
+ ch: make(chan UserState, 10000),
}
}
-func (u *UserMap) delete(key string, connRemoteAddr string) (isDeleteUser bool) {
- // Attempt to load the clients associated with the key.
- allClients, existed := u.m.Load(key)
- if !existed {
- // Return false immediately if the key does not exist.
+type userMap struct {
+ lock sync.RWMutex
+ data map[string]*UserPlatform
+ ch chan UserState
+}
+
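+// RecvSubChange re-pushes the user's online state when the local connections include platforms missing from the received platformIDs; it returns false when no correction is needed.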
+func (u *userMap) RecvSubChange(userID string, platformIDs []int32) bool {
+ u.lock.RLock()
+ defer u.lock.RUnlock()
+ result, ok := u.data[userID]
+ if !ok {
return false
}
-
- // Convert allClients to a slice of *Client.
- oldClients := allClients.([]*Client)
- var remainingClients []*Client
- for _, client := range oldClients {
- // Keep clients that do not match the connRemoteAddr.
- if client.ctx.GetRemoteAddr() != connRemoteAddr {
- remainingClients = append(remainingClients, client)
- }
+ localPlatformIDs := result.PlatformIDSet()
+ for _, platformID := range platformIDs {
+ delete(localPlatformIDs, platformID)
}
+ if len(localPlatformIDs) == 0 {
+ return false
+ }
+ u.push(userID, result, nil)
+ return true
+}
- // If no clients remain after filtering, delete the key from the map.
- if len(remainingClients) == 0 {
- u.m.Delete(key)
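+// push emits a UserState event without blocking, refreshing the push time on success; if the channel is full the event is dropped and false is returned.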
+func (u *userMap) push(userID string, userPlatform *UserPlatform, offline []int32) bool {
+ select {
+ case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}:
+ userPlatform.Time = time.Now()
return true
+ default:
+ return false
}
+}
- // Otherwise, update the key with the remaining clients.
- u.m.Store(key, remainingClients)
- return false
+func (u *userMap) GetAll(userID string) ([]*Client, bool) {
+ u.lock.RLock()
+ defer u.lock.RUnlock()
+ result, ok := u.data[userID]
+ if !ok {
+ return nil, false
+ }
+ return result.Clients, true
}
-func (u *UserMap) deleteClients(key string, clients []*Client) (isDeleteUser bool) {
- m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) {
- return c.ctx.GetRemoteAddr(), struct{}{}
- })
- allClients, existed := u.m.Load(key)
- if !existed {
- // If the key doesn't exist, return false.
- return false
+func (u *userMap) Get(userID string, platformID int) ([]*Client, bool, bool) {
+ u.lock.RLock()
+ defer u.lock.RUnlock()
+ result, ok := u.data[userID]
+ if !ok {
+ return nil, false, false
}
+ var clients []*Client
+ for _, client := range result.Clients {
+ if client.PlatformID == platformID {
+ clients = append(clients, client)
+ }
+ }
+ return clients, true, len(clients) > 0
+}
- // Filter out clients that are in the deleteMap.
- oldClients := allClients.([]*Client)
- var remainingClients []*Client
- for _, client := range oldClients {
- if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted {
- remainingClients = append(remainingClients, client)
+func (u *userMap) Set(userID string, client *Client) {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ result, ok := u.data[userID]
+ if ok {
+ result.Clients = append(result.Clients, client)
+ } else {
+ result = &UserPlatform{
+ Clients: []*Client{client},
}
+ u.data[userID] = result
}
+ u.push(client.UserID, result, nil)
+}
- // Update or delete the key based on the remaining clients.
- if len(remainingClients) == 0 {
- u.m.Delete(key)
- return true
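+// DeleteClients removes the given clients (matched by remote address), pushes the updated state with their platforms marked offline, and reports whether the user has no connections left.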
+func (u *userMap) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) {
+ if len(clients) == 0 {
+ return false
}
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ result, ok := u.data[userID]
+ if !ok {
+ return false
+ }
+ offline := make([]int32, 0, len(clients))
+ deleteAddr := datautil.SliceSetAny(clients, func(client *Client) string {
+ return client.ctx.GetRemoteAddr()
+ })
+ tmp := result.Clients
+ result.Clients = result.Clients[:0]
+ for _, client := range tmp {
+ if _, delCli := deleteAddr[client.ctx.GetRemoteAddr()]; delCli {
+ offline = append(offline, int32(client.PlatformID))
+ } else {
+ result.Clients = append(result.Clients, client)
+ }
+ }
+ defer u.push(userID, result, offline)
+ if len(result.Clients) > 0 {
+ return false
+ }
+ delete(u.data, userID)
+ return true
+}
- u.m.Store(key, remainingClients)
- return false
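+// GetAllUserStatus collects users' current online platforms for the periodic state renewal, updating their last-push time to nowtime.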
+func (u *userMap) GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState {
+ u.lock.RLock()
+ defer u.lock.RUnlock()
+ result := make([]UserState, 0, len(u.data))
+ for userID, userPlatform := range u.data {
+ if deadline.Before(userPlatform.Time) {
+ continue
+ }
+ userPlatform.Time = nowtime
+ online := make([]int32, 0, len(userPlatform.Clients))
+ for _, client := range userPlatform.Clients {
+ online = append(online, int32(client.PlatformID))
+ }
+ result = append(result, UserState{UserID: userID, Online: online})
+ }
+ return result
}
-func (u *UserMap) DeleteAll(key string) {
- u.m.Delete(key)
+func (u *userMap) UserState() <-chan UserState {
+ return u.ch
}
diff --git a/internal/msggateway/n_ws_server.go b/internal/msggateway/ws_server.go
similarity index 89%
rename from internal/msggateway/n_ws_server.go
rename to internal/msggateway/ws_server.go
index defec16df..537b8c5f0 100644
--- a/internal/msggateway/n_ws_server.go
+++ b/internal/msggateway/ws_server.go
@@ -18,6 +18,7 @@ import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
+ "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
pbAuth "github.com/openimsdk/protocol/auth"
"github.com/openimsdk/tools/mcontext"
"net/http"
@@ -48,6 +49,7 @@ type LongConnServer interface {
KickUserConn(client *Client) error
UnRegister(c *Client)
SetKickHandlerInfo(i *kickHandler)
+ SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error)
Compressor
Encoder
MessageHandler
@@ -60,7 +62,9 @@ type WsServer struct {
registerChan chan *Client
unregisterChan chan *Client
kickHandlerChan chan *kickHandler
- clients *UserMap
+ clients UserMap
+ online *rpccache.OnlineCache
+ subscription *Subscription
clientPool sync.Pool
onlineUserNum atomic.Int64
onlineUserConnNum atomic.Int64
@@ -90,18 +94,18 @@ func (ws *WsServer) SetDiscoveryRegistry(disCov discovery.SvcDiscoveryRegistry,
ws.disCov = disCov
}
-func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
- err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
- if err != nil {
- log.ZWarn(ctx, "SetUserStatus err", err)
- }
- switch status {
- case constant.Online:
- ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
- case constant.Offline:
- ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
- }
-}
+//func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
+// err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
+// if err != nil {
+// log.ZWarn(ctx, "SetUserStatus err", err)
+// }
+// switch status {
+// case constant.Online:
+// ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
+// case constant.Offline:
+// ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
+// }
+//}
func (ws *WsServer) UnRegister(c *Client) {
ws.unregisterChan <- c
@@ -119,11 +123,13 @@ func (ws *WsServer) GetUserPlatformCons(userID string, platform int) ([]*Client,
return ws.clients.Get(userID, platform)
}
-func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
+func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer {
var config configs
for _, o := range opts {
o(&config)
}
+ //userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
+
v := validator.New()
return &WsServer{
msgGatewayConfig: msgGatewayConfig,
@@ -141,10 +147,11 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
kickHandlerChan: make(chan *kickHandler, 1000),
validate: v,
clients: newUserMap(),
+ subscription: newSubscription(),
Compressor: NewGzipCompressor(),
Encoder: NewGobEncoder(),
webhookClient: webhook.NewWebhookClient(msgGatewayConfig.WebhooksConfig.URL),
- }, nil
+ }
}
func (ws *WsServer) Run(done chan error) error {
@@ -278,11 +285,11 @@ func (ws *WsServer) registerClient(client *Client) {
}()
}
- wg.Add(1)
- go func() {
- defer wg.Done()
- ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
- }()
+ //wg.Add(1)
+ //go func() {
+ // defer wg.Done()
+ // ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
+ //}()
wg.Wait()
@@ -309,7 +316,7 @@ func getRemoteAdders(client []*Client) string {
}
func (ws *WsServer) KickUserConn(client *Client) error {
- ws.clients.deleteClients(client.UserID, []*Client{client})
+ ws.clients.DeleteClients(client.UserID, []*Client{client})
return client.KickOnlineMessage()
}
@@ -325,7 +332,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
if !clientOK {
return
}
- ws.clients.deleteClients(newClient.UserID, oldClients)
+ ws.clients.DeleteClients(newClient.UserID, oldClients)
for _, c := range oldClients {
err := c.KickOnlineMessage()
if err != nil {
@@ -345,13 +352,14 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
func (ws *WsServer) unregisterClient(client *Client) {
defer ws.clientPool.Put(client)
- isDeleteUser := ws.clients.delete(client.UserID, client.ctx.GetRemoteAddr())
+ isDeleteUser := ws.clients.DeleteClients(client.UserID, []*Client{client})
if isDeleteUser {
ws.onlineUserNum.Add(-1)
prommetrics.OnlineUserGauge.Dec()
}
ws.onlineUserConnNum.Add(-1)
- ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
+ ws.subscription.DelClient(client)
+ //ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
ws.onlineUserNum.Load(), "online user conn Num",
ws.onlineUserConnNum.Load(),
diff --git a/internal/msgtransfer/init.go b/internal/msgtransfer/init.go
index 65d04f381..b4b2245eb 100644
--- a/internal/msgtransfer/init.go
+++ b/internal/msgtransfer/init.go
@@ -17,6 +17,7 @@ package msgtransfer
import (
"context"
"fmt"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil"
@@ -29,16 +30,12 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
- "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/collectors"
- "github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
@@ -82,12 +79,21 @@ func Start(ctx context.Context, index int, config *Config) error {
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
msgModel := redis.NewMsgCache(rdb)
- seqModel := redis.NewSeqCache(rdb)
msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
if err != nil {
return err
}
- msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqModel, &config.KafkaConfig)
+ seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
+ if err != nil {
+ return err
+ }
+ seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
+ seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
+ if err != nil {
+ return err
+ }
+ seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
+ msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}
@@ -130,14 +136,8 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
netDone <- struct{}{}
return
}
- proreg := prometheus.NewRegistry()
- proreg.MustRegister(
- collectors.NewGoCollector(),
- )
- proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer", &config.Share)...)
- http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
- err = http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
- if err != nil && err != http.ErrServerClosed {
+
+ if err := prommetrics.TransferInit(prometheusPort); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
netDone <- struct{}{}
}
diff --git a/internal/push/offlinepush/fcm/push.go b/internal/push/offlinepush/fcm/push.go
index ec973008e..f015ca4e5 100644
--- a/internal/push/offlinepush/fcm/push.go
+++ b/internal/push/offlinepush/fcm/push.go
@@ -16,8 +16,11 @@ package fcm
import (
"context"
+ "fmt"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
+ "github.com/openimsdk/tools/utils/httputil"
"path/filepath"
+ "strings"
firebase "firebase.google.com/go"
"firebase.google.com/go/messaging"
@@ -40,13 +43,25 @@ type Fcm struct {
// NewClient initializes a new FCM client using the Firebase Admin SDK.
// It requires the FCM service account credentials file located within the project's configuration directory.
-func NewClient(pushConf *config.Push, cache cache.ThirdCache) (*Fcm, error) {
- projectRoot, err := config.GetProjectRoot()
- if err != nil {
- return nil, err
+func NewClient(pushConf *config.Push, cache cache.ThirdCache, fcmConfigPath string) (*Fcm, error) {
+ var opt option.ClientOption
+ switch {
+ case len(pushConf.FCM.FilePath) != 0:
+ // with file path
+ credentialsFilePath := filepath.Join(fcmConfigPath, pushConf.FCM.FilePath)
+ opt = option.WithCredentialsFile(credentialsFilePath)
+ case len(pushConf.FCM.AuthURL) != 0:
+ // with authentication URL
+ client := httputil.NewHTTPClient(httputil.NewClientConfig())
+ resp, err := client.Get(pushConf.FCM.AuthURL)
+ if err != nil {
+ return nil, err
+ }
+ opt = option.WithCredentialsJSON(resp)
+ default:
+ return nil, errs.New("no FCM config").Wrap()
}
- credentialsFilePath := filepath.Join(projectRoot, "config", pushConf.FCM.ServiceAccount)
- opt := option.WithCredentialsFile(credentialsFilePath)
+
fcmApp, err := firebase.NewApp(context.Background(), nil, opt)
if err != nil {
return nil, errs.Wrap(err)
@@ -56,7 +71,6 @@ func NewClient(pushConf *config.Push, cache cache.ThirdCache) (*Fcm, error) {
if err != nil {
return nil, errs.Wrap(err)
}
-
return &Fcm{fcmMsgCli: fcmMsgClient, cache: cache}, nil
}
@@ -79,6 +93,8 @@ func (f *Fcm) Push(ctx context.Context, userIDs []string, title, content string,
notification.Body = content
notification.Title = title
var messages []*messaging.Message
+ var sendErrBuilder strings.Builder
+ var msgErrBuilder strings.Builder
for userID, personTokens := range allTokens {
apns := &messaging.APNSConfig{Payload: &messaging.APNSPayload{Aps: &messaging.Aps{Sound: opts.IOSPushSound}}}
messageCount := len(messages)
@@ -86,9 +102,21 @@ func (f *Fcm) Push(ctx context.Context, userIDs []string, title, content string,
response, err := f.fcmMsgCli.SendAll(ctx, messages)
if err != nil {
Fail = Fail + messageCount
+ // Record push error
+ sendErrBuilder.WriteString(err.Error())
+ sendErrBuilder.WriteByte('.')
} else {
Success = Success + response.SuccessCount
Fail = Fail + response.FailureCount
+ if response.FailureCount != 0 {
+ // Record message error
+ for i := range response.Responses {
+ if !response.Responses[i].Success {
+ msgErrBuilder.WriteString(response.Responses[i].Error.Error())
+ msgErrBuilder.WriteByte('.')
+ }
+ }
+ }
}
messages = messages[0:0]
}
@@ -134,5 +162,9 @@ func (f *Fcm) Push(ctx context.Context, userIDs []string, title, content string,
Fail = Fail + response.FailureCount
}
}
+ if Fail != 0 {
+ return errs.New(fmt.Sprintf("%d messages failed to send; send err: %s; message err: %s",
+ Fail, sendErrBuilder.String(), msgErrBuilder.String())).Wrap()
+ }
return nil
}
diff --git a/internal/push/offlinepush/offlinepusher.go b/internal/push/offlinepush/offlinepusher.go
index 8dc8a0bc6..9aa6625de 100644
--- a/internal/push/offlinepush/offlinepusher.go
+++ b/internal/push/offlinepush/offlinepusher.go
@@ -36,13 +36,13 @@ type OfflinePusher interface {
Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error
}
-func NewOfflinePusher(pushConf *config.Push, cache cache.ThirdCache) (OfflinePusher, error) {
+func NewOfflinePusher(pushConf *config.Push, cache cache.ThirdCache, fcmConfigPath string) (OfflinePusher, error) {
var offlinePusher OfflinePusher
switch pushConf.Enable {
case geTUI:
offlinePusher = getui.NewClient(pushConf, cache)
case firebase:
- return fcm.NewClient(pushConf, cache)
+ return fcm.NewClient(pushConf, cache, fcmConfigPath)
case jPush:
offlinePusher = jpush.NewClient(pushConf)
default:
diff --git a/internal/push/push.go b/internal/push/push.go
index c7e245dfe..1a04bbea2 100644
--- a/internal/push/push.go
+++ b/internal/push/push.go
@@ -29,6 +29,7 @@ type Config struct {
WebhooksConfig config.Webhooks
LocalCacheConfig config.LocalCache
Discovery config.Discovery
+ FcmConfigPath string
}
func (p pushServer) PushMsg(ctx context.Context, req *pbpush.PushMsgReq) (*pbpush.PushMsgResp, error) {
@@ -50,7 +51,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err
}
cacheModel := redis.NewThirdCache(rdb)
- offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel)
+ offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel, config.FcmConfigPath)
if err != nil {
return err
}
diff --git a/internal/push/push_handler.go b/internal/push/push_handler.go
index 03c299b7a..ed87b3929 100644
--- a/internal/push/push_handler.go
+++ b/internal/push/push_handler.go
@@ -28,6 +28,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant"
pbchat "github.com/openimsdk/protocol/msg"
+ "github.com/openimsdk/protocol/msggateway"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
@@ -45,6 +46,7 @@ type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher
+ onlineCache *rpccache.OnlineCache
groupLocalCache *rpccache.GroupLocalCache
conversationLocalCache *rpccache.ConversationLocalCache
msgRpcClient rpcclient.MessageRpcClient
@@ -63,16 +65,17 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
if err != nil {
return nil, err
}
+ userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config)
consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.msgRpcClient = rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
consumerHandler.conversationRpcClient = rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
- consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient,
- &config.LocalCacheConfig, rdb)
+ consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
consumerHandler.config = config
+ consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
return &consumerHandler, nil
}
@@ -125,12 +128,12 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s
}
// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.
-func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) error {
+func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
return err
}
- wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, userIDs)
+ wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
if err != nil {
return err
}
@@ -179,8 +182,40 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
return true
}
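+// GetConnsAndOnlinePush checks the online cache for each target user, skips the gateway push entirely when none of them is online, and returns empty results for the users found offline.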
+func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
+ var (
+ onlineUserIDs []string
+ offlineUserIDs []string
+ )
+ for _, userID := range pushToUserIDs {
+ online, err := c.onlineCache.GetUserOnline(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ if online {
+ onlineUserIDs = append(onlineUserIDs, userID)
+ } else {
+ offlineUserIDs = append(offlineUserIDs, userID)
+ }
+ }
+ var result []*msggateway.SingleMsgToUserResults
+ if len(onlineUserIDs) > 0 {
+ var err error
+ result, err = c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, userID := range offlineUserIDs {
+ result = append(result, &msggateway.SingleMsgToUserResults{
+ UserID: userID,
+ })
+ }
+ return result, nil
+}
+
func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
- log.ZDebug(ctx, "Get super group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
+ log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
var pushToUserIDs []string
if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg,
&pushToUserIDs); err != nil {
@@ -192,7 +227,7 @@ func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *s
return err
}
- wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
+ wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil {
return err
}
diff --git a/internal/rpc/auth/auth.go b/internal/rpc/auth/auth.go
index 6270b39b3..804375e4f 100644
--- a/internal/rpc/auth/auth.go
+++ b/internal/rpc/auth/auth.go
@@ -16,6 +16,7 @@ package auth
import (
"context"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/tools/db/redisutil"
@@ -32,7 +33,6 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
- "github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/tokenverify"
"google.golang.org/grpc"
)
@@ -61,7 +61,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
userRpcClient: &userRpcClient,
RegisterCenter: client,
authDatabase: controller.NewAuthDatabase(
- redis2.NewTokenCacheModel(rdb),
+ redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire),
config.Share.Secret,
config.RpcConfig.TokenPolicy.Expire,
),
@@ -153,21 +153,19 @@ func (s *authServer) ForceLogout(ctx context.Context, req *pbauth.ForceLogoutReq
if err := authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil {
return nil, err
}
- if err := s.forceKickOff(ctx, req.UserID, req.PlatformID, mcontext.GetOperationID(ctx)); err != nil {
+ if err := s.forceKickOff(ctx, req.UserID, req.PlatformID); err != nil {
return nil, err
}
return &pbauth.ForceLogoutResp{}, nil
}
-func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID int32, operationID string) error {
+func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID int32) error {
conns, err := s.RegisterCenter.GetConns(ctx, s.config.Share.RpcRegisterName.MessageGateway)
if err != nil {
return err
}
for _, v := range conns {
log.ZDebug(ctx, "forceKickOff", "conn", v.Target())
- }
- for _, v := range conns {
client := msggateway.NewMsgGatewayClient(v)
kickReq := &msggateway.KickUserOfflineReq{KickUserIDList: []string{userID}, PlatformID: platformID}
_, err := client.KickUserOffline(ctx, kickReq)
@@ -175,8 +173,24 @@ func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID
log.ZError(ctx, "forceKickOff", err, "kickReq", kickReq)
}
}
+
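+ // Overwrite the user's tokens on this platform with the kicked state so they are rejected afterwards.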
+ m, err := s.authDatabase.GetTokensWithoutError(ctx, userID, int(platformID))
+ if err != nil && err != redis.Nil {
+ return err
+ }
+ for k := range m {
+ m[k] = constant.KickedToken
+ log.ZDebug(ctx, "set token map", "token map", m, "userID", userID, "token", k)
+
+ err = s.authDatabase.SetTokenMapByUidPid(ctx, userID, int(platformID), m)
+ if err != nil {
+ return err
+ }
+ }
return nil
}
+
func (s *authServer) InvalidateToken(ctx context.Context, req *pbauth.InvalidateTokenReq) (*pbauth.InvalidateTokenResp, error) {
m, err := s.authDatabase.GetTokensWithoutError(ctx, req.UserID, int(req.PlatformID))
if err != nil && err != redis.Nil {
diff --git a/internal/rpc/conversation/conversaion.go b/internal/rpc/conversation/conversaion.go
index df9267ae0..3047c376b 100644
--- a/internal/rpc/conversation/conversaion.go
+++ b/internal/rpc/conversation/conversaion.go
@@ -184,13 +184,23 @@ func (c *conversationServer) GetAllConversations(ctx context.Context, req *pbcon
}
func (c *conversationServer) GetConversations(ctx context.Context, req *pbconversation.GetConversationsReq) (*pbconversation.GetConversationsResp, error) {
- conversations, err := c.conversationDatabase.FindConversations(ctx, req.OwnerUserID, req.ConversationIDs)
+ conversations, err := c.getConversations(ctx, req.OwnerUserID, req.ConversationIDs)
+ if err != nil {
+ return nil, err
+ }
+ return &pbconversation.GetConversationsResp{
+ Conversations: conversations,
+ }, nil
+}
+
+func (c *conversationServer) getConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*pbconversation.Conversation, error) {
+ conversations, err := c.conversationDatabase.FindConversations(ctx, ownerUserID, conversationIDs)
if err != nil {
return nil, err
}
resp := &pbconversation.GetConversationsResp{Conversations: []*pbconversation.Conversation{}}
resp.Conversations = convert.ConversationsDB2Pb(conversations)
- return resp, nil
+ return convert.ConversationsDB2Pb(conversations), nil
}
func (c *conversationServer) SetConversation(ctx context.Context, req *pbconversation.SetConversationReq) (*pbconversation.SetConversationResp, error) {
@@ -581,3 +591,14 @@ func (c *conversationServer) UpdateConversation(ctx context.Context, req *pbconv
}
return &pbconversation.UpdateConversationResp{}, nil
}
+
+func (c *conversationServer) GetOwnerConversation(ctx context.Context, req *pbconversation.GetOwnerConversationReq) (*pbconversation.GetOwnerConversationResp, error) {
+ total, conversations, err := c.conversationDatabase.GetOwnerConversation(ctx, req.UserID, req.Pagination)
+ if err != nil {
+ return nil, err
+ }
+ return &pbconversation.GetOwnerConversationResp{
+ Total: total,
+ Conversations: convert.ConversationsDB2Pb(conversations),
+ }, nil
+}
diff --git a/internal/rpc/conversation/sync.go b/internal/rpc/conversation/sync.go
new file mode 100644
index 000000000..ad88b2bbd
--- /dev/null
+++ b/internal/rpc/conversation/sync.go
@@ -0,0 +1,56 @@
+package conversation
+
+import (
+ "context"
+
+ "github.com/openimsdk/open-im-server/v3/internal/rpc/incrversion"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/open-im-server/v3/pkg/util/hashutil"
+ "github.com/openimsdk/protocol/conversation"
+)
+
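+// GetFullOwnerConversationIDs returns the user's full conversation ID list together with its hash; when the client's idHash already matches, the list is omitted and Equal is set.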
+func (c *conversationServer) GetFullOwnerConversationIDs(ctx context.Context, req *conversation.GetFullOwnerConversationIDsReq) (*conversation.GetFullOwnerConversationIDsResp, error) {
+ vl, err := c.conversationDatabase.FindMaxConversationUserVersionCache(ctx, req.UserID)
+ if err != nil {
+ return nil, err
+ }
+ conversationIDs, err := c.conversationDatabase.GetConversationIDs(ctx, req.UserID)
+ if err != nil {
+ return nil, err
+ }
+ idHash := hashutil.IdHash(conversationIDs)
+ if req.IdHash == idHash {
+ conversationIDs = nil
+ }
+ return &conversation.GetFullOwnerConversationIDsResp{
+ Version: idHash,
+ VersionID: vl.ID.Hex(),
+ Equal: req.IdHash == idHash,
+ ConversationIDs: conversationIDs,
+ }, nil
+}
+
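+// GetIncrementalConversation returns the conversation changes (insert/update/delete) since the client's last known version, and sets Full when the client should fall back to a full sync.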
+func (c *conversationServer) GetIncrementalConversation(ctx context.Context, req *conversation.GetIncrementalConversationReq) (*conversation.GetIncrementalConversationResp, error) {
+ opt := incrversion.Option[*conversation.Conversation, conversation.GetIncrementalConversationResp]{
+ Ctx: ctx,
+ VersionKey: req.UserID,
+ VersionID: req.VersionID,
+ VersionNumber: req.Version,
+ Version: c.conversationDatabase.FindConversationUserVersion,
+ CacheMaxVersion: c.conversationDatabase.FindMaxConversationUserVersionCache,
+ Find: func(ctx context.Context, conversationIDs []string) ([]*conversation.Conversation, error) {
+ return c.getConversations(ctx, req.UserID, conversationIDs)
+ },
+ Resp: func(version *model.VersionLog, delIDs []string, insertList, updateList []*conversation.Conversation, full bool) *conversation.GetIncrementalConversationResp {
+ return &conversation.GetIncrementalConversationResp{
+ VersionID: version.ID.Hex(),
+ Version: uint64(version.Version),
+ Full: full,
+ Delete: delIDs,
+ Insert: insertList,
+ Update: updateList,
+ }
+ },
+ }
+ return opt.Build()
+}
diff --git a/internal/rpc/friend/black.go b/internal/rpc/friend/black.go
index caec08b7a..218d1e7f8 100644
--- a/internal/rpc/friend/black.go
+++ b/internal/rpc/friend/black.go
@@ -16,16 +16,17 @@ package friend
import (
"context"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
- pbfriend "github.com/openimsdk/protocol/friend"
+ "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/tools/mcontext"
)
-func (s *friendServer) GetPaginationBlacks(ctx context.Context, req *pbfriend.GetPaginationBlacksReq) (resp *pbfriend.GetPaginationBlacksResp, err error) {
+func (s *friendServer) GetPaginationBlacks(ctx context.Context, req *relation.GetPaginationBlacksReq) (resp *relation.GetPaginationBlacksResp, err error) {
if err := s.userRpcClient.Access(ctx, req.UserID); err != nil {
return nil, err
}
@@ -33,7 +34,7 @@ func (s *friendServer) GetPaginationBlacks(ctx context.Context, req *pbfriend.Ge
if err != nil {
return nil, err
}
- resp = &pbfriend.GetPaginationBlacksResp{}
+ resp = &relation.GetPaginationBlacksResp{}
resp.Blacks, err = convert.BlackDB2Pb(ctx, blacks, s.userRpcClient.GetUsersInfoMap)
if err != nil {
return nil, err
@@ -42,18 +43,18 @@ func (s *friendServer) GetPaginationBlacks(ctx context.Context, req *pbfriend.Ge
return resp, nil
}
-func (s *friendServer) IsBlack(ctx context.Context, req *pbfriend.IsBlackReq) (*pbfriend.IsBlackResp, error) {
+func (s *friendServer) IsBlack(ctx context.Context, req *relation.IsBlackReq) (*relation.IsBlackResp, error) {
in1, in2, err := s.blackDatabase.CheckIn(ctx, req.UserID1, req.UserID2)
if err != nil {
return nil, err
}
- resp := &pbfriend.IsBlackResp{}
+ resp := &relation.IsBlackResp{}
resp.InUser1Blacks = in1
resp.InUser2Blacks = in2
return resp, nil
}
-func (s *friendServer) RemoveBlack(ctx context.Context, req *pbfriend.RemoveBlackReq) (*pbfriend.RemoveBlackResp, error) {
+func (s *friendServer) RemoveBlack(ctx context.Context, req *relation.RemoveBlackReq) (*relation.RemoveBlackResp, error) {
if err := s.userRpcClient.Access(ctx, req.OwnerUserID); err != nil {
return nil, err
}
@@ -64,10 +65,10 @@ func (s *friendServer) RemoveBlack(ctx context.Context, req *pbfriend.RemoveBlac
s.notificationSender.BlackDeletedNotification(ctx, req)
- return &pbfriend.RemoveBlackResp{}, nil
+ return &relation.RemoveBlackResp{}, nil
}
-func (s *friendServer) AddBlack(ctx context.Context, req *pbfriend.AddBlackReq) (*pbfriend.AddBlackResp, error) {
+func (s *friendServer) AddBlack(ctx context.Context, req *relation.AddBlackReq) (*relation.AddBlackResp, error) {
if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, s.config.Share.IMAdminUserID); err != nil {
return nil, err
}
@@ -87,5 +88,5 @@ func (s *friendServer) AddBlack(ctx context.Context, req *pbfriend.AddBlackReq)
return nil, err
}
s.notificationSender.BlackAddedNotification(ctx, req)
- return &pbfriend.AddBlackResp{}, nil
+ return &relation.AddBlackResp{}, nil
}
diff --git a/internal/rpc/friend/callback.go b/internal/rpc/friend/callback.go
index 0610cdb78..746ad21fa 100644
--- a/internal/rpc/friend/callback.go
+++ b/internal/rpc/friend/callback.go
@@ -16,14 +16,15 @@ package friend
import (
"context"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
cbapi "github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
- pbfriend "github.com/openimsdk/protocol/friend"
+ "github.com/openimsdk/protocol/relation"
)
-func (s *friendServer) webhookAfterDeleteFriend(ctx context.Context, after *config.AfterConfig, req *pbfriend.DeleteFriendReq) {
+func (s *friendServer) webhookAfterDeleteFriend(ctx context.Context, after *config.AfterConfig, req *relation.DeleteFriendReq) {
cbReq := &cbapi.CallbackAfterDeleteFriendReq{
CallbackCommand: cbapi.CallbackAfterDeleteFriendCommand,
OwnerUserID: req.OwnerUserID,
@@ -32,7 +33,7 @@ func (s *friendServer) webhookAfterDeleteFriend(ctx context.Context, after *conf
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &cbapi.CallbackAfterDeleteFriendResp{}, after)
}
-func (s *friendServer) webhookBeforeAddFriend(ctx context.Context, before *config.BeforeConfig, req *pbfriend.ApplyToAddFriendReq) error {
+func (s *friendServer) webhookBeforeAddFriend(ctx context.Context, before *config.BeforeConfig, req *relation.ApplyToAddFriendReq) error {
return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
cbReq := &cbapi.CallbackBeforeAddFriendReq{
CallbackCommand: cbapi.CallbackBeforeAddFriendCommand,
@@ -50,7 +51,7 @@ func (s *friendServer) webhookBeforeAddFriend(ctx context.Context, before *confi
})
}
-func (s *friendServer) webhookAfterAddFriend(ctx context.Context, after *config.AfterConfig, req *pbfriend.ApplyToAddFriendReq) {
+func (s *friendServer) webhookAfterAddFriend(ctx context.Context, after *config.AfterConfig, req *relation.ApplyToAddFriendReq) {
cbReq := &cbapi.CallbackAfterAddFriendReq{
CallbackCommand: cbapi.CallbackAfterAddFriendCommand,
FromUserID: req.FromUserID,
@@ -61,8 +62,7 @@ func (s *friendServer) webhookAfterAddFriend(ctx context.Context, after *config.
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, after)
}
-func (s *friendServer) webhookAfterSetFriendRemark(ctx context.Context, after *config.AfterConfig, req *pbfriend.SetFriendRemarkReq) {
-
+func (s *friendServer) webhookAfterSetFriendRemark(ctx context.Context, after *config.AfterConfig, req *relation.SetFriendRemarkReq) {
cbReq := &cbapi.CallbackAfterSetFriendRemarkReq{
CallbackCommand: cbapi.CallbackAfterSetFriendRemarkCommand,
OwnerUserID: req.OwnerUserID,
@@ -73,7 +73,7 @@ func (s *friendServer) webhookAfterSetFriendRemark(ctx context.Context, after *c
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, after)
}
-func (s *friendServer) webhookAfterImportFriends(ctx context.Context, after *config.AfterConfig, req *pbfriend.ImportFriendReq) {
+func (s *friendServer) webhookAfterImportFriends(ctx context.Context, after *config.AfterConfig, req *relation.ImportFriendReq) {
cbReq := &cbapi.CallbackAfterImportFriendsReq{
CallbackCommand: cbapi.CallbackAfterImportFriendsCommand,
OwnerUserID: req.OwnerUserID,
@@ -83,7 +83,7 @@ func (s *friendServer) webhookAfterImportFriends(ctx context.Context, after *con
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, after)
}
-func (s *friendServer) webhookAfterRemoveBlack(ctx context.Context, after *config.AfterConfig, req *pbfriend.RemoveBlackReq) {
+func (s *friendServer) webhookAfterRemoveBlack(ctx context.Context, after *config.AfterConfig, req *relation.RemoveBlackReq) {
cbReq := &cbapi.CallbackAfterRemoveBlackReq{
CallbackCommand: cbapi.CallbackAfterRemoveBlackCommand,
OwnerUserID: req.OwnerUserID,
@@ -93,7 +93,7 @@ func (s *friendServer) webhookAfterRemoveBlack(ctx context.Context, after *confi
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, after)
}
-func (s *friendServer) webhookBeforeSetFriendRemark(ctx context.Context, before *config.BeforeConfig, req *pbfriend.SetFriendRemarkReq) error {
+func (s *friendServer) webhookBeforeSetFriendRemark(ctx context.Context, before *config.BeforeConfig, req *relation.SetFriendRemarkReq) error {
return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
cbReq := &cbapi.CallbackBeforeSetFriendRemarkReq{
CallbackCommand: cbapi.CallbackBeforeSetFriendRemarkCommand,
@@ -112,7 +112,7 @@ func (s *friendServer) webhookBeforeSetFriendRemark(ctx context.Context, before
})
}
-func (s *friendServer) webhookBeforeAddBlack(ctx context.Context, before *config.BeforeConfig, req *pbfriend.AddBlackReq) error {
+func (s *friendServer) webhookBeforeAddBlack(ctx context.Context, before *config.BeforeConfig, req *relation.AddBlackReq) error {
return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
cbReq := &cbapi.CallbackBeforeAddBlackReq{
CallbackCommand: cbapi.CallbackBeforeAddBlackCommand,
@@ -124,7 +124,7 @@ func (s *friendServer) webhookBeforeAddBlack(ctx context.Context, before *config
})
}
-func (s *friendServer) webhookBeforeAddFriendAgree(ctx context.Context, before *config.BeforeConfig, req *pbfriend.RespondFriendApplyReq) error {
+func (s *friendServer) webhookBeforeAddFriendAgree(ctx context.Context, before *config.BeforeConfig, req *relation.RespondFriendApplyReq) error {
return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
cbReq := &cbapi.CallbackBeforeAddFriendAgreeReq{
CallbackCommand: cbapi.CallbackBeforeAddFriendAgreeCommand,
@@ -138,7 +138,7 @@ func (s *friendServer) webhookBeforeAddFriendAgree(ctx context.Context, before *
})
}
-func (s *friendServer) webhookBeforeImportFriends(ctx context.Context, before *config.BeforeConfig, req *pbfriend.ImportFriendReq) error {
+func (s *friendServer) webhookBeforeImportFriends(ctx context.Context, before *config.BeforeConfig, req *relation.ImportFriendReq) error {
return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
cbReq := &cbapi.CallbackBeforeImportFriendsReq{
CallbackCommand: cbapi.CallbackBeforeImportFriendsCommand,
diff --git a/internal/rpc/friend/friend.go b/internal/rpc/friend/friend.go
index 8b2dea995..bdb786bca 100644
--- a/internal/rpc/friend/friend.go
+++ b/internal/rpc/friend/friend.go
@@ -16,6 +16,8 @@ package friend
import (
"context"
+ "github.com/openimsdk/tools/mq/memamq"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
@@ -30,7 +32,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
- pbfriend "github.com/openimsdk/protocol/friend"
+ "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/discovery"
@@ -40,7 +42,7 @@ import (
)
type friendServer struct {
- friendDatabase controller.FriendDatabase
+ db controller.FriendDatabase
blackDatabase controller.BlackDatabase
userRpcClient *rpcclient.UserRpcClient
notificationSender *FriendNotificationSender
@@ -48,13 +50,14 @@ type friendServer struct {
RegisterCenter discovery.SvcDiscoveryRegistry
config *Config
webhookClient *webhook.Client
+ queue *memamq.MemoryQueue
}
type Config struct {
RpcConfig config.Friend
RedisConfig config.Redis
MongodbConfig config.Mongo
- //ZookeeperConfig config.ZooKeeper
+ // ZookeeperConfig config.ZooKeeper
NotificationConfig config.Notification
Share config.Share
WebhooksConfig config.Webhooks
@@ -100,8 +103,8 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
localcache.InitLocalCache(&config.LocalCacheConfig)
// Register Friend server with refactored MongoDB and Redis integrations
- pbfriend.RegisterFriendServer(server, &friendServer{
- friendDatabase: controller.NewFriendDatabase(
+ relation.RegisterFriendServer(server, &friendServer{
+ db: controller.NewFriendDatabase(
friendMongoDB,
friendRequestMongoDB,
redis.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB, redis.GetRocksCacheOptions()),
@@ -117,14 +120,14 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
conversationRpcClient: rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation),
config: config,
webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL),
+ queue: memamq.NewMemoryQueue(128, 1024*8),
})
-
return nil
}
// ok.
-func (s *friendServer) ApplyToAddFriend(ctx context.Context, req *pbfriend.ApplyToAddFriendReq) (resp *pbfriend.ApplyToAddFriendResp, err error) {
- resp = &pbfriend.ApplyToAddFriendResp{}
+func (s *friendServer) ApplyToAddFriend(ctx context.Context, req *relation.ApplyToAddFriendReq) (resp *relation.ApplyToAddFriendResp, err error) {
+ resp = &relation.ApplyToAddFriendResp{}
if err := authverify.CheckAccessV3(ctx, req.FromUserID, s.config.Share.IMAdminUserID); err != nil {
return nil, err
}
@@ -138,14 +141,14 @@ func (s *friendServer) ApplyToAddFriend(ctx context.Context, req *pbfriend.Apply
return nil, err
}
- in1, in2, err := s.friendDatabase.CheckIn(ctx, req.FromUserID, req.ToUserID)
+ in1, in2, err := s.db.CheckIn(ctx, req.FromUserID, req.ToUserID)
if err != nil {
return nil, err
}
if in1 && in2 {
return nil, servererrs.ErrRelationshipAlready.WrapMsg("already friends has f")
}
- if err = s.friendDatabase.AddFriendRequest(ctx, req.FromUserID, req.ToUserID, req.ReqMsg, req.Ex); err != nil {
+ if err = s.db.AddFriendRequest(ctx, req.FromUserID, req.ToUserID, req.ReqMsg, req.Ex); err != nil {
return nil, err
}
s.notificationSender.FriendApplicationAddNotification(ctx, req)
@@ -154,7 +157,7 @@ func (s *friendServer) ApplyToAddFriend(ctx context.Context, req *pbfriend.Apply
}
// ok.
-func (s *friendServer) ImportFriends(ctx context.Context, req *pbfriend.ImportFriendReq) (resp *pbfriend.ImportFriendResp, err error) {
+func (s *friendServer) ImportFriends(ctx context.Context, req *relation.ImportFriendReq) (resp *relation.ImportFriendResp, err error) {
if err := authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil {
return nil, err
}
@@ -172,11 +175,11 @@ func (s *friendServer) ImportFriends(ctx context.Context, req *pbfriend.ImportFr
return nil, err
}
- if err := s.friendDatabase.BecomeFriends(ctx, req.OwnerUserID, req.FriendUserIDs, constant.BecomeFriendByImport); err != nil {
+ if err := s.db.BecomeFriends(ctx, req.OwnerUserID, req.FriendUserIDs, constant.BecomeFriendByImport); err != nil {
return nil, err
}
for _, userID := range req.FriendUserIDs {
- s.notificationSender.FriendApplicationAgreedNotification(ctx, &pbfriend.RespondFriendApplyReq{
+ s.notificationSender.FriendApplicationAgreedNotification(ctx, &relation.RespondFriendApplyReq{
FromUserID: req.OwnerUserID,
ToUserID: userID,
HandleResult: constant.FriendResponseAgree,
@@ -184,12 +187,12 @@ func (s *friendServer) ImportFriends(ctx context.Context, req *pbfriend.ImportFr
}
s.webhookAfterImportFriends(ctx, &s.config.WebhooksConfig.AfterImportFriends, req)
- return &pbfriend.ImportFriendResp{}, nil
+ return &relation.ImportFriendResp{}, nil
}
// ok.
-func (s *friendServer) RespondFriendApply(ctx context.Context, req *pbfriend.RespondFriendApplyReq) (resp *pbfriend.RespondFriendApplyResp, err error) {
- resp = &pbfriend.RespondFriendApplyResp{}
+func (s *friendServer) RespondFriendApply(ctx context.Context, req *relation.RespondFriendApplyReq) (resp *relation.RespondFriendApplyResp, err error) {
+ resp = &relation.RespondFriendApplyResp{}
if err := authverify.CheckAccessV3(ctx, req.ToUserID, s.config.Share.IMAdminUserID); err != nil {
return nil, err
}
@@ -204,7 +207,7 @@ func (s *friendServer) RespondFriendApply(ctx context.Context, req *pbfriend.Res
if err := s.webhookBeforeAddFriendAgree(ctx, &s.config.WebhooksConfig.BeforeAddFriendAgree, req); err != nil && err != servererrs.ErrCallbackContinue {
return nil, err
}
- err := s.friendDatabase.AgreeFriendRequest(ctx, &friendRequest)
+ err := s.db.AgreeFriendRequest(ctx, &friendRequest)
if err != nil {
return nil, err
}
@@ -212,7 +215,7 @@ func (s *friendServer) RespondFriendApply(ctx context.Context, req *pbfriend.Res
return resp, nil
}
if req.HandleResult == constant.FriendResponseRefuse {
- err := s.friendDatabase.RefuseFriendRequest(ctx, &friendRequest)
+ err := s.db.RefuseFriendRequest(ctx, &friendRequest)
if err != nil {
return nil, err
}
@@ -223,16 +226,16 @@ func (s *friendServer) RespondFriendApply(ctx context.Context, req *pbfriend.Res
}
// ok.
-func (s *friendServer) DeleteFriend(ctx context.Context, req *pbfriend.DeleteFriendReq) (resp *pbfriend.DeleteFriendResp, err error) {
- resp = &pbfriend.DeleteFriendResp{}
+func (s *friendServer) DeleteFriend(ctx context.Context, req *relation.DeleteFriendReq) (resp *relation.DeleteFriendResp, err error) {
+ resp = &relation.DeleteFriendResp{}
if err := s.userRpcClient.Access(ctx, req.OwnerUserID); err != nil {
return nil, err
}
- _, err = s.friendDatabase.FindFriendsWithError(ctx, req.OwnerUserID, []string{req.FriendUserID})
+ _, err = s.db.FindFriendsWithError(ctx, req.OwnerUserID, []string{req.FriendUserID})
if err != nil {
return nil, err
}
- if err := s.friendDatabase.Delete(ctx, req.OwnerUserID, []string{req.FriendUserID}); err != nil {
+ if err := s.db.Delete(ctx, req.OwnerUserID, []string{req.FriendUserID}); err != nil {
return nil, err
}
s.notificationSender.FriendDeletedNotification(ctx, req)
@@ -241,19 +244,19 @@ func (s *friendServer) DeleteFriend(ctx context.Context, req *pbfriend.DeleteFri
}
// ok.
-func (s *friendServer) SetFriendRemark(ctx context.Context, req *pbfriend.SetFriendRemarkReq) (resp *pbfriend.SetFriendRemarkResp, err error) {
+func (s *friendServer) SetFriendRemark(ctx context.Context, req *relation.SetFriendRemarkReq) (resp *relation.SetFriendRemarkResp, err error) {
if err = s.webhookBeforeSetFriendRemark(ctx, &s.config.WebhooksConfig.BeforeSetFriendRemark, req); err != nil && err != servererrs.ErrCallbackContinue {
return nil, err
}
- resp = &pbfriend.SetFriendRemarkResp{}
+ resp = &relation.SetFriendRemarkResp{}
if err := s.userRpcClient.Access(ctx, req.OwnerUserID); err != nil {
return nil, err
}
- _, err = s.friendDatabase.FindFriendsWithError(ctx, req.OwnerUserID, []string{req.FriendUserID})
+ _, err = s.db.FindFriendsWithError(ctx, req.OwnerUserID, []string{req.FriendUserID})
if err != nil {
return nil, err
}
- if err := s.friendDatabase.UpdateRemark(ctx, req.OwnerUserID, req.FriendUserID, req.Remark); err != nil {
+ if err := s.db.UpdateRemark(ctx, req.OwnerUserID, req.FriendUserID, req.Remark); err != nil {
return nil, err
}
s.webhookAfterSetFriendRemark(ctx, &s.config.WebhooksConfig.AfterSetFriendRemark, req)
@@ -262,29 +265,40 @@ func (s *friendServer) SetFriendRemark(ctx context.Context, req *pbfriend.SetFri
}
// ok.
-func (s *friendServer) GetDesignatedFriends(ctx context.Context, req *pbfriend.GetDesignatedFriendsReq) (resp *pbfriend.GetDesignatedFriendsResp, err error) {
- resp = &pbfriend.GetDesignatedFriendsResp{}
+func (s *friendServer) GetDesignatedFriends(ctx context.Context, req *relation.GetDesignatedFriendsReq) (resp *relation.GetDesignatedFriendsResp, err error) {
+ resp = &relation.GetDesignatedFriendsResp{}
if datautil.Duplicate(req.FriendUserIDs) {
return nil, errs.ErrArgs.WrapMsg("friend userID repeated")
}
- friends, err := s.friendDatabase.FindFriendsWithError(ctx, req.OwnerUserID, req.FriendUserIDs)
+ friends, err := s.getFriend(ctx, req.OwnerUserID, req.FriendUserIDs)
if err != nil {
return nil, err
}
- if resp.FriendsInfo, err = convert.FriendsDB2Pb(ctx, friends, s.userRpcClient.GetUsersInfoMap); err != nil {
+ return &relation.GetDesignatedFriendsResp{
+ FriendsInfo: friends,
+ }, nil
+}
+
+func (s *friendServer) getFriend(ctx context.Context, ownerUserID string, friendUserIDs []string) ([]*sdkws.FriendInfo, error) {
+ if len(friendUserIDs) == 0 {
+ return nil, nil
+ }
+ friends, err := s.db.FindFriendsWithError(ctx, ownerUserID, friendUserIDs)
+ if err != nil {
return nil, err
}
- return resp, nil
+ return convert.FriendsDB2Pb(ctx, friends, s.userRpcClient.GetUsersInfoMap)
}
// Get the list of friend requests sent out proactively.
func (s *friendServer) GetDesignatedFriendsApply(ctx context.Context,
- req *pbfriend.GetDesignatedFriendsApplyReq) (resp *pbfriend.GetDesignatedFriendsApplyResp, err error) {
- friendRequests, err := s.friendDatabase.FindBothFriendRequests(ctx, req.FromUserID, req.ToUserID)
+ req *relation.GetDesignatedFriendsApplyReq,
+) (resp *relation.GetDesignatedFriendsApplyResp, err error) {
+ friendRequests, err := s.db.FindBothFriendRequests(ctx, req.FromUserID, req.ToUserID)
if err != nil {
return nil, err
}
- resp = &pbfriend.GetDesignatedFriendsApplyResp{}
+ resp = &relation.GetDesignatedFriendsApplyResp{}
resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap)
if err != nil {
return nil, err
@@ -293,15 +307,15 @@ func (s *friendServer) GetDesignatedFriendsApply(ctx context.Context,
}
// Get received friend requests (i.e., those initiated by others).
-func (s *friendServer) GetPaginationFriendsApplyTo(ctx context.Context, req *pbfriend.GetPaginationFriendsApplyToReq) (resp *pbfriend.GetPaginationFriendsApplyToResp, err error) {
+func (s *friendServer) GetPaginationFriendsApplyTo(ctx context.Context, req *relation.GetPaginationFriendsApplyToReq) (resp *relation.GetPaginationFriendsApplyToResp, err error) {
if err := s.userRpcClient.Access(ctx, req.UserID); err != nil {
return nil, err
}
- total, friendRequests, err := s.friendDatabase.PageFriendRequestToMe(ctx, req.UserID, req.Pagination)
+ total, friendRequests, err := s.db.PageFriendRequestToMe(ctx, req.UserID, req.Pagination)
if err != nil {
return nil, err
}
- resp = &pbfriend.GetPaginationFriendsApplyToResp{}
+ resp = &relation.GetPaginationFriendsApplyToResp{}
resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap)
if err != nil {
return nil, err
@@ -310,12 +324,12 @@ func (s *friendServer) GetPaginationFriendsApplyTo(ctx context.Context, req *pbf
return resp, nil
}
-func (s *friendServer) GetPaginationFriendsApplyFrom(ctx context.Context, req *pbfriend.GetPaginationFriendsApplyFromReq) (resp *pbfriend.GetPaginationFriendsApplyFromResp, err error) {
- resp = &pbfriend.GetPaginationFriendsApplyFromResp{}
+func (s *friendServer) GetPaginationFriendsApplyFrom(ctx context.Context, req *relation.GetPaginationFriendsApplyFromReq) (resp *relation.GetPaginationFriendsApplyFromResp, err error) {
+ resp = &relation.GetPaginationFriendsApplyFromResp{}
if err := s.userRpcClient.Access(ctx, req.UserID); err != nil {
return nil, err
}
- total, friendRequests, err := s.friendDatabase.PageFriendRequestFromMe(ctx, req.UserID, req.Pagination)
+ total, friendRequests, err := s.db.PageFriendRequestFromMe(ctx, req.UserID, req.Pagination)
if err != nil {
return nil, err
}
@@ -328,24 +342,24 @@ func (s *friendServer) GetPaginationFriendsApplyFrom(ctx context.Context, req *p
}
// ok.
-func (s *friendServer) IsFriend(ctx context.Context, req *pbfriend.IsFriendReq) (resp *pbfriend.IsFriendResp, err error) {
- resp = &pbfriend.IsFriendResp{}
- resp.InUser1Friends, resp.InUser2Friends, err = s.friendDatabase.CheckIn(ctx, req.UserID1, req.UserID2)
+func (s *friendServer) IsFriend(ctx context.Context, req *relation.IsFriendReq) (resp *relation.IsFriendResp, err error) {
+ resp = &relation.IsFriendResp{}
+ resp.InUser1Friends, resp.InUser2Friends, err = s.db.CheckIn(ctx, req.UserID1, req.UserID2)
if err != nil {
return nil, err
}
return resp, nil
}
-func (s *friendServer) GetPaginationFriends(ctx context.Context, req *pbfriend.GetPaginationFriendsReq) (resp *pbfriend.GetPaginationFriendsResp, err error) {
+func (s *friendServer) GetPaginationFriends(ctx context.Context, req *relation.GetPaginationFriendsReq) (resp *relation.GetPaginationFriendsResp, err error) {
if err := s.userRpcClient.Access(ctx, req.UserID); err != nil {
return nil, err
}
- total, friends, err := s.friendDatabase.PageOwnerFriends(ctx, req.UserID, req.Pagination)
+ total, friends, err := s.db.PageOwnerFriends(ctx, req.UserID, req.Pagination)
if err != nil {
return nil, err
}
- resp = &pbfriend.GetPaginationFriendsResp{}
+ resp = &relation.GetPaginationFriendsResp{}
resp.FriendsInfo, err = convert.FriendsDB2Pb(ctx, friends, s.userRpcClient.GetUsersInfoMap)
if err != nil {
return nil, err
@@ -354,19 +368,19 @@ func (s *friendServer) GetPaginationFriends(ctx context.Context, req *pbfriend.G
return resp, nil
}
-func (s *friendServer) GetFriendIDs(ctx context.Context, req *pbfriend.GetFriendIDsReq) (resp *pbfriend.GetFriendIDsResp, err error) {
+func (s *friendServer) GetFriendIDs(ctx context.Context, req *relation.GetFriendIDsReq) (resp *relation.GetFriendIDsResp, err error) {
if err := s.userRpcClient.Access(ctx, req.UserID); err != nil {
return nil, err
}
- resp = &pbfriend.GetFriendIDsResp{}
- resp.FriendIDs, err = s.friendDatabase.FindFriendUserIDs(ctx, req.UserID)
+ resp = &relation.GetFriendIDsResp{}
+ resp.FriendIDs, err = s.db.FindFriendUserIDs(ctx, req.UserID)
if err != nil {
return nil, err
}
return resp, nil
}
-func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *pbfriend.GetSpecifiedFriendsInfoReq) (*pbfriend.GetSpecifiedFriendsInfoResp, error) {
+func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *relation.GetSpecifiedFriendsInfoReq) (*relation.GetSpecifiedFriendsInfoResp, error) {
if len(req.UserIDList) == 0 {
return nil, errs.ErrArgs.WrapMsg("userIDList is empty")
}
@@ -377,7 +391,7 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *pbfrien
if err != nil {
return nil, err
}
- friends, err := s.friendDatabase.FindFriendsWithError(ctx, req.OwnerUserID, req.UserIDList)
+ friends, err := s.db.FindFriendsWithError(ctx, req.OwnerUserID, req.UserIDList)
if err != nil {
return nil, err
}
@@ -391,8 +405,8 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *pbfrien
blackMap := datautil.SliceToMap(blacks, func(e *model.Black) string {
return e.BlockUserID
})
- resp := &pbfriend.GetSpecifiedFriendsInfoResp{
- Infos: make([]*pbfriend.GetSpecifiedFriendsInfoInfo, 0, len(req.UserIDList)),
+ resp := &relation.GetSpecifiedFriendsInfoResp{
+ Infos: make([]*relation.GetSpecifiedFriendsInfoInfo, 0, len(req.UserIDList)),
}
for _, userID := range req.UserIDList {
user := userMap[userID]
@@ -401,7 +415,6 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *pbfrien
}
var friendInfo *sdkws.FriendInfo
if friend := friendMap[userID]; friend != nil {
-
friendInfo = &sdkws.FriendInfo{
OwnerUserID: friend.OwnerUserID,
Remark: friend.Remark,
@@ -422,7 +435,7 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *pbfrien
Ex: black.Ex,
}
}
- resp.Infos = append(resp.Infos, &pbfriend.GetSpecifiedFriendsInfoInfo{
+ resp.Infos = append(resp.Infos, &relation.GetSpecifiedFriendsInfoInfo{
UserInfo: user,
FriendInfo: friendInfo,
BlackInfo: blackInfo,
@@ -430,10 +443,11 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *pbfrien
}
return resp, nil
}
+
func (s *friendServer) UpdateFriends(
ctx context.Context,
- req *pbfriend.UpdateFriendsReq,
-) (*pbfriend.UpdateFriendsResp, error) {
+ req *relation.UpdateFriendsReq,
+) (*relation.UpdateFriendsResp, error) {
if len(req.FriendUserIDs) == 0 {
return nil, errs.ErrArgs.WrapMsg("friendIDList is empty")
}
@@ -441,7 +455,7 @@ func (s *friendServer) UpdateFriends(
return nil, errs.ErrArgs.WrapMsg("friendIDList repeated")
}
- _, err := s.friendDatabase.FindFriendsWithError(ctx, req.OwnerUserID, req.FriendUserIDs)
+ _, err := s.db.FindFriendsWithError(ctx, req.OwnerUserID, req.FriendUserIDs)
if err != nil {
return nil, err
}
@@ -457,12 +471,27 @@ func (s *friendServer) UpdateFriends(
if req.Ex != nil {
val["ex"] = req.Ex.Value
}
- if err = s.friendDatabase.UpdateFriends(ctx, req.OwnerUserID, req.FriendUserIDs, val); err != nil {
+ if err = s.db.UpdateFriends(ctx, req.OwnerUserID, req.FriendUserIDs, val); err != nil {
return nil, err
}
- resp := &pbfriend.UpdateFriendsResp{}
+ resp := &relation.UpdateFriendsResp{}
s.notificationSender.FriendsInfoUpdateNotification(ctx, req.OwnerUserID, req.FriendUserIDs)
return resp, nil
}
+
+func (s *friendServer) GetIncrementalFriendsApplyTo(ctx context.Context, req *relation.GetIncrementalFriendsApplyToReq) (*relation.GetIncrementalFriendsApplyToResp, error) {
+ // TODO implement me
+ return nil, nil
+}
+
+func (s *friendServer) GetIncrementalFriendsApplyFrom(ctx context.Context, req *relation.GetIncrementalFriendsApplyFromReq) (*relation.GetIncrementalFriendsApplyFromResp, error) {
+ // TODO implement me
+ return nil, nil
+}
+
+func (s *friendServer) GetIncrementalBlacks(ctx context.Context, req *relation.GetIncrementalBlacksReq) (*relation.GetIncrementalBlacksResp, error) {
+ // TODO implement me
+ return nil, nil
+}
diff --git a/internal/rpc/friend/notification.go b/internal/rpc/friend/notification.go
index 8089a9bdc..5fb34577f 100644
--- a/internal/rpc/friend/notification.go
+++ b/internal/rpc/friend/notification.go
@@ -16,6 +16,9 @@ package friend
import (
"context"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx"
+
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
@@ -24,7 +27,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification"
"github.com/openimsdk/protocol/constant"
- pbfriend "github.com/openimsdk/protocol/friend"
+ "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/mcontext"
)
@@ -127,7 +130,7 @@ func (f *FriendNotificationSender) UserInfoUpdatedNotification(ctx context.Conte
f.Notification(ctx, mcontext.GetOpUserID(ctx), changedUserID, constant.UserInfoUpdatedNotification, &tips)
}
-func (f *FriendNotificationSender) FriendApplicationAddNotification(ctx context.Context, req *pbfriend.ApplyToAddFriendReq) {
+func (f *FriendNotificationSender) FriendApplicationAddNotification(ctx context.Context, req *relation.ApplyToAddFriendReq) {
tips := sdkws.FriendApplicationTips{FromToUserID: &sdkws.FromToUserID{
FromUserID: req.FromUserID,
ToUserID: req.ToUserID,
@@ -137,7 +140,7 @@ func (f *FriendNotificationSender) FriendApplicationAddNotification(ctx context.
func (f *FriendNotificationSender) FriendApplicationAgreedNotification(
ctx context.Context,
- req *pbfriend.RespondFriendApplyReq,
+ req *relation.RespondFriendApplyReq,
) {
tips := sdkws.FriendApplicationApprovedTips{FromToUserID: &sdkws.FromToUserID{
FromUserID: req.FromUserID,
@@ -148,7 +151,7 @@ func (f *FriendNotificationSender) FriendApplicationAgreedNotification(
func (f *FriendNotificationSender) FriendApplicationRefusedNotification(
ctx context.Context,
- req *pbfriend.RespondFriendApplyReq,
+ req *relation.RespondFriendApplyReq,
) {
tips := sdkws.FriendApplicationApprovedTips{FromToUserID: &sdkws.FromToUserID{
FromUserID: req.FromUserID,
@@ -182,7 +185,7 @@ func (f *FriendNotificationSender) FriendAddedNotification(
return nil
}
-func (f *FriendNotificationSender) FriendDeletedNotification(ctx context.Context, req *pbfriend.DeleteFriendReq) {
+func (f *FriendNotificationSender) FriendDeletedNotification(ctx context.Context, req *relation.DeleteFriendReq) {
tips := sdkws.FriendDeletedTips{FromToUserID: &sdkws.FromToUserID{
FromUserID: req.OwnerUserID,
ToUserID: req.FriendUserID,
@@ -190,10 +193,37 @@ func (f *FriendNotificationSender) FriendDeletedNotification(ctx context.Context
f.Notification(ctx, req.OwnerUserID, req.FriendUserID, constant.FriendDeletedNotification, &tips)
}
+func (f *FriendNotificationSender) setVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string) {
+ versions := versionctx.GetVersionLog(ctx).Get()
+ for _, coll := range versions {
+ if coll.Name == collName && coll.Doc.DID == id {
+ *version = uint64(coll.Doc.Version)
+ *versionID = coll.Doc.ID.Hex()
+ return
+ }
+ }
+}
+
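+// setSortVersion works like setVersion and additionally extracts the sort version
+// from the VersionSortChangeID log entry when one is present.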
+func (f *FriendNotificationSender) setSortVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string, sortVersion *uint64) {
+ versions := versionctx.GetVersionLog(ctx).Get()
+ for _, coll := range versions {
+ if coll.Name == collName && coll.Doc.DID == id {
+ *version = uint64(coll.Doc.Version)
+ *versionID = coll.Doc.ID.Hex()
+ for _, elem := range coll.Doc.Logs {
+ if elem.EID == relationtb.VersionSortChangeID {
+ *sortVersion = uint64(elem.Version)
+ }
+ }
+ }
+ }
+}
+
func (f *FriendNotificationSender) FriendRemarkSetNotification(ctx context.Context, fromUserID, toUserID string) {
tips := sdkws.FriendInfoChangedTips{FromToUserID: &sdkws.FromToUserID{}}
tips.FromToUserID.FromUserID = fromUserID
tips.FromToUserID.ToUserID = toUserID
+ f.setSortVersion(ctx, &tips.FriendVersion, &tips.FriendVersionID, database.FriendVersionName, toUserID, &tips.FriendSortVersion)
f.Notification(ctx, fromUserID, toUserID, constant.FriendRemarkSetNotification, &tips)
}
@@ -204,14 +234,14 @@ func (f *FriendNotificationSender) FriendsInfoUpdateNotification(ctx context.Con
f.Notification(ctx, toUserID, toUserID, constant.FriendsInfoUpdateNotification, &tips)
}
-func (f *FriendNotificationSender) BlackAddedNotification(ctx context.Context, req *pbfriend.AddBlackReq) {
+func (f *FriendNotificationSender) BlackAddedNotification(ctx context.Context, req *relation.AddBlackReq) {
tips := sdkws.BlackAddedTips{FromToUserID: &sdkws.FromToUserID{}}
tips.FromToUserID.FromUserID = req.OwnerUserID
tips.FromToUserID.ToUserID = req.BlackUserID
f.Notification(ctx, req.OwnerUserID, req.BlackUserID, constant.BlackAddedNotification, &tips)
}
-func (f *FriendNotificationSender) BlackDeletedNotification(ctx context.Context, req *pbfriend.RemoveBlackReq) {
+func (f *FriendNotificationSender) BlackDeletedNotification(ctx context.Context, req *relation.RemoveBlackReq) {
blackDeletedTips := sdkws.BlackDeletedTips{FromToUserID: &sdkws.FromToUserID{
FromUserID: req.OwnerUserID,
ToUserID: req.BlackUserID,
diff --git a/internal/rpc/friend/sync.go b/internal/rpc/friend/sync.go
new file mode 100644
index 000000000..902cc7303
--- /dev/null
+++ b/internal/rpc/friend/sync.go
@@ -0,0 +1,104 @@
+package friend
+
+import (
+ "context"
+ "github.com/openimsdk/open-im-server/v3/pkg/util/hashutil"
+ "github.com/openimsdk/protocol/sdkws"
+ "github.com/openimsdk/tools/log"
+ "slices"
+
+ "github.com/openimsdk/open-im-server/v3/internal/rpc/incrversion"
+ "github.com/openimsdk/open-im-server/v3/pkg/authverify"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/protocol/relation"
+)
+
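+// NotificationUserInfoUpdate asynchronously bumps the friend-list version of every
+// user who has req.UserID as a friend and sends each of them a friend-info-updated notification.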
+func (s *friendServer) NotificationUserInfoUpdate(ctx context.Context, req *relation.NotificationUserInfoUpdateReq) (*relation.NotificationUserInfoUpdateResp, error) {
+ userIDs, err := s.db.FindFriendUserIDs(ctx, req.UserID)
+ if err != nil {
+ return nil, err
+ }
+ if len(userIDs) > 0 {
+ friendUserIDs := []string{req.UserID}
+ noCancelCtx := context.WithoutCancel(ctx)
+ err := s.queue.PushCtx(ctx, func() {
+ for _, userID := range userIDs {
+ if err := s.db.OwnerIncrVersion(noCancelCtx, userID, friendUserIDs, model.VersionStateUpdate); err != nil {
+ log.ZError(ctx, "OwnerIncrVersion", err, "userID", userID, "friendUserIDs", friendUserIDs)
+ }
+ }
+ for _, userID := range userIDs {
+ s.notificationSender.FriendInfoUpdatedNotification(noCancelCtx, req.UserID, userID)
+ }
+ })
+ if err != nil {
+ log.ZError(ctx, "NotificationUserInfoUpdate timeout", err, "userID", req.UserID)
+ }
+ }
+ return &relation.NotificationUserInfoUpdateResp{}, nil
+}
+
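+// GetFullFriendUserIDs returns the caller's full friend ID list together with the
+// current version; when the client's ID hash already matches, the list itself is omitted.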
+func (s *friendServer) GetFullFriendUserIDs(ctx context.Context, req *relation.GetFullFriendUserIDsReq) (*relation.GetFullFriendUserIDsResp, error) {
+ vl, err := s.db.FindMaxFriendVersionCache(ctx, req.UserID)
+ if err != nil {
+ return nil, err
+ }
+ userIDs, err := s.db.FindFriendUserIDs(ctx, req.UserID)
+ if err != nil {
+ return nil, err
+ }
+ idHash := hashutil.IdHash(userIDs)
+ if req.IdHash == idHash {
+ userIDs = nil
+ }
+ return &relation.GetFullFriendUserIDsResp{
+ Version: idHash,
+ VersionID: vl.ID.Hex(),
+ Equal: req.IdHash == idHash,
+ UserIDs: userIDs,
+ }, nil
+}
+
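+// GetIncrementalFriends returns the friend changes since the client's last version,
+// stripping sort-change entries from the log and reporting the sort version separately.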
+func (s *friendServer) GetIncrementalFriends(ctx context.Context, req *relation.GetIncrementalFriendsReq) (*relation.GetIncrementalFriendsResp, error) {
+ if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil {
+ return nil, err
+ }
+ var sortVersion uint64
+ opt := incrversion.Option[*sdkws.FriendInfo, relation.GetIncrementalFriendsResp]{
+ Ctx: ctx,
+ VersionKey: req.UserID,
+ VersionID: req.VersionID,
+ VersionNumber: req.Version,
+ Version: func(ctx context.Context, ownerUserID string, version uint, limit int) (*model.VersionLog, error) {
+ vl, err := s.db.FindFriendIncrVersion(ctx, ownerUserID, version, limit)
+ if err != nil {
+ return nil, err
+ }
+ vl.Logs = slices.DeleteFunc(vl.Logs, func(elem model.VersionLogElem) bool {
+ if elem.EID == model.VersionSortChangeID {
+ vl.LogLen--
+ sortVersion = uint64(elem.Version)
+ return true
+ }
+ return false
+ })
+ return vl, nil
+ },
+ CacheMaxVersion: s.db.FindMaxFriendVersionCache,
+ Find: func(ctx context.Context, ids []string) ([]*sdkws.FriendInfo, error) {
+ return s.getFriend(ctx, req.UserID, ids)
+ },
+ Resp: func(version *model.VersionLog, deleteIds []string, insertList, updateList []*sdkws.FriendInfo, full bool) *relation.GetIncrementalFriendsResp {
+ return &relation.GetIncrementalFriendsResp{
+ VersionID: version.ID.Hex(),
+ Version: uint64(version.Version),
+ Full: full,
+ Delete: deleteIds,
+ Insert: insertList,
+ Update: updateList,
+ SortVersion: sortVersion,
+ }
+ },
+ }
+ return opt.Build()
+}
diff --git a/internal/rpc/group/convert.go b/internal/rpc/group/convert.go
index a75693904..8026430c3 100644
--- a/internal/rpc/group/convert.go
+++ b/internal/rpc/group/convert.go
@@ -57,3 +57,7 @@ func (s *groupServer) groupMemberDB2PB(member *model.GroupMember, appMangerLevel
InviterUserID: member.InviterUserID,
}
}
+
+func (s *groupServer) groupMemberDB2PB2(member *model.GroupMember) *sdkws.GroupMemberFullInfo {
+ return s.groupMemberDB2PB(member, 0)
+}
diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go
index a9cea4ff2..e3d1d4dfe 100644
--- a/internal/rpc/group/group.go
+++ b/internal/rpc/group/group.go
@@ -17,17 +17,18 @@ package group
import (
"context"
"fmt"
+ "math/big"
+ "math/rand"
+ "strconv"
+ "strings"
+ "time"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/common"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
- "math/big"
- "math/rand"
- "strconv"
- "strings"
- "time"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
@@ -132,13 +133,17 @@ func (s *groupServer) NotificationUserInfoUpdate(ctx context.Context, req *pbgro
}
groupIDs = append(groupIDs, member.GroupID)
}
+ for _, groupID := range groupIDs {
+ if err := s.db.MemberGroupIncrVersion(ctx, groupID, []string{req.UserID}, model.VersionStateUpdate); err != nil {
+ return nil, err
+ }
+ }
for _, groupID := range groupIDs {
s.notification.GroupMemberInfoSetNotification(ctx, groupID, req.UserID)
}
if err = s.db.DeleteGroupMemberHash(ctx, groupIDs); err != nil {
return nil, err
}
-
return &pbgroup.NotificationUserInfoUpdateResp{}, nil
}
@@ -527,6 +532,14 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou
if datautil.Contain(opUserID, req.KickedUserIDs...) {
return nil, errs.ErrArgs.WrapMsg("opUserID in KickedUserIDs")
}
+ owner, err := s.db.TakeGroupOwner(ctx, req.GroupID)
+ if err != nil {
+ return nil, err
+ }
+ if datautil.Contain(owner.UserID, req.KickedUserIDs...) {
+ return nil, errs.ErrArgs.WrapMsg("ownerUID can not Kick")
+ }
+
members, err := s.db.FindGroupMembers(ctx, req.GroupID, append(req.KickedUserIDs, opUserID))
if err != nil {
return nil, err
@@ -586,7 +599,7 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou
FaceURL: group.FaceURL,
OwnerUserID: ownerUserID,
CreateTime: group.CreateTime.UnixMilli(),
- MemberCount: num,
+ MemberCount: num - uint32(len(req.KickedUserIDs)),
Ex: group.Ex,
Status: group.Status,
CreatorUserID: group.CreatorUserID,
@@ -621,18 +634,29 @@ func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetG
if req.GroupID == "" {
return nil, errs.ErrArgs.WrapMsg("groupID empty")
}
- members, err := s.db.FindGroupMembers(ctx, req.GroupID, req.UserIDs)
+ members, err := s.getGroupMembersInfo(ctx, req.GroupID, req.UserIDs)
+ if err != nil {
+ return nil, err
+ }
+ return &pbgroup.GetGroupMembersInfoResp{
+ Members: members,
+ }, nil
+}
+
+func (s *groupServer) getGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) {
+ if len(userIDs) == 0 {
+ return nil, nil
+ }
+ members, err := s.db.FindGroupMembers(ctx, groupID, userIDs)
if err != nil {
return nil, err
}
if err := s.PopulateGroupMember(ctx, members...); err != nil {
return nil, err
}
- return &pbgroup.GetGroupMembersInfoResp{
- Members: datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo {
- return convert.Db2PbGroupMember(e)
- }),
- }, nil
+ return datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo {
+ return convert.Db2PbGroupMember(e)
+ }), nil
}
// GetGroupApplicationList handles functions that get a list of group requests.
@@ -701,15 +725,28 @@ func (s *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsI
if len(req.GroupIDs) == 0 {
return nil, errs.ErrArgs.WrapMsg("groupID is empty")
}
- groups, err := s.db.FindGroup(ctx, req.GroupIDs)
+ groups, err := s.getGroupsInfo(ctx, req.GroupIDs)
if err != nil {
return nil, err
}
- groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, req.GroupIDs)
+ return &pbgroup.GetGroupsInfoResp{
+ GroupInfos: groups,
+ }, nil
+}
+
+func (s *groupServer) getGroupsInfo(ctx context.Context, groupIDs []string) ([]*sdkws.GroupInfo, error) {
+ if len(groupIDs) == 0 {
+ return nil, nil
+ }
+ groups, err := s.db.FindGroup(ctx, groupIDs)
if err != nil {
return nil, err
}
- owners, err := s.db.FindGroupsOwner(ctx, req.GroupIDs)
+ groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, groupIDs)
+ if err != nil {
+ return nil, err
+ }
+ owners, err := s.db.FindGroupsOwner(ctx, groupIDs)
if err != nil {
return nil, err
}
@@ -719,15 +756,13 @@ func (s *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsI
ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string {
return e.GroupID
})
- return &pbgroup.GetGroupsInfoResp{
- GroupInfos: datautil.Slice(groups, func(e *model.Group) *sdkws.GroupInfo {
- var ownerUserID string
- if owner, ok := ownerMap[e.GroupID]; ok {
- ownerUserID = owner.UserID
- }
- return convert.Db2PbGroupInfo(e, ownerUserID, groupMemberNumMap[e.GroupID])
- }),
- }, nil
+ return datautil.Slice(groups, func(e *model.Group) *sdkws.GroupInfo {
+ var ownerUserID string
+ if owner, ok := ownerMap[e.GroupID]; ok {
+ ownerUserID = owner.UserID
+ }
+ return convert.Db2PbGroupInfo(e, ownerUserID, groupMemberNumMap[e.GroupID])
+ }), nil
}
func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) (*pbgroup.GroupApplicationResponseResp, error) {
diff --git a/internal/rpc/group/notification.go b/internal/rpc/group/notification.go
index cfa62c85d..9815167e9 100644
--- a/internal/rpc/group/notification.go
+++ b/internal/rpc/group/notification.go
@@ -17,7 +17,10 @@ package group
import (
"context"
"fmt"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/convert"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
@@ -34,6 +37,12 @@ import (
"github.com/openimsdk/tools/utils/stringutil"
)
+// GroupApplicationReceiver enumerates who receives a group application notification.
+const (
+ applicantReceiver = iota
+ adminReceiver
+)
+
func NewGroupNotificationSender(db controller.GroupDatabase, msgRpcClient *rpcclient.MessageRpcClient, userRpcClient *rpcclient.UserRpcClient, config *Config, fn func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error)) *GroupNotificationSender {
return &GroupNotificationSender{
NotificationSender: rpcclient.NewNotificationSender(&config.NotificationConfig, rpcclient.WithRpcClient(msgRpcClient), rpcclient.WithUserRpcClient(userRpcClient)),
@@ -118,25 +127,8 @@ func (g *GroupNotificationSender) getGroupInfo(ctx context.Context, groupID stri
if len(ownerUserIDs) > 0 {
ownerUserID = ownerUserIDs[0]
}
- return &sdkws.GroupInfo{
- GroupID: gm.GroupID,
- GroupName: gm.GroupName,
- Notification: gm.Notification,
- Introduction: gm.Introduction,
- FaceURL: gm.FaceURL,
- OwnerUserID: ownerUserID,
- CreateTime: gm.CreateTime.UnixMilli(),
- MemberCount: num,
- Ex: gm.Ex,
- Status: gm.Status,
- CreatorUserID: gm.CreatorUserID,
- GroupType: gm.GroupType,
- NeedVerification: gm.NeedVerification,
- LookMemberInfo: gm.LookMemberInfo,
- ApplyMemberFriend: gm.ApplyMemberFriend,
- NotificationUpdateTime: gm.NotificationUpdateTime.UnixMilli(),
- NotificationUserID: gm.NotificationUserID,
- }, nil
+
+ return convert.Db2PbGroupInfo(gm, ownerUserID, num), nil
}
func (g *GroupNotificationSender) getGroupMembers(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) {
@@ -190,29 +182,6 @@ func (g *GroupNotificationSender) getGroupOwnerAndAdminUserID(ctx context.Contex
return datautil.Slice(members, fn), nil
}
-//nolint:unused
-func (g *GroupNotificationSender) groupDB2PB(group *model.Group, ownerUserID string, memberCount uint32) *sdkws.GroupInfo {
- return &sdkws.GroupInfo{
- GroupID: group.GroupID,
- GroupName: group.GroupName,
- Notification: group.Notification,
- Introduction: group.Introduction,
- FaceURL: group.FaceURL,
- OwnerUserID: ownerUserID,
- CreateTime: group.CreateTime.UnixMilli(),
- MemberCount: memberCount,
- Ex: group.Ex,
- Status: group.Status,
- CreatorUserID: group.CreatorUserID,
- GroupType: group.GroupType,
- NeedVerification: group.NeedVerification,
- LookMemberInfo: group.LookMemberInfo,
- ApplyMemberFriend: group.ApplyMemberFriend,
- NotificationUpdateTime: group.NotificationUpdateTime.UnixMilli(),
- NotificationUserID: group.NotificationUserID,
- }
-}
-
func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, appMangerLevel int32) *sdkws.GroupMemberFullInfo {
return &sdkws.GroupMemberFullInfo{
GroupID: member.GroupID,
@@ -287,6 +256,32 @@ func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws
return nil
}
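+// setVersion copies the version number and version document ID recorded in the
+// request context for the given collection name and document ID into the supplied pointers.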
+func (g *GroupNotificationSender) setVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string) {
+ versions := versionctx.GetVersionLog(ctx).Get()
+ for _, coll := range versions {
+ if coll.Name == collName && coll.Doc.DID == id {
+ *version = uint64(coll.Doc.Version)
+ *versionID = coll.Doc.ID.Hex()
+ return
+ }
+ }
+}
+
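+// setSortVersion works like setVersion and additionally extracts the sort version
+// from the VersionSortChangeID log entry when one is present.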
+func (g *GroupNotificationSender) setSortVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string, sortVersion *uint64) {
+ versions := versionctx.GetVersionLog(ctx).Get()
+ for _, coll := range versions {
+ if coll.Name == collName && coll.Doc.DID == id {
+ *version = uint64(coll.Doc.Version)
+ *versionID = coll.Doc.ID.Hex()
+ for _, elem := range coll.Doc.Logs {
+ if elem.EID == model.VersionSortChangeID {
+ *sortVersion = uint64(elem.Version)
+ }
+ }
+ }
+ }
+}
+
func (g *GroupNotificationSender) GroupCreatedNotification(ctx context.Context, tips *sdkws.GroupCreatedTips) {
var err error
defer func() {
@@ -297,6 +292,7 @@ func (g *GroupNotificationSender) GroupCreatedNotification(ctx context.Context,
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupCreatedNotification, tips)
}
@@ -310,6 +306,7 @@ func (g *GroupNotificationSender) GroupInfoSetNotification(ctx context.Context,
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupInfoSetNotification, tips, rpcclient.WithRpcGetUserName())
}
@@ -323,6 +320,7 @@ func (g *GroupNotificationSender) GroupInfoSetNameNotification(ctx context.Conte
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupInfoSetNameNotification, tips)
}
@@ -336,6 +334,7 @@ func (g *GroupNotificationSender) GroupInfoSetAnnouncementNotification(ctx conte
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupInfoSetAnnouncementNotification, tips, rpcclient.WithRpcGetUserName())
}
@@ -380,6 +379,7 @@ func (g *GroupNotificationSender) MemberQuitNotification(ctx context.Context, me
return
}
tips := &sdkws.MemberQuitTips{Group: group, QuitUser: member}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, member.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), member.GroupID, constant.MemberQuitNotification, tips)
}
@@ -400,15 +400,17 @@ func (g *GroupNotificationSender) GroupApplicationAcceptedNotification(ctx conte
if err != nil {
return
}
- tips := &sdkws.GroupApplicationAcceptedTips{Group: group, HandleMsg: req.HandledMsg}
- if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
+
+ var opUser *sdkws.GroupMemberFullInfo
+ if err = g.fillOpUser(ctx, &opUser, group.GroupID); err != nil {
return
}
for _, userID := range append(userIDs, req.FromUserID) {
+ tips := &sdkws.GroupApplicationAcceptedTips{Group: group, OpUser: opUser, HandleMsg: req.HandledMsg}
if userID == req.FromUserID {
- tips.ReceiverAs = 0
+ tips.ReceiverAs = applicantReceiver
} else {
- tips.ReceiverAs = 1
+ tips.ReceiverAs = adminReceiver
}
g.Notification(ctx, mcontext.GetOpUserID(ctx), userID, constant.GroupApplicationAcceptedNotification, tips)
}
@@ -431,15 +433,17 @@ func (g *GroupNotificationSender) GroupApplicationRejectedNotification(ctx conte
if err != nil {
return
}
- tips := &sdkws.GroupApplicationRejectedTips{Group: group, HandleMsg: req.HandledMsg}
- if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
+
+ var opUser *sdkws.GroupMemberFullInfo
+ if err = g.fillOpUser(ctx, &opUser, group.GroupID); err != nil {
return
}
for _, userID := range append(userIDs, req.FromUserID) {
+ tips := &sdkws.GroupApplicationAcceptedTips{Group: group, OpUser: opUser, HandleMsg: req.HandledMsg}
if userID == req.FromUserID {
- tips.ReceiverAs = 0
+ tips.ReceiverAs = applicantReceiver
} else {
- tips.ReceiverAs = 1
+ tips.ReceiverAs = adminReceiver
}
g.Notification(ctx, mcontext.GetOpUserID(ctx), userID, constant.GroupApplicationRejectedNotification, tips)
}
@@ -459,14 +463,20 @@ func (g *GroupNotificationSender) GroupOwnerTransferredNotification(ctx context.
}
opUserID := mcontext.GetOpUserID(ctx)
var member map[string]*sdkws.GroupMemberFullInfo
- member, err = g.getGroupMemberMap(ctx, req.GroupID, []string{opUserID, req.NewOwnerUserID})
+ member, err = g.getGroupMemberMap(ctx, req.GroupID, []string{opUserID, req.NewOwnerUserID, req.OldOwnerUserID})
if err != nil {
return
}
- tips := &sdkws.GroupOwnerTransferredTips{Group: group, OpUser: member[opUserID], NewGroupOwner: member[req.NewOwnerUserID]}
+ tips := &sdkws.GroupOwnerTransferredTips{
+ Group: group,
+ OpUser: member[opUserID],
+ NewGroupOwner: member[req.NewOwnerUserID],
+ OldGroupOwnerInfo: member[req.OldOwnerUserID],
+ }
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, req.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupOwnerTransferredNotification, tips)
}
@@ -480,6 +490,7 @@ func (g *GroupNotificationSender) MemberKickedNotification(ctx context.Context,
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.MemberKickedNotification, tips)
}
@@ -503,6 +514,7 @@ func (g *GroupNotificationSender) MemberInvitedNotification(ctx context.Context,
}
tips := &sdkws.MemberInvitedTips{Group: group, InvitedUserList: users}
err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID)
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberInvitedNotification, tips)
}
@@ -524,6 +536,7 @@ func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, g
return
}
tips := &sdkws.MemberEnterTips{Group: group, EntrantUser: user}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberEnterNotification, tips)
}
@@ -564,6 +577,7 @@ func (g *GroupNotificationSender) GroupMemberMutedNotification(ctx context.Conte
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberMutedNotification, tips)
}
@@ -588,6 +602,7 @@ func (g *GroupNotificationSender) GroupMemberCancelMutedNotification(ctx context
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberCancelMutedNotification, tips)
}
@@ -615,6 +630,7 @@ func (g *GroupNotificationSender) GroupMutedNotification(ctx context.Context, gr
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, groupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMutedNotification, tips)
}
@@ -642,6 +658,7 @@ func (g *GroupNotificationSender) GroupCancelMutedNotification(ctx context.Conte
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, groupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupCancelMutedNotification, tips)
}
@@ -666,6 +683,7 @@ func (g *GroupNotificationSender) GroupMemberInfoSetNotification(ctx context.Con
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setSortVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID, &tips.GroupSortVersion)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberInfoSetNotification, tips)
}
@@ -689,6 +707,7 @@ func (g *GroupNotificationSender) GroupMemberSetToAdminNotification(ctx context.
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberSetToAdminNotification, tips)
}
@@ -713,5 +732,6 @@ func (g *GroupNotificationSender) GroupMemberSetToOrdinaryUserNotification(ctx c
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil {
return
}
+ g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberSetToOrdinaryUserNotification, tips)
}
diff --git a/internal/rpc/group/sync.go b/internal/rpc/group/sync.go
new file mode 100644
index 000000000..0592aa811
--- /dev/null
+++ b/internal/rpc/group/sync.go
@@ -0,0 +1,303 @@
+package group
+
+import (
+ "context"
+
+ "github.com/openimsdk/open-im-server/v3/internal/rpc/incrversion"
+ "github.com/openimsdk/open-im-server/v3/pkg/authverify"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/open-im-server/v3/pkg/util/hashutil"
+ "github.com/openimsdk/protocol/constant"
+ pbgroup "github.com/openimsdk/protocol/group"
+ "github.com/openimsdk/protocol/sdkws"
+ "github.com/openimsdk/tools/errs"
+ "github.com/openimsdk/tools/log"
+)
+
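+// GetFullGroupMemberUserIDs returns the group's full member ID list together with the
+// current version; when the client's ID hash already matches, the list itself is omitted.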
+func (s *groupServer) GetFullGroupMemberUserIDs(ctx context.Context, req *pbgroup.GetFullGroupMemberUserIDsReq) (*pbgroup.GetFullGroupMemberUserIDsResp, error) {
+ vl, err := s.db.FindMaxGroupMemberVersionCache(ctx, req.GroupID)
+ if err != nil {
+ return nil, err
+ }
+ userIDs, err := s.db.FindGroupMemberUserID(ctx, req.GroupID)
+ if err != nil {
+ return nil, err
+ }
+ idHash := hashutil.IdHash(userIDs)
+ if req.IdHash == idHash {
+ userIDs = nil
+ }
+ return &pbgroup.GetFullGroupMemberUserIDsResp{
+ Version: idHash,
+ VersionID: vl.ID.Hex(),
+ Equal: req.IdHash == idHash,
+ UserIDs: userIDs,
+ }, nil
+}
+
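+// GetFullJoinGroupIDs returns the full list of group IDs the user has joined; when the
+// client's ID hash already matches, only the version information is returned.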
+func (s *groupServer) GetFullJoinGroupIDs(ctx context.Context, req *pbgroup.GetFullJoinGroupIDsReq) (*pbgroup.GetFullJoinGroupIDsResp, error) {
+ vl, err := s.db.FindMaxJoinGroupVersionCache(ctx, req.UserID)
+ if err != nil {
+ return nil, err
+ }
+ groupIDs, err := s.db.FindJoinGroupID(ctx, req.UserID)
+ if err != nil {
+ return nil, err
+ }
+ idHash := hashutil.IdHash(groupIDs)
+ if req.IdHash == idHash {
+ groupIDs = nil
+ }
+ return &pbgroup.GetFullJoinGroupIDsResp{
+ Version: idHash,
+ VersionID: vl.ID.Hex(),
+ Equal: req.IdHash == idHash,
+ GroupIDs: groupIDs,
+ }, nil
+}
+
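+// GetIncrementalGroupMember returns member changes since the client's last version and
+// attaches the latest group info whenever the group itself changed or a full sync is required.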
+func (s *groupServer) GetIncrementalGroupMember(ctx context.Context, req *pbgroup.GetIncrementalGroupMemberReq) (*pbgroup.GetIncrementalGroupMemberResp, error) {
+ group, err := s.db.TakeGroup(ctx, req.GroupID)
+ if err != nil {
+ return nil, err
+ }
+ if group.Status == constant.GroupStatusDismissed {
+ return nil, servererrs.ErrDismissedAlready.Wrap()
+ }
+ var (
+ hasGroupUpdate bool
+ sortVersion uint64
+ )
+ opt := incrversion.Option[*sdkws.GroupMemberFullInfo, pbgroup.GetIncrementalGroupMemberResp]{
+ Ctx: ctx,
+ VersionKey: req.GroupID,
+ VersionID: req.VersionID,
+ VersionNumber: req.Version,
+ Version: func(ctx context.Context, groupID string, version uint, limit int) (*model.VersionLog, error) {
+ vl, err := s.db.FindMemberIncrVersion(ctx, groupID, version, limit)
+ if err != nil {
+ return nil, err
+ }
+ logs := make([]model.VersionLogElem, 0, len(vl.Logs))
+ for i, log := range vl.Logs {
+ switch log.EID {
+ case model.VersionGroupChangeID:
+ vl.LogLen--
+ hasGroupUpdate = true
+ case model.VersionSortChangeID:
+ vl.LogLen--
+ sortVersion = uint64(log.Version)
+ default:
+ logs = append(logs, vl.Logs[i])
+ }
+ }
+ vl.Logs = logs
+ if vl.LogLen > 0 {
+ hasGroupUpdate = true
+ }
+ return vl, nil
+ },
+ CacheMaxVersion: s.db.FindMaxGroupMemberVersionCache,
+ Find: func(ctx context.Context, ids []string) ([]*sdkws.GroupMemberFullInfo, error) {
+ return s.getGroupMembersInfo(ctx, req.GroupID, ids)
+ },
+ Resp: func(version *model.VersionLog, delIDs []string, insertList, updateList []*sdkws.GroupMemberFullInfo, full bool) *pbgroup.GetIncrementalGroupMemberResp {
+ return &pbgroup.GetIncrementalGroupMemberResp{
+ VersionID: version.ID.Hex(),
+ Version: uint64(version.Version),
+ Full: full,
+ Delete: delIDs,
+ Insert: insertList,
+ Update: updateList,
+ SortVersion: sortVersion,
+ }
+ },
+ }
+ resp, err := opt.Build()
+ if err != nil {
+ return nil, err
+ }
+ if resp.Full || hasGroupUpdate {
+ count, err := s.db.FindGroupMemberNum(ctx, group.GroupID)
+ if err != nil {
+ return nil, err
+ }
+ owner, err := s.db.TakeGroupOwner(ctx, group.GroupID)
+ if err != nil {
+ return nil, err
+ }
+ resp.Group = s.groupDB2PB(group, owner.UserID, count)
+ }
+ return resp, nil
+}
+
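+// BatchGetIncrementalGroupMember is the multi-group variant of GetIncrementalGroupMember;
+// dismissed groups are skipped and the combined change set is capped at roughly 200 entries per call.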
+func (s *groupServer) BatchGetIncrementalGroupMember(ctx context.Context, req *pbgroup.BatchGetIncrementalGroupMemberReq) (resp *pbgroup.BatchGetIncrementalGroupMemberResp, err error) {
+ type VersionInfo struct {
+ GroupID string
+ VersionID string
+ VersionNumber uint64
+ }
+
+ var groupIDs []string
+
+ groupsVersionMap := make(map[string]*VersionInfo)
+ groupsMap := make(map[string]*model.Group)
+ hasGroupUpdateMap := make(map[string]bool)
+ sortVersionMap := make(map[string]uint64)
+
+ var targetKeys, versionIDs []string
+ var versionNumbers []uint64
+
+ var requestBodyLen int
+
+ for _, group := range req.ReqList {
+ groupsVersionMap[group.GroupID] = &VersionInfo{
+ GroupID: group.GroupID,
+ VersionID: group.VersionID,
+ VersionNumber: group.Version,
+ }
+
+ groupIDs = append(groupIDs, group.GroupID)
+ }
+
+ groups, err := s.db.FindGroup(ctx, groupIDs)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+
+ for _, group := range groups {
+ if group.Status == constant.GroupStatusDismissed {
+ err = servererrs.ErrDismissedAlready.Wrap()
+ log.ZError(ctx, "This group is Dismissed Already", err, "group is", group.GroupID)
+
+ delete(groupsVersionMap, group.GroupID)
+ } else {
+ groupsMap[group.GroupID] = group
+ }
+ }
+
+ for groupID, vInfo := range groupsVersionMap {
+ targetKeys = append(targetKeys, groupID)
+ versionIDs = append(versionIDs, vInfo.VersionID)
+ versionNumbers = append(versionNumbers, vInfo.VersionNumber)
+ }
+
+ opt := incrversion.BatchOption[[]*sdkws.GroupMemberFullInfo, pbgroup.BatchGetIncrementalGroupMemberResp]{
+ Ctx: ctx,
+ TargetKeys: targetKeys,
+ VersionIDs: versionIDs,
+ VersionNumbers: versionNumbers,
+ Versions: func(ctx context.Context, groupIDs []string, versions []uint64, limits []int) (map[string]*model.VersionLog, error) {
+ vLogs, err := s.db.BatchFindMemberIncrVersion(ctx, groupIDs, versions, limits)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+
+ for groupID, vlog := range vLogs {
+ vlogElems := make([]model.VersionLogElem, 0, len(vlog.Logs))
+ for i, log := range vlog.Logs {
+ switch log.EID {
+ case model.VersionGroupChangeID:
+ vlog.LogLen--
+ hasGroupUpdateMap[groupID] = true
+ case model.VersionSortChangeID:
+ vlog.LogLen--
+ sortVersionMap[groupID] = uint64(log.Version)
+ default:
+ vlogElems = append(vlogElems, vlog.Logs[i])
+ }
+ }
+ vlog.Logs = vlogElems
+ if vlog.LogLen > 0 {
+ hasGroupUpdateMap[groupID] = true
+ }
+ }
+
+ return vLogs, nil
+ },
+ CacheMaxVersions: s.db.BatchFindMaxGroupMemberVersionCache,
+ Find: func(ctx context.Context, groupID string, ids []string) ([]*sdkws.GroupMemberFullInfo, error) {
+ memberInfo, err := s.getGroupMembersInfo(ctx, groupID, ids)
+ if err != nil {
+ return nil, err
+ }
+
+ return memberInfo, err
+ },
+ Resp: func(versions map[string]*model.VersionLog, deleteIdsMap map[string][]string, insertListMap, updateListMap map[string][]*sdkws.GroupMemberFullInfo, fullMap map[string]bool) *pbgroup.BatchGetIncrementalGroupMemberResp {
+ resList := make(map[string]*pbgroup.GetIncrementalGroupMemberResp)
+
+ for groupID, versionLog := range versions {
+ resList[groupID] = &pbgroup.GetIncrementalGroupMemberResp{
+ VersionID: versionLog.ID.Hex(),
+ Version: uint64(versionLog.Version),
+ Full: fullMap[groupID],
+ Delete: deleteIdsMap[groupID],
+ Insert: insertListMap[groupID],
+ Update: updateListMap[groupID],
+ SortVersion: sortVersionMap[groupID],
+ }
+
+ requestBodyLen += len(insertListMap[groupID]) + len(updateListMap[groupID]) + len(deleteIdsMap[groupID])
+ if requestBodyLen > 200 {
+ break
+ }
+ }
+
+ return &pbgroup.BatchGetIncrementalGroupMemberResp{
+ RespList: resList,
+ }
+ },
+ }
+
+ resp, err = opt.Build()
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+
+ for groupID, val := range resp.RespList {
+ if val.Full || hasGroupUpdateMap[groupID] {
+ count, err := s.db.FindGroupMemberNum(ctx, groupID)
+ if err != nil {
+ return nil, err
+ }
+
+ owner, err := s.db.TakeGroupOwner(ctx, groupID)
+ if err != nil {
+ return nil, err
+ }
+
+ resp.RespList[groupID].Group = s.groupDB2PB(groupsMap[groupID], owner.UserID, count)
+ }
+ }
+
+ return resp, nil
+}
+
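+// GetIncrementalJoinGroup returns the changes to the user's joined-group list since the
+// client's last version.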
+func (s *groupServer) GetIncrementalJoinGroup(ctx context.Context, req *pbgroup.GetIncrementalJoinGroupReq) (*pbgroup.GetIncrementalJoinGroupResp, error) {
+ if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil {
+ return nil, err
+ }
+ opt := incrversion.Option[*sdkws.GroupInfo, pbgroup.GetIncrementalJoinGroupResp]{
+ Ctx: ctx,
+ VersionKey: req.UserID,
+ VersionID: req.VersionID,
+ VersionNumber: req.Version,
+ Version: s.db.FindJoinIncrVersion,
+ CacheMaxVersion: s.db.FindMaxJoinGroupVersionCache,
+ Find: s.getGroupsInfo,
+ Resp: func(version *model.VersionLog, delIDs []string, insertList, updateList []*sdkws.GroupInfo, full bool) *pbgroup.GetIncrementalJoinGroupResp {
+ return &pbgroup.GetIncrementalJoinGroupResp{
+ VersionID: version.ID.Hex(),
+ Version: uint64(version.Version),
+ Full: full,
+ Delete: delIDs,
+ Insert: insertList,
+ Update: updateList,
+ }
+ },
+ }
+ return opt.Build()
+}
diff --git a/internal/rpc/incrversion/batch_option.go b/internal/rpc/incrversion/batch_option.go
new file mode 100644
index 000000000..34d1b2506
--- /dev/null
+++ b/internal/rpc/incrversion/batch_option.go
@@ -0,0 +1,207 @@
+package incrversion
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/tools/errs"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+type BatchOption[A, B any] struct {
+ Ctx context.Context
+ TargetKeys []string
+ VersionIDs []string
+ VersionNumbers []uint64
+ //SyncLimit int
+ Versions func(ctx context.Context, dIds []string, versions []uint64, limits []int) (map[string]*model.VersionLog, error)
+ CacheMaxVersions func(ctx context.Context, dIds []string) (map[string]*model.VersionLog, error)
+ Find func(ctx context.Context, dId string, ids []string) (A, error)
+ Resp func(versionsMap map[string]*model.VersionLog, deleteIdsMap map[string][]string, insertListMap, updateListMap map[string]A, fullMap map[string]bool) *B
+}
+
+func (o *BatchOption[A, B]) newError(msg string) error {
+ return errs.ErrInternalServer.WrapMsg(msg)
+}
+
+func (o *BatchOption[A, B]) check() error {
+ if o.Ctx == nil {
+ return o.newError("opt ctx is nil")
+ }
+ if len(o.TargetKeys) == 0 {
+ return o.newError("targetKeys is empty")
+ }
+ if o.Versions == nil {
+ return o.newError("func versions is nil")
+ }
+ if o.Find == nil {
+ return o.newError("func find is nil")
+ }
+ if o.Resp == nil {
+ return o.newError("func resp is nil")
+ }
+ return nil
+}
+
+func (o *BatchOption[A, B]) validVersions() []bool {
+ valids := make([]bool, len(o.VersionIDs))
+ for i, versionID := range o.VersionIDs {
+ objID, err := primitive.ObjectIDFromHex(versionID)
+ valids[i] = (err == nil && (!objID.IsZero()) && o.VersionNumbers[i] > 0)
+ }
+ return valids
+}
+
+func (o *BatchOption[A, B]) equalIDs(objIDs []primitive.ObjectID) []bool {
+ equals := make([]bool, len(o.VersionIDs))
+ for i, versionID := range o.VersionIDs {
+ equals[i] = versionID == objIDs[i].Hex()
+ }
+ return equals
+}
+
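+// getVersions resolves a version log for each target key: targets with an invalid or
+// mismatched client version are tagged for a full sync, up-to-date targets are tagged
+// equal, and the rest are queried incrementally starting from the client's version.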
+func (o *BatchOption[A, B]) getVersions(tags *[]int) (versions map[string]*model.VersionLog, err error) {
+ var dIDs []string
+ var versionNums []uint64
+ var limits []int
+
+ valids := o.validVersions()
+
+ if o.CacheMaxVersions == nil {
+ for i, valid := range valids {
+ if valid {
+ (*tags)[i] = tagQuery
+ dIDs = append(dIDs, o.TargetKeys[i])
+ versionNums = append(versionNums, o.VersionNumbers[i])
+ limits = append(limits, syncLimit)
+ } else {
+ (*tags)[i] = tagFull
+ dIDs = append(dIDs, o.TargetKeys[i])
+ versionNums = append(versionNums, 0)
+ limits = append(limits, 0)
+ }
+ }
+
+ versions, err = o.Versions(o.Ctx, dIDs, versionNums, limits)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ return versions, nil
+
+ } else {
+ caches, err := o.CacheMaxVersions(o.Ctx, o.TargetKeys)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+
+ objIDs := make([]primitive.ObjectID, len(o.VersionIDs))
+
+ for i, versionID := range o.VersionIDs {
+ objID, _ := primitive.ObjectIDFromHex(versionID)
+ objIDs[i] = objID
+ }
+
+ equals := o.equalIDs(objIDs)
+ for i, valid := range valids {
+ if !valid {
+ (*tags)[i] = tagFull
+ } else if !equals[i] {
+ (*tags)[i] = tagFull
+ } else if o.VersionNumbers[i] == uint64(caches[o.TargetKeys[i]].Version) {
+ (*tags)[i] = tagEqual
+ } else {
+ (*tags)[i] = tagQuery
+ dIDs = append(dIDs, o.TargetKeys[i])
+ versionNums = append(versionNums, o.VersionNumbers[i])
+ limits = append(limits, syncLimit)
+
+ delete(caches, o.TargetKeys[i])
+ }
+ }
+
+ if dIDs != nil {
+ versionMap, err := o.Versions(o.Ctx, dIDs, versionNums, limits)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+
+ for k, v := range versionMap {
+ caches[k] = v
+ }
+ }
+
+ versions = caches
+ }
+ return versions, nil
+}
+
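+// Build validates the options, resolves every target's version log, and assembles the
+// response with per-target delete/insert/update sets or a full-sync flag.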
+func (o *BatchOption[A, B]) Build() (*B, error) {
+ if err := o.check(); err != nil {
+ return nil, errs.Wrap(err)
+ }
+
+ tags := make([]int, len(o.TargetKeys))
+ versions, err := o.getVersions(&tags)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+
+ fullMap := make(map[string]bool)
+ for i, tag := range tags {
+ switch tag {
+ case tagQuery:
+ vLog := versions[o.TargetKeys[i]]
+ fullMap[o.TargetKeys[i]] = vLog.ID.Hex() != o.VersionIDs[i] || uint64(vLog.Version) < o.VersionNumbers[i] || len(vLog.Logs) != vLog.LogLen
+ case tagFull:
+ fullMap[o.TargetKeys[i]] = true
+ case tagEqual:
+ fullMap[o.TargetKeys[i]] = false
+ default:
+ panic(fmt.Errorf("undefined tag %d", tag))
+ }
+ }
+
+ var (
+ insertIdsMap = make(map[string][]string)
+ deleteIdsMap = make(map[string][]string)
+ updateIdsMap = make(map[string][]string)
+ )
+
+ for _, targetKey := range o.TargetKeys {
+ if !fullMap[targetKey] {
+ version := versions[targetKey]
+ insertIds, deleteIds, updateIds := version.DeleteAndChangeIDs()
+ insertIdsMap[targetKey] = insertIds
+ deleteIdsMap[targetKey] = deleteIds
+ updateIdsMap[targetKey] = updateIds
+ }
+ }
+
+ var (
+ insertListMap = make(map[string]A)
+ updateListMap = make(map[string]A)
+ )
+
+ for targetKey, insertIds := range insertIdsMap {
+ if len(insertIds) > 0 {
+ insertList, err := o.Find(o.Ctx, targetKey, insertIds)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ insertListMap[targetKey] = insertList
+ }
+ }
+
+ for targetKey, updateIds := range updateIdsMap {
+ if len(updateIds) > 0 {
+ updateList, err := o.Find(o.Ctx, targetKey, updateIds)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ updateListMap[targetKey] = updateList
+ }
+ }
+
+ return o.Resp(versions, deleteIdsMap, insertListMap, updateListMap, fullMap), nil
+}
diff --git a/internal/rpc/incrversion/option.go b/internal/rpc/incrversion/option.go
new file mode 100644
index 000000000..af1200d5c
--- /dev/null
+++ b/internal/rpc/incrversion/option.go
@@ -0,0 +1,153 @@
+package incrversion
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/tools/errs"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+//func Limit(maxSync int, version uint64) int {
+// if version == 0 {
+// return 0
+// }
+// return maxSync
+//}
+
+const syncLimit = 200
+
+const (
+ tagQuery = iota + 1
+ tagFull
+ tagEqual
+)
+
+type Option[A, B any] struct {
+ Ctx context.Context
+ VersionKey string
+ VersionID string
+ VersionNumber uint64
+ //SyncLimit int
+ CacheMaxVersion func(ctx context.Context, dId string) (*model.VersionLog, error)
+ Version func(ctx context.Context, dId string, version uint, limit int) (*model.VersionLog, error)
+ //SortID func(ctx context.Context, dId string) ([]string, error)
+ Find func(ctx context.Context, ids []string) ([]A, error)
+ Resp func(version *model.VersionLog, deleteIds []string, insertList, updateList []A, full bool) *B
+}
+
+func (o *Option[A, B]) newError(msg string) error {
+ return errs.ErrInternalServer.WrapMsg(msg)
+}
+
+func (o *Option[A, B]) check() error {
+ if o.Ctx == nil {
+ return o.newError("opt ctx is nil")
+ }
+ if o.VersionKey == "" {
+ return o.newError("versionKey is empty")
+ }
+ //if o.SyncLimit <= 0 {
+ // return o.newError("invalid synchronization quantity")
+ //}
+ if o.Version == nil {
+ return o.newError("func version is nil")
+ }
+ //if o.SortID == nil {
+ // return o.newError("func allID is nil")
+ //}
+ if o.Find == nil {
+ return o.newError("func find is nil")
+ }
+ if o.Resp == nil {
+ return o.newError("func resp is nil")
+ }
+ return nil
+}
+
+func (o *Option[A, B]) validVersion() bool {
+ objID, err := primitive.ObjectIDFromHex(o.VersionID)
+ return err == nil && (!objID.IsZero()) && o.VersionNumber > 0
+}
+
+func (o *Option[A, B]) equalID(objID primitive.ObjectID) bool {
+ return o.VersionID == objID.Hex()
+}
+
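+// getVersion returns the version log to diff against and tags the request as an
+// incremental query, a full resync, or already up to date.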
+func (o *Option[A, B]) getVersion(tag *int) (*model.VersionLog, error) {
+ if o.CacheMaxVersion == nil {
+ if o.validVersion() {
+ *tag = tagQuery
+ return o.Version(o.Ctx, o.VersionKey, uint(o.VersionNumber), syncLimit)
+ }
+ *tag = tagFull
+ return o.Version(o.Ctx, o.VersionKey, 0, 0)
+ } else {
+ cache, err := o.CacheMaxVersion(o.Ctx, o.VersionKey)
+ if err != nil {
+ return nil, err
+ }
+ if !o.validVersion() {
+ *tag = tagFull
+ return cache, nil
+ }
+ if !o.equalID(cache.ID) {
+ *tag = tagFull
+ return cache, nil
+ }
+ if o.VersionNumber == uint64(cache.Version) {
+ *tag = tagEqual
+ return cache, nil
+ }
+ *tag = tagQuery
+ return o.Version(o.Ctx, o.VersionKey, uint(o.VersionNumber), syncLimit)
+ }
+}
+
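+// Build resolves the version log, computes the inserted, deleted, and updated IDs,
+// loads the corresponding records, and hands everything to Resp to build the response.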
+func (o *Option[A, B]) Build() (*B, error) {
+ if err := o.check(); err != nil {
+ return nil, err
+ }
+ var tag int
+ version, err := o.getVersion(&tag)
+ if err != nil {
+ return nil, err
+ }
+ var full bool
+ switch tag {
+ case tagQuery:
+ full = version.ID.Hex() != o.VersionID || uint64(version.Version) < o.VersionNumber || len(version.Logs) != version.LogLen
+ case tagFull:
+ full = true
+ case tagEqual:
+ full = false
+ default:
+ panic(fmt.Errorf("undefined tag %d", tag))
+ }
+ var (
+ insertIds []string
+ deleteIds []string
+ updateIds []string
+ )
+ if !full {
+ insertIds, deleteIds, updateIds = version.DeleteAndChangeIDs()
+ }
+ var (
+ insertList []A
+ updateList []A
+ )
+ if len(insertIds) > 0 {
+ insertList, err = o.Find(o.Ctx, insertIds)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(updateIds) > 0 {
+ updateList, err = o.Find(o.Ctx, updateIds)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return o.Resp(version, deleteIds, insertList, updateList, full), nil
+}
diff --git a/internal/rpc/msg/callback.go b/internal/rpc/msg/callback.go
index 10404675e..be58d7504 100644
--- a/internal/rpc/msg/callback.go
+++ b/internal/rpc/msg/callback.go
@@ -83,6 +83,11 @@ func (m *msgServer) webhookAfterSendSingleMsg(ctx context.Context, after *config
if msg.MsgData.ContentType == constant.Typing {
return
}
+ // According to the attentionIds configuration, only some users are sent
+ attentionIds := after.AttentionIds
+ if attentionIds != nil && !datautil.Contain(msg.MsgData.RecvID, attentionIds...) && !datautil.Contain(msg.MsgData.SendID, attentionIds...) {
+ return
+ }
cbReq := &cbapi.CallbackAfterSendSingleMsgReq{
CommonCallbackReq: toCommonCallback(ctx, msg, cbapi.CallbackAfterSendSingleMsgCommand),
RecvID: msg.MsgData.RecvID,
diff --git a/internal/rpc/msg/seq.go b/internal/rpc/msg/seq.go
index 27465c210..1ebec4a71 100644
--- a/internal/rpc/msg/seq.go
+++ b/internal/rpc/msg/seq.go
@@ -16,13 +16,15 @@ package msg
import (
"context"
+ "github.com/openimsdk/tools/errs"
+ "github.com/redis/go-redis/v9"
pbmsg "github.com/openimsdk/protocol/msg"
)
func (m *msgServer) GetConversationMaxSeq(ctx context.Context, req *pbmsg.GetConversationMaxSeqReq) (*pbmsg.GetConversationMaxSeqResp, error) {
maxSeq, err := m.MsgDatabase.GetMaxSeq(ctx, req.ConversationID)
- if err != nil {
+ if err != nil && errs.Unwrap(err) != redis.Nil {
return nil, err
}
return &pbmsg.GetConversationMaxSeqResp{MaxSeq: maxSeq}, nil
diff --git a/internal/rpc/msg/server.go b/internal/rpc/msg/server.go
index f1fb28fff..de0f698ea 100644
--- a/internal/rpc/msg/server.go
+++ b/internal/rpc/msg/server.go
@@ -86,12 +86,21 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err
}
msgModel := redis.NewMsgCache(rdb)
- seqModel := redis.NewSeqCache(rdb)
conversationClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
friendRpcClient := rpcclient.NewFriendRpcClient(client, config.Share.RpcRegisterName.Friend)
- msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqModel, &config.KafkaConfig)
+ seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
+ if err != nil {
+ return err
+ }
+ seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
+ seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
+ if err != nil {
+ return err
+ }
+ seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
+ msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}
diff --git a/internal/rpc/msg/sync_msg.go b/internal/rpc/msg/sync_msg.go
index afb79506e..f5b5ebda5 100644
--- a/internal/rpc/msg/sync_msg.go
+++ b/internal/rpc/msg/sync_msg.go
@@ -111,7 +111,7 @@ func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sd
func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (resp *msg.SearchMessageResp, err error) {
var chatLogs []*sdkws.MsgData
- var total int32
+ var total int64
resp = &msg.SearchMessageResp{}
if total, chatLogs, err = m.MsgDatabase.SearchMessage(ctx, req); err != nil {
return nil, err
@@ -194,7 +194,7 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq
}
resp.ChatLogs = append(resp.ChatLogs, pbchatLog)
}
- resp.ChatLogsNum = total
+ resp.ChatLogsNum = int32(total)
return resp, nil
}
diff --git a/internal/rpc/third/log.go b/internal/rpc/third/log.go
index 5c0b1f2e6..cd52727cb 100644
--- a/internal/rpc/third/log.go
+++ b/internal/rpc/third/log.go
@@ -50,13 +50,14 @@ func (t *thirdServer) UploadLogs(ctx context.Context, req *third.UploadLogsReq)
platform := constant.PlatformID2Name[int(req.Platform)]
for _, fileURL := range req.FileURLs {
log := relationtb.Log{
- Version: req.Version,
- SystemType: req.SystemType,
Platform: platform,
UserID: userID,
CreateTime: time.Now(),
Url: fileURL.URL,
FileName: fileURL.Filename,
+ SystemType: req.SystemType,
+ Version: req.Version,
+ Ex: req.Ex,
}
for i := 0; i < 20; i++ {
id := genLogID()
diff --git a/internal/rpc/third/s3.go b/internal/rpc/third/s3.go
index 4cb1b81d0..f96eb7390 100644
--- a/internal/rpc/third/s3.go
+++ b/internal/rpc/third/s3.go
@@ -19,13 +19,17 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"path"
"strconv"
"time"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/config"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "go.mongodb.org/mongo-driver/mongo"
+
"github.com/google/uuid"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
+ "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
@@ -283,6 +287,52 @@ func (t *thirdServer) apiAddress(prefix, name string) string {
return prefix + name
}
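+// DeleteOutdatedData pages through object records that expired before req.ExpireTime,
+// deletes S3 objects (and their thumbnails) that are no longer referenced, and removes
+// the expired records.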
+func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) {
+ var conf config.Third = t.config.RpcConfig // use the configured object storage settings rather than a zero value
+ expireTime := time.UnixMilli(req.ExpireTime)
+ findPagination := &sdkws.RequestPagination{
+ PageNumber: 1,
+ ShowNumber: 1000,
+ }
+ for {
+ total, models, err := t.s3dataBase.FindByExpires(ctx, expireTime, findPagination)
+ if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments {
+ return nil, errs.Wrap(err)
+ }
+ needDelObjectKeys := make([]string, 0)
+ for _, model := range models {
+ needDelObjectKeys = append(needDelObjectKeys, model.Key)
+ }
+
+ needDelObjectKeys = datautil.Distinct(needDelObjectKeys)
+ for _, key := range needDelObjectKeys {
+ count, err := t.s3dataBase.FindNotDelByS3(ctx, key, expireTime)
+ if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments {
+ return nil, errs.Wrap(err)
+ }
+ if int(count) < 1 && t.minio != nil {
+ thumbnailKey, err := t.getMinioImageThumbnailKey(ctx, key)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ t.s3dataBase.DeleteObject(ctx, thumbnailKey)
+ t.s3dataBase.DelS3Key(ctx, conf.Object.Enable, needDelObjectKeys...)
+ t.s3dataBase.DeleteObject(ctx, key)
+ }
+ }
+ for _, model := range models {
+ err := t.s3dataBase.DeleteSpecifiedData(ctx, model.Engine, model.Name)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ }
+ if total < int64(findPagination.ShowNumber) {
+ break
+ }
+ }
+ return &third.DeleteOutdatedDataResp{}, nil
+}
+
type FormDataMate struct {
Name string `json:"name"`
Size int64 `json:"size"`
diff --git a/internal/rpc/third/third.go b/internal/rpc/third/third.go
index 7560486a0..0eeaaa314 100644
--- a/internal/rpc/third/third.go
+++ b/internal/rpc/third/third.go
@@ -31,6 +31,7 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cos"
+ "github.com/openimsdk/tools/s3/kodo"
"github.com/openimsdk/tools/s3/minio"
"github.com/openimsdk/tools/s3/oss"
"google.golang.org/grpc"
@@ -42,7 +43,9 @@ type thirdServer struct {
userRpcClient rpcclient.UserRpcClient
defaultExpire time.Duration
config *Config
+ minio *minio.Minio
}
+
type Config struct {
RpcConfig config.Third
RedisConfig config.Redis
@@ -73,14 +76,20 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
}
// Select the oss method according to the profile policy
enable := config.RpcConfig.Object.Enable
- var o s3.Interface
+ var (
+ o s3.Interface
+ minioCli *minio.Minio
+ )
switch enable {
case "minio":
- o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
+ minioCli, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
+ o = minioCli
case "cos":
o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
case "oss":
o, err = oss.NewOSS(*config.RpcConfig.Object.Oss.Build())
+ case "kodo":
+ o, err = kodo.NewKodo(*config.RpcConfig.Object.Kodo.Build())
default:
err = fmt.Errorf("invalid object enable: %s", enable)
}
@@ -94,10 +103,15 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
s3dataBase: controller.NewS3Database(rdb, o, s3db),
defaultExpire: time.Hour * 24 * 7,
config: config,
+ minio: minioCli,
})
return nil
}
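+// getMinioImageThumbnailKey resolves the thumbnail object key for an image stored in MinIO; callers check that the MinIO client is configured.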
+func (t *thirdServer) getMinioImageThumbnailKey(ctx context.Context, name string) (string, error) {
+ return t.minio.GetImageThumbnailKey(ctx, name)
+}
+
func (t *thirdServer) FcmUpdateToken(ctx context.Context, req *third.FcmUpdateTokenReq) (resp *third.FcmUpdateTokenResp, err error) {
err = t.thirdDatabase.FcmUpdateToken(ctx, req.Account, int(req.PlatformID), req.FcmToken, req.ExpireTime)
if err != nil {
diff --git a/internal/rpc/user/online.go b/internal/rpc/user/online.go
new file mode 100644
index 000000000..99b272006
--- /dev/null
+++ b/internal/rpc/user/online.go
@@ -0,0 +1,82 @@
+package user
+
+import (
+ "context"
+ "github.com/openimsdk/protocol/constant"
+ pbuser "github.com/openimsdk/protocol/user"
+)
+
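+// getUserOnlineStatus returns the platform IDs a user is currently online on and derives the overall Online/Offline status.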
+func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) {
+ platformIDs, err := s.online.GetOnline(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ status := pbuser.OnlineStatus{
+ UserID: userID,
+ PlatformIDs: platformIDs,
+ }
+ if len(platformIDs) > 0 {
+ status.Status = constant.Online
+ } else {
+ status.Status = constant.Offline
+ }
+ return &status, nil
+}
+
+func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) {
+ res := make([]*pbuser.OnlineStatus, 0, len(userIDs))
+ for _, userID := range userIDs {
+ status, err := s.getUserOnlineStatus(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, status)
+ }
+ return res, nil
+}
+
+// SubscribeOrCancelUsersStatus subscribes to, or cancels subscriptions to, users' online status.
+func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) {
+ return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
+}
+
+// GetUserStatus Get the online status of the user.
+func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) {
+ res, err := s.getUsersOnlineStatus(ctx, req.UserIDs)
+ if err != nil {
+ return nil, err
+ }
+ return &pbuser.GetUserStatusResp{StatusList: res}, nil
+}
+
+// SetUserStatus Synchronize user's online status.
+func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) {
+ var (
+ online []int32
+ offline []int32
+ )
+ switch req.Status {
+ case constant.Online:
+ online = []int32{req.PlatformID}
+ case constant.Offline:
+ offline = []int32{req.PlatformID}
+ }
+ if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil {
+ return nil, err
+ }
+ return &pbuser.SetUserStatusResp{}, nil
+}
+
+// GetSubscribeUsersStatus Get the online status of subscribers.
+func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
+ return &pbuser.GetSubscribeUsersStatusResp{}, nil
+}
+
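+// SetUserOnlineStatus applies the reported online and offline platform changes for each user in the request.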
+func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
+ for _, status := range req.Status {
+ if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil {
+ return nil, err
+ }
+ }
+ return &pbuser.SetUserOnlineStatusResp{}, nil
+}
diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go
index d0d3dbf60..1e534437d 100644
--- a/internal/rpc/user/user.go
+++ b/internal/rpc/user/user.go
@@ -16,16 +16,22 @@ package user
import (
"context"
+ "errors"
"github.com/openimsdk/open-im-server/v3/internal/rpc/friend"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
+ "github.com/openimsdk/protocol/group"
+ friendpb "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/tools/db/redisutil"
"math/rand"
"strings"
+ "sync"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
@@ -46,6 +52,7 @@ import (
)
type userServer struct {
+ online cache.OnlineCache
db controller.UserDatabase
friendNotificationSender *friend.FriendNotificationSender
userNotificationSender *UserNotificationSender
@@ -87,13 +94,13 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi
return err
}
userCache := redis.NewUserCacheRedis(rdb, &config.LocalCacheConfig, userDB, redis.GetRocksCacheOptions())
- userMongoDB := mgo.NewUserMongoDriver(mgocli.GetDB())
- database := controller.NewUserDatabase(userDB, userCache, mgocli.GetTx(), userMongoDB)
+ database := controller.NewUserDatabase(userDB, userCache, mgocli.GetTx())
friendRpcClient := rpcclient.NewFriendRpcClient(client, config.Share.RpcRegisterName.Friend)
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
localcache.InitLocalCache(&config.LocalCacheConfig)
u := &userServer{
+ online: redis.NewUserOnline(rdb),
db: database,
RegisterCenter: client,
friendRpcClient: &friendRpcClient,
@@ -131,26 +138,29 @@ func (s *userServer) UpdateUserInfo(ctx context.Context, req *pbuser.UpdateUserI
if err := s.webhookBeforeUpdateUserInfo(ctx, &s.config.WebhooksConfig.BeforeUpdateUserInfo, req); err != nil {
return nil, err
}
-
data := convert.UserPb2DBMap(req.UserInfo)
- if err := s.db.UpdateByMap(ctx, req.UserInfo.UserID, data); err != nil {
- return nil, err
- }
- s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID)
- friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID)
+ oldUser, err := s.db.GetUserByID(ctx, req.UserInfo.UserID)
if err != nil {
return nil, err
}
- if req.UserInfo.Nickname != "" || req.UserInfo.FaceURL != "" {
- if err = s.groupRpcClient.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID); err != nil {
- return nil, err
- }
- }
- for _, friendID := range friends {
- s.friendNotificationSender.FriendInfoUpdatedNotification(ctx, req.UserInfo.UserID, friendID)
+ if err := s.db.UpdateByMap(ctx, req.UserInfo.UserID, data); err != nil {
+ return nil, err
}
+ s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID)
+ //friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID)
+ //if err != nil {
+ // return nil, err
+ //}
+ //if req.UserInfo.Nickname != "" || req.UserInfo.FaceURL != "" {
+ // if err = s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID,oldUser); err != nil {
+ // return nil, err
+ // }
+ //}
+ //for _, friendID := range friends {
+ // s.friendNotificationSender.FriendInfoUpdatedNotification(ctx, req.UserInfo.UserID, friendID)
+ //}
s.webhookAfterUpdateUserInfo(ctx, &s.config.WebhooksConfig.AfterUpdateUserInfo, req)
- if err = s.groupRpcClient.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID); err != nil {
+ if err = s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID, oldUser); err != nil {
return nil, err
}
return resp, nil
@@ -164,25 +174,29 @@ func (s *userServer) UpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUse
if err = s.webhookBeforeUpdateUserInfoEx(ctx, &s.config.WebhooksConfig.BeforeUpdateUserInfoEx, req); err != nil {
return nil, err
}
+ oldUser, err := s.db.GetUserByID(ctx, req.UserInfo.UserID)
+ if err != nil {
+ return nil, err
+ }
data := convert.UserPb2DBMapEx(req.UserInfo)
if err = s.db.UpdateByMap(ctx, req.UserInfo.UserID, data); err != nil {
return nil, err
}
s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID)
- friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID)
- if err != nil {
- return nil, err
- }
- if req.UserInfo.Nickname != nil || req.UserInfo.FaceURL != nil {
- if err := s.groupRpcClient.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID); err != nil {
- return nil, err
- }
- }
- for _, friendID := range friends {
- s.friendNotificationSender.FriendInfoUpdatedNotification(ctx, req.UserInfo.UserID, friendID)
- }
+ //friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID)
+ //if err != nil {
+ // return nil, err
+ //}
+ //if req.UserInfo.Nickname != nil || req.UserInfo.FaceURL != nil {
+ // if err := s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID); err != nil {
+ // return nil, err
+ // }
+ //}
+ //for _, friendID := range friends {
+ // s.friendNotificationSender.FriendInfoUpdatedNotification(ctx, req.UserInfo.UserID, friendID)
+ //}
s.webhookAfterUpdateUserInfoEx(ctx, &s.config.WebhooksConfig.AfterUpdateUserInfoEx, req)
- if err := s.groupRpcClient.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID); err != nil {
+ if err := s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID, oldUser); err != nil {
return nil, err
}
return resp, nil
@@ -297,6 +311,8 @@ func (s *userServer) UserRegister(ctx context.Context, req *pbuser.UserRegisterR
return nil, err
}
+ prommetrics.UserRegisterCounter.Add(float64(len(users)))
+
s.webhookAfterUserRegister(ctx, &s.config.WebhooksConfig.AfterUserRegister, req)
return resp, nil
}
@@ -318,76 +334,6 @@ func (s *userServer) GetAllUserID(ctx context.Context, req *pbuser.GetAllUserIDR
return &pbuser.GetAllUserIDResp{Total: int32(total), UserIDs: userIDs}, nil
}
-// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
-func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (resp *pbuser.SubscribeOrCancelUsersStatusResp, err error) {
- if req.Genre == constant.SubscriberUser {
- err = s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
- if err != nil {
- return nil, err
- }
- var status []*pbuser.OnlineStatus
- status, err = s.db.GetUserStatus(ctx, req.UserIDs)
- if err != nil {
- return nil, err
- }
- return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
- } else if req.Genre == constant.Unsubscribe {
- err = s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
- if err != nil {
- return nil, err
- }
- }
- return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
-}
-
-// GetUserStatus Get the online status of the user.
-func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (resp *pbuser.GetUserStatusResp,
- err error) {
- onlineStatusList, err := s.db.GetUserStatus(ctx, req.UserIDs)
- if err != nil {
- return nil, err
- }
- return &pbuser.GetUserStatusResp{StatusList: onlineStatusList}, nil
-}
-
-// SetUserStatus Synchronize user's online status.
-func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (resp *pbuser.SetUserStatusResp,
- err error) {
- err = s.db.SetUserStatus(ctx, req.UserID, req.Status, req.PlatformID)
- if err != nil {
- return nil, err
- }
- list, err := s.db.GetSubscribedList(ctx, req.UserID)
- if err != nil {
- return nil, err
- }
- for _, userID := range list {
- tips := &sdkws.UserStatusChangeTips{
- FromUserID: req.UserID,
- ToUserID: userID,
- Status: req.Status,
- PlatformID: req.PlatformID,
- }
- s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
- }
-
- return &pbuser.SetUserStatusResp{}, nil
-}
-
-// GetSubscribeUsersStatus Get the online status of subscribers.
-func (s *userServer) GetSubscribeUsersStatus(ctx context.Context,
- req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
- userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
- if err != nil {
- return nil, err
- }
- onlineStatusList, err := s.db.GetUserStatus(ctx, userList)
- if err != nil {
- return nil, err
- }
- return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
-}
-
// ProcessUserCommandAdd user general function add.
func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.ProcessUserCommandAddReq) (*pbuser.ProcessUserCommandAddResp, error) {
err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID)
@@ -683,3 +629,45 @@ func (s *userServer) userModelToResp(users []*tablerelation.User, pagination pag
return &pbuser.SearchNotificationAccountResp{Total: total, NotificationAccounts: notificationAccounts}
}
+
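+// NotificationUserInfoUpdate notifies the group and relation services concurrently when a user's nickname or face URL has changed.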
+func (s *userServer) NotificationUserInfoUpdate(ctx context.Context, userID string, oldUser *tablerelation.User) error {
+ user, err := s.db.GetUserByID(ctx, userID)
+ if err != nil {
+ return err
+ }
+ if user.Nickname == oldUser.Nickname && user.FaceURL == oldUser.FaceURL {
+ return nil
+ }
+ oldUserInfo := convert.UserDB2Pb(oldUser)
+ newUserInfo := convert.UserDB2Pb(user)
+ var wg sync.WaitGroup
+ var es [2]error
+ wg.Add(len(es))
+ go func() {
+ defer wg.Done()
+ _, es[0] = s.groupRpcClient.Client.NotificationUserInfoUpdate(ctx, &group.NotificationUserInfoUpdateReq{
+ UserID: userID,
+ OldUserInfo: oldUserInfo,
+ NewUserInfo: newUserInfo,
+ })
+ }()
+
+ go func() {
+ defer wg.Done()
+ _, es[1] = s.friendRpcClient.Client.NotificationUserInfoUpdate(ctx, &friendpb.NotificationUserInfoUpdateReq{
+ UserID: userID,
+ OldUserInfo: oldUserInfo,
+ NewUserInfo: newUserInfo,
+ })
+ }()
+ wg.Wait()
+ return errors.Join(es[:]...)
+}
+
+func (s *userServer) SortQuery(ctx context.Context, req *pbuser.SortQueryReq) (*pbuser.SortQueryResp, error) {
+ users, err := s.db.SortQuery(ctx, req.UserIDName, req.Asc)
+ if err != nil {
+ return nil, err
+ }
+ return &pbuser.SortQueryResp{Users: convert.UsersDB2Pb(users)}, nil
+}
diff --git a/internal/tools/cron_task.go b/internal/tools/cron_task.go
index bf037b694..1ef4943cd 100644
--- a/internal/tools/cron_task.go
+++ b/internal/tools/cron_task.go
@@ -20,6 +20,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/protocol/msg"
+ "github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mw"
"google.golang.org/grpc"
@@ -39,7 +40,7 @@ type CronTaskConfig struct {
}
func Start(ctx context.Context, config *CronTaskConfig) error {
- log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime, "msgDestructTime", config.CronTask.RetainChatRecords)
+ log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.CronExecuteTime, "msgDestructTime", config.CronTask.RetainChatRecords)
if config.CronTask.RetainChatRecords < 1 {
return errs.New("msg destruct time must be greater than 1").Wrap()
}
@@ -66,10 +67,31 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
}
log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
}
- if _, err := crontab.AddFunc(config.CronTask.ChatRecordsClearTime, clearFunc); err != nil {
+ if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearFunc); err != nil {
return errs.Wrap(err)
}
- log.ZInfo(ctx, "start cron task", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime)
+
+ tConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third)
+ if err != nil {
+ return err
+ }
+ thirdClient := third.NewThirdClient(tConn)
+
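+ // deleteFunc asks the third service to purge object data older than fileExpireTime days.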
+ deleteFunc := func() {
+ now := time.Now()
+ deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime))
+ ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli()))
+ log.ZInfo(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
+ if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil {
+ log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now))
+ return
+ }
+ log.ZInfo(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
+ }
+ if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteFunc); err != nil {
+ return errs.Wrap(err)
+ }
+ log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
crontab.Start()
<-ctx.Done()
return nil
diff --git a/pkg/apistruct/manage.go b/pkg/apistruct/manage.go
index e79b47722..f4deb9fb1 100644
--- a/pkg/apistruct/manage.go
+++ b/pkg/apistruct/manage.go
@@ -15,7 +15,7 @@
package apistruct
import (
- sdkws "github.com/openimsdk/protocol/sdkws"
+ "github.com/openimsdk/protocol/sdkws"
)
// SendMsg defines the structure for sending messages with various metadata.
@@ -55,6 +55,9 @@ type SendMsg struct {
// OfflinePushInfo contains information for offline push notifications.
OfflinePushInfo *sdkws.OfflinePushInfo `json:"offlinePushInfo"`
+
+ // Ex stores extended fields
+ Ex string `json:"ex"`
}
// SendMsgReq extends SendMsg with the requirement of RecvID when SessionType indicates a one-on-one or notification chat.
diff --git a/pkg/common/cmd/group.go b/pkg/common/cmd/group.go
index f158b8c62..20124be95 100644
--- a/pkg/common/cmd/group.go
+++ b/pkg/common/cmd/group.go
@@ -19,6 +19,7 @@ import (
"github.com/openimsdk/open-im-server/v3/internal/rpc/group"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx"
"github.com/openimsdk/tools/system/program"
"github.com/spf13/cobra"
)
@@ -58,5 +59,5 @@ func (a *GroupRpcCmd) Exec() error {
func (a *GroupRpcCmd) runE() error {
return startrpc.Start(a.ctx, &a.groupConfig.Discovery, &a.groupConfig.RpcConfig.Prometheus, a.groupConfig.RpcConfig.RPC.ListenIP,
a.groupConfig.RpcConfig.RPC.RegisterIP, a.groupConfig.RpcConfig.RPC.Ports,
- a.Index(), a.groupConfig.Share.RpcRegisterName.Group, &a.groupConfig.Share, a.groupConfig, group.Start)
+ a.Index(), a.groupConfig.Share.RpcRegisterName.Group, &a.groupConfig.Share, a.groupConfig, group.Start, versionctx.EnableVersionCtx())
}
diff --git a/pkg/common/cmd/msg_gateway.go b/pkg/common/cmd/msg_gateway.go
index 78004094c..29d3fba33 100644
--- a/pkg/common/cmd/msg_gateway.go
+++ b/pkg/common/cmd/msg_gateway.go
@@ -37,6 +37,7 @@ func NewMsgGatewayCmd() *MsgGatewayCmd {
ret.configMap = map[string]any{
OpenIMMsgGatewayCfgFileName: &msgGatewayConfig.MsgGateway,
ShareFileName: &msgGatewayConfig.Share,
+ RedisConfigFileName: &msgGatewayConfig.RedisConfig,
WebhooksConfigFileName: &msgGatewayConfig.WebhooksConfig,
DiscoveryConfigFilename: &msgGatewayConfig.Discovery,
}
diff --git a/pkg/common/cmd/msg_gateway_test.go b/pkg/common/cmd/msg_gateway_test.go
index d820627b5..2b68a3e3a 100644
--- a/pkg/common/cmd/msg_gateway_test.go
+++ b/pkg/common/cmd/msg_gateway_test.go
@@ -19,6 +19,7 @@ import (
"github.com/openimsdk/tools/apiresp"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/stretchr/testify/mock"
+ "go.mongodb.org/mongo-driver/bson/primitive"
"math"
"testing"
)
@@ -59,3 +60,9 @@ func TestName(t *testing.T) {
t.Logf("%+v\n", rReso)
}
+
+func TestName1(t *testing.T) {
+ t.Log(primitive.NewObjectID().String())
+ t.Log(primitive.NewObjectID().Hex())
+
+}
diff --git a/pkg/common/cmd/push.go b/pkg/common/cmd/push.go
index 3e7c4c249..6e6014021 100644
--- a/pkg/common/cmd/push.go
+++ b/pkg/common/cmd/push.go
@@ -47,6 +47,7 @@ func NewPushRpcCmd() *PushRpcCmd {
ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
ret.ctx = context.WithValue(context.Background(), "version", config.Version)
ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
+ ret.pushConfig.FcmConfigPath = ret.ConfigPath()
return ret.runE()
}
return ret
diff --git a/pkg/common/cmd/root.go b/pkg/common/cmd/root.go
index 900281367..84e985697 100644
--- a/pkg/common/cmd/root.go
+++ b/pkg/common/cmd/root.go
@@ -31,6 +31,11 @@ type RootCmd struct {
prometheusPort int
log config.Log
index int
+ configPath string
+}
+
+func (r *RootCmd) ConfigPath() string {
+ return r.configPath
}
func (r *RootCmd) Index() int {
@@ -134,6 +139,7 @@ func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
r.log.RemainRotationCount,
r.log.RotationTime,
config.Version,
+ r.log.IsSimplify,
)
if err != nil {
return errs.Wrap(err)
@@ -153,6 +159,7 @@ func (r *RootCmd) getFlag(cmd *cobra.Command) (string, int, error) {
if err != nil {
return "", 0, errs.Wrap(err)
}
+ r.configPath = configDirectory
index, err := cmd.Flags().GetInt(FlagTransferIndex)
if err != nil {
return "", 0, errs.Wrap(err)
diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go
index 5313c196a..c6c672eb8 100644
--- a/pkg/common/config/config.go
+++ b/pkg/common/config/config.go
@@ -15,14 +15,16 @@
package config
import (
+ "strings"
+ "time"
+
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/s3/cos"
+ "github.com/openimsdk/tools/s3/kodo"
"github.com/openimsdk/tools/s3/minio"
"github.com/openimsdk/tools/s3/oss"
- "strings"
- "time"
)
type CacheConfig struct {
@@ -47,6 +49,7 @@ type Log struct {
RemainLogLevel int `mapstructure:"remainLogLevel"`
IsStdout bool `mapstructure:"isStdout"`
IsJson bool `mapstructure:"isJson"`
+ IsSimplify bool `mapstructure:"isSimplify"`
WithStack bool `mapstructure:"withStack"`
}
@@ -105,8 +108,9 @@ type API struct {
}
type CronTask struct {
- ChatRecordsClearTime string `mapstructure:"chatRecordsClearTime"`
- RetainChatRecords int `mapstructure:"retainChatRecords"`
+ CronExecuteTime string `mapstructure:"cronExecuteTime"`
+ RetainChatRecords int `mapstructure:"retainChatRecords"`
+ FileExpireTime int `mapstructure:"fileExpireTime"`
}
type OfflinePushConfig struct {
@@ -202,7 +206,8 @@ type Push struct {
ChannelName string `mapstructure:"channelName"`
} `mapstructure:"geTui"`
FCM struct {
- ServiceAccount string `mapstructure:"serviceAccount"`
+ FilePath string `mapstructure:"filePath"`
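+// ApiInit registers the API and HTTP counters together with the base collectors and serves them at the common metrics path on prometheusPort.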
+ AuthURL string `mapstructure:"authURL"`
} `mapstructure:"fcm"`
JPNS struct {
AppKey string `mapstructure:"appKey"`
@@ -277,16 +282,8 @@ type Third struct {
Enable string `mapstructure:"enable"`
Cos Cos `mapstructure:"cos"`
Oss Oss `mapstructure:"oss"`
- Kodo struct {
- Endpoint string `mapstructure:"endpoint"`
- Bucket string `mapstructure:"bucket"`
- BucketURL string `mapstructure:"bucketURL"`
- AccessKeyID string `mapstructure:"accessKeyID"`
- AccessKeySecret string `mapstructure:"accessKeySecret"`
- SessionToken string `mapstructure:"sessionToken"`
- PublicRead bool `mapstructure:"publicRead"`
- } `mapstructure:"kodo"`
- Aws struct {
+ Kodo Kodo `mapstructure:"kodo"`
+ Aws struct {
Endpoint string `mapstructure:"endpoint"`
Region string `mapstructure:"region"`
Bucket string `mapstructure:"bucket"`
@@ -313,6 +310,16 @@ type Oss struct {
PublicRead bool `mapstructure:"publicRead"`
}
+type Kodo struct {
+ Endpoint string `mapstructure:"endpoint"`
+ Bucket string `mapstructure:"bucket"`
+ BucketURL string `mapstructure:"bucketURL"`
+ AccessKeyID string `mapstructure:"accessKeyID"`
+ AccessKeySecret string `mapstructure:"accessKeySecret"`
+ SessionToken string `mapstructure:"sessionToken"`
+ PublicRead bool `mapstructure:"publicRead"`
+}
+
type User struct {
RPC struct {
RegisterIP string `mapstructure:"registerIP"`
@@ -338,8 +345,9 @@ type BeforeConfig struct {
}
type AfterConfig struct {
- Enable bool `mapstructure:"enable"`
- Timeout int `mapstructure:"timeout"`
+ Enable bool `mapstructure:"enable"`
+ Timeout int `mapstructure:"timeout"`
+ AttentionIds []string `mapstructure:"attentionIds"`
}
type Share struct {
@@ -523,6 +531,18 @@ func (o *Oss) Build() *oss.Config {
}
}
+func (o *Kodo) Build() *kodo.Config {
+ return &kodo.Config{
+ Endpoint: o.Endpoint,
+ Bucket: o.Bucket,
+ BucketURL: o.BucketURL,
+ AccessKeyID: o.AccessKeyID,
+ AccessKeySecret: o.AccessKeySecret,
+ SessionToken: o.SessionToken,
+ PublicRead: o.PublicRead,
+ }
+}
+
func (l *CacheConfig) Failed() time.Duration {
return time.Second * time.Duration(l.FailedExpire)
}
diff --git a/pkg/common/convert/user.go b/pkg/common/convert/user.go
index ccc574f51..d824fa68e 100644
--- a/pkg/common/convert/user.go
+++ b/pkg/common/convert/user.go
@@ -16,26 +16,26 @@ package convert
import (
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/tools/utils/datautil"
"time"
"github.com/openimsdk/protocol/sdkws"
)
-func UsersDB2Pb(users []*relationtb.User) []*sdkws.UserInfo {
- result := make([]*sdkws.UserInfo, 0, len(users))
- for _, user := range users {
- userPb := &sdkws.UserInfo{
- UserID: user.UserID,
- Nickname: user.Nickname,
- FaceURL: user.FaceURL,
- Ex: user.Ex,
- CreateTime: user.CreateTime.UnixMilli(),
- AppMangerLevel: user.AppMangerLevel,
- GlobalRecvMsgOpt: user.GlobalRecvMsgOpt,
- }
- result = append(result, userPb)
+func UserDB2Pb(user *relationtb.User) *sdkws.UserInfo {
+ return &sdkws.UserInfo{
+ UserID: user.UserID,
+ Nickname: user.Nickname,
+ FaceURL: user.FaceURL,
+ Ex: user.Ex,
+ CreateTime: user.CreateTime.UnixMilli(),
+ AppMangerLevel: user.AppMangerLevel,
+ GlobalRecvMsgOpt: user.GlobalRecvMsgOpt,
}
- return result
+}
+
+func UsersDB2Pb(users []*relationtb.User) []*sdkws.UserInfo {
+ return datautil.Slice(users, UserDB2Pb)
}
func UserPb2DB(user *sdkws.UserInfo) *relationtb.User {
diff --git a/pkg/common/ginprometheus/ginprometheus.go b/pkg/common/ginprometheus/ginprometheus.go
index c2e6bdcca..64f8a0d8a 100644
--- a/pkg/common/ginprometheus/ginprometheus.go
+++ b/pkg/common/ginprometheus/ginprometheus.go
@@ -14,430 +14,431 @@
package ginprometheus
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "os"
- "strconv"
- "time"
-
- "github.com/gin-gonic/gin"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
-)
-
-var defaultMetricPath = "/metrics"
-
-// counter, counter_vec, gauge, gauge_vec,
-// histogram, histogram_vec, summary, summary_vec.
-var (
- reqCounter = &Metric{
- ID: "reqCnt",
- Name: "requests_total",
- Description: "How many HTTP requests processed, partitioned by status code and HTTP method.",
- Type: "counter_vec",
- Args: []string{"code", "method", "handler", "host", "url"}}
-
- reqDuration = &Metric{
- ID: "reqDur",
- Name: "request_duration_seconds",
- Description: "The HTTP request latencies in seconds.",
- Type: "histogram_vec",
- Args: []string{"code", "method", "url"},
- }
-
- resSize = &Metric{
- ID: "resSz",
- Name: "response_size_bytes",
- Description: "The HTTP response sizes in bytes.",
- Type: "summary"}
-
- reqSize = &Metric{
- ID: "reqSz",
- Name: "request_size_bytes",
- Description: "The HTTP request sizes in bytes.",
- Type: "summary"}
-
- standardMetrics = []*Metric{
- reqCounter,
- reqDuration,
- resSize,
- reqSize,
- }
-)
-
-/*
-RequestCounterURLLabelMappingFn is a function which can be supplied to the middleware to control
-the cardinality of the request counter's "url" label, which might be required in some contexts.
-For instance, if for a "/customer/:name" route you don't want to generate a time series for every
-possible customer name, you could use this function:
-
- func(c *gin.Context) string {
- url := c.Request.URL.Path
- for _, p := range c.Params {
- if p.Key == "name" {
- url = strings.Replace(url, p.Value, ":name", 1)
- break
- }
- }
- return url
- }
-
-which would map "/customer/alice" and "/customer/bob" to their template "/customer/:name".
-*/
-type RequestCounterURLLabelMappingFn func(c *gin.Context) string
-
-// Metric is a definition for the name, description, type, ID, and
-// prometheus.Collector type (i.e. CounterVec, Summary, etc) of each metric.
-type Metric struct {
- MetricCollector prometheus.Collector
- ID string
- Name string
- Description string
- Type string
- Args []string
-}
-
-// Prometheus contains the metrics gathered by the instance and its path.
-type Prometheus struct {
- reqCnt *prometheus.CounterVec
- reqDur *prometheus.HistogramVec
- reqSz, resSz prometheus.Summary
- router *gin.Engine
- listenAddress string
- Ppg PrometheusPushGateway
-
- MetricsList []*Metric
- MetricsPath string
-
- ReqCntURLLabelMappingFn RequestCounterURLLabelMappingFn
-
- // gin.Context string to use as a prometheus URL label
- URLLabelFromContext string
-}
-
-// PrometheusPushGateway contains the configuration for pushing to a Prometheus pushgateway (optional).
-type PrometheusPushGateway struct {
-
- // Push interval in seconds
- PushIntervalSeconds time.Duration
-
- // Push Gateway URL in format http://domain:port
- // where JOBNAME can be any string of your choice
- PushGatewayURL string
-
- // Local metrics URL where metrics are fetched from, this could be omitted in the future
- // if implemented using prometheus common/expfmt instead
- MetricsURL string
-
- // pushgateway job name, defaults to "gin"
- Job string
-}
-
-// NewPrometheus generates a new set of metrics with a certain subsystem name.
-func NewPrometheus(subsystem string, customMetricsList ...[]*Metric) *Prometheus {
- if subsystem == "" {
- subsystem = "app"
- }
-
- var metricsList []*Metric
-
- if len(customMetricsList) > 1 {
- panic("Too many args. NewPrometheus( string, ).")
- } else if len(customMetricsList) == 1 {
- metricsList = customMetricsList[0]
- }
- metricsList = append(metricsList, standardMetrics...)
-
- p := &Prometheus{
- MetricsList: metricsList,
- MetricsPath: defaultMetricPath,
- ReqCntURLLabelMappingFn: func(c *gin.Context) string {
- return c.FullPath() // e.g. /user/:id , /user/:id/info
- },
- }
-
- p.registerMetrics(subsystem)
-
- return p
-}
-
-// SetPushGateway sends metrics to a remote pushgateway exposed on pushGatewayURL
-// every pushIntervalSeconds. Metrics are fetched from metricsURL.
-func (p *Prometheus) SetPushGateway(pushGatewayURL, metricsURL string, pushIntervalSeconds time.Duration) {
- p.Ppg.PushGatewayURL = pushGatewayURL
- p.Ppg.MetricsURL = metricsURL
- p.Ppg.PushIntervalSeconds = pushIntervalSeconds
- p.startPushTicker()
-}
-
-// SetPushGatewayJob job name, defaults to "gin".
-func (p *Prometheus) SetPushGatewayJob(j string) {
- p.Ppg.Job = j
-}
-
-// SetListenAddress for exposing metrics on address. If not set, it will be exposed at the
-// same address of the gin engine that is being used.
-func (p *Prometheus) SetListenAddress(address string) {
- p.listenAddress = address
- if p.listenAddress != "" {
- p.router = gin.Default()
- }
-}
-
-// SetListenAddressWithRouter for using a separate router to expose metrics. (this keeps things like GET /metrics out of
-// your content's access log).
-func (p *Prometheus) SetListenAddressWithRouter(listenAddress string, r *gin.Engine) {
- p.listenAddress = listenAddress
- if len(p.listenAddress) > 0 {
- p.router = r
- }
-}
-
-// SetMetricsPath set metrics paths.
-func (p *Prometheus) SetMetricsPath(e *gin.Engine) error {
-
- if p.listenAddress != "" {
- p.router.GET(p.MetricsPath, prometheusHandler())
- return p.runServer()
- } else {
- e.GET(p.MetricsPath, prometheusHandler())
- return nil
- }
-}
-
-// SetMetricsPathWithAuth set metrics paths with authentication.
-func (p *Prometheus) SetMetricsPathWithAuth(e *gin.Engine, accounts gin.Accounts) error {
-
- if p.listenAddress != "" {
- p.router.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
- return p.runServer()
- } else {
- e.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
- return nil
- }
-
-}
-
-func (p *Prometheus) runServer() error {
- return p.router.Run(p.listenAddress)
-}
-
-func (p *Prometheus) getMetrics() []byte {
- response, err := http.Get(p.Ppg.MetricsURL)
- if err != nil {
- return nil
- }
-
- defer response.Body.Close()
-
- body, _ := io.ReadAll(response.Body)
- return body
-}
-
-var hostname, _ = os.Hostname()
-
-func (p *Prometheus) getPushGatewayURL() string {
- if p.Ppg.Job == "" {
- p.Ppg.Job = "gin"
- }
- return p.Ppg.PushGatewayURL + "/metrics/job/" + p.Ppg.Job + "/instance/" + hostname
-}
-
-func (p *Prometheus) sendMetricsToPushGateway(metrics []byte) {
- req, err := http.NewRequest("POST", p.getPushGatewayURL(), bytes.NewBuffer(metrics))
- if err != nil {
- return
- }
-
- client := &http.Client{}
- resp, err := client.Do(req)
- if err != nil {
- fmt.Println("Error sending to push gateway error:", err.Error())
- }
-
- resp.Body.Close()
-}
-
-func (p *Prometheus) startPushTicker() {
- ticker := time.NewTicker(time.Second * p.Ppg.PushIntervalSeconds)
- go func() {
- for range ticker.C {
- p.sendMetricsToPushGateway(p.getMetrics())
- }
- }()
-}
-
-// NewMetric associates prometheus.Collector based on Metric.Type.
-func NewMetric(m *Metric, subsystem string) prometheus.Collector {
- var metric prometheus.Collector
- switch m.Type {
- case "counter_vec":
- metric = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Subsystem: subsystem,
- Name: m.Name,
- Help: m.Description,
- },
- m.Args,
- )
- case "counter":
- metric = prometheus.NewCounter(
- prometheus.CounterOpts{
- Subsystem: subsystem,
- Name: m.Name,
- Help: m.Description,
- },
- )
- case "gauge_vec":
- metric = prometheus.NewGaugeVec(
- prometheus.GaugeOpts{
- Subsystem: subsystem,
- Name: m.Name,
- Help: m.Description,
- },
- m.Args,
- )
- case "gauge":
- metric = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Subsystem: subsystem,
- Name: m.Name,
- Help: m.Description,
- },
- )
- case "histogram_vec":
- metric = prometheus.NewHistogramVec(
- prometheus.HistogramOpts{
- Subsystem: subsystem,
- Name: m.Name,
- Help: m.Description,
- },
- m.Args,
- )
- case "histogram":
- metric = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Subsystem: subsystem,
- Name: m.Name,
- Help: m.Description,
- },
- )
- case "summary_vec":
- metric = prometheus.NewSummaryVec(
- prometheus.SummaryOpts{
- Subsystem: subsystem,
- Name: m.Name,
- Help: m.Description,
- },
- m.Args,
- )
- case "summary":
- metric = prometheus.NewSummary(
- prometheus.SummaryOpts{
- Subsystem: subsystem,
- Name: m.Name,
- Help: m.Description,
- },
- )
- }
- return metric
-}
-
-func (p *Prometheus) registerMetrics(subsystem string) {
- for _, metricDef := range p.MetricsList {
- metric := NewMetric(metricDef, subsystem)
- if err := prometheus.Register(metric); err != nil {
- fmt.Println("could not be registered in Prometheus,metricDef.Name:", metricDef.Name, " error:", err.Error())
- }
-
- switch metricDef {
- case reqCounter:
- p.reqCnt = metric.(*prometheus.CounterVec)
- case reqDuration:
- p.reqDur = metric.(*prometheus.HistogramVec)
- case resSize:
- p.resSz = metric.(prometheus.Summary)
- case reqSize:
- p.reqSz = metric.(prometheus.Summary)
- }
- metricDef.MetricCollector = metric
- }
-}
-
-// Use adds the middleware to a gin engine.
-func (p *Prometheus) Use(e *gin.Engine) error {
- e.Use(p.HandlerFunc())
- return p.SetMetricsPath(e)
-}
-
-// UseWithAuth adds the middleware to a gin engine with BasicAuth.
-func (p *Prometheus) UseWithAuth(e *gin.Engine, accounts gin.Accounts) error {
- e.Use(p.HandlerFunc())
- return p.SetMetricsPathWithAuth(e, accounts)
-}
-
-// HandlerFunc defines handler function for middleware.
-func (p *Prometheus) HandlerFunc() gin.HandlerFunc {
- return func(c *gin.Context) {
- if c.Request.URL.Path == p.MetricsPath {
- c.Next()
- return
- }
-
- start := time.Now()
- reqSz := computeApproximateRequestSize(c.Request)
-
- c.Next()
-
- status := strconv.Itoa(c.Writer.Status())
- elapsed := float64(time.Since(start)) / float64(time.Second)
- resSz := float64(c.Writer.Size())
-
- url := p.ReqCntURLLabelMappingFn(c)
- if len(p.URLLabelFromContext) > 0 {
- u, found := c.Get(p.URLLabelFromContext)
- if !found {
- u = "unknown"
- }
- url = u.(string)
- }
- p.reqDur.WithLabelValues(status, c.Request.Method, url).Observe(elapsed)
- p.reqCnt.WithLabelValues(status, c.Request.Method, c.HandlerName(), c.Request.Host, url).Inc()
- p.reqSz.Observe(float64(reqSz))
- p.resSz.Observe(resSz)
- }
-}
-
-func prometheusHandler() gin.HandlerFunc {
- h := promhttp.Handler()
- return func(c *gin.Context) {
- h.ServeHTTP(c.Writer, c.Request)
- }
-}
-
-func computeApproximateRequestSize(r *http.Request) int {
- var s int
- if r.URL != nil {
- s = len(r.URL.Path)
- }
-
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
- }
- s += len(r.Host)
-
- // r.FormData and r.MultipartForm are assumed to be included in r.URL.
-
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- return s
-}
+//
+//import (
+// "bytes"
+// "fmt"
+// "io"
+// "net/http"
+// "os"
+// "strconv"
+// "time"
+//
+// "github.com/gin-gonic/gin"
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+//)
+//
+//var defaultMetricPath = "/metrics"
+//
+//// counter, counter_vec, gauge, gauge_vec,
+//// histogram, histogram_vec, summary, summary_vec.
+//var (
+// reqCounter = &Metric{
+// ID: "reqCnt",
+// Name: "requests_total",
+// Description: "How many HTTP requests processed, partitioned by status code and HTTP method.",
+// Type: "counter_vec",
+// Args: []string{"code", "method", "handler", "host", "url"}}
+//
+// reqDuration = &Metric{
+// ID: "reqDur",
+// Name: "request_duration_seconds",
+// Description: "The HTTP request latencies in seconds.",
+// Type: "histogram_vec",
+// Args: []string{"code", "method", "url"},
+// }
+//
+// resSize = &Metric{
+// ID: "resSz",
+// Name: "response_size_bytes",
+// Description: "The HTTP response sizes in bytes.",
+// Type: "summary"}
+//
+// reqSize = &Metric{
+// ID: "reqSz",
+// Name: "request_size_bytes",
+// Description: "The HTTP request sizes in bytes.",
+// Type: "summary"}
+//
+// standardMetrics = []*Metric{
+// reqCounter,
+// reqDuration,
+// resSize,
+// reqSize,
+// }
+//)
+//
+///*
+//RequestCounterURLLabelMappingFn is a function which can be supplied to the middleware to control
+//the cardinality of the request counter's "url" label, which might be required in some contexts.
+//For instance, if for a "/customer/:name" route you don't want to generate a time series for every
+//possible customer name, you could use this function:
+//
+// func(c *gin.Context) string {
+// url := c.Request.URL.Path
+// for _, p := range c.Params {
+// if p.Key == "name" {
+// url = strings.Replace(url, p.Value, ":name", 1)
+// break
+// }
+// }
+// return url
+// }
+//
+//which would map "/customer/alice" and "/customer/bob" to their template "/customer/:name".
+//*/
+//type RequestCounterURLLabelMappingFn func(c *gin.Context) string
+//
+//// Metric is a definition for the name, description, type, ID, and
+//// prometheus.Collector type (i.e. CounterVec, Summary, etc) of each metric.
+//type Metric struct {
+// MetricCollector prometheus.Collector
+// ID string
+// Name string
+// Description string
+// Type string
+// Args []string
+//}
+//
+//// Prometheus contains the metrics gathered by the instance and its path.
+//type Prometheus struct {
+// reqCnt *prometheus.CounterVec
+// reqDur *prometheus.HistogramVec
+// reqSz, resSz prometheus.Summary
+// router *gin.Engine
+// listenAddress string
+// Ppg PrometheusPushGateway
+//
+// MetricsList []*Metric
+// MetricsPath string
+//
+// ReqCntURLLabelMappingFn RequestCounterURLLabelMappingFn
+//
+// // gin.Context string to use as a prometheus URL label
+// URLLabelFromContext string
+//}
+//
+//// PrometheusPushGateway contains the configuration for pushing to a Prometheus pushgateway (optional).
+//type PrometheusPushGateway struct {
+//
+// // Push interval in seconds
+// PushIntervalSeconds time.Duration
+//
+// // Push Gateway URL in format http://domain:port
+// // where JOBNAME can be any string of your choice
+// PushGatewayURL string
+//
+// // Local metrics URL where metrics are fetched from, this could be omitted in the future
+// // if implemented using prometheus common/expfmt instead
+// MetricsURL string
+//
+// // pushgateway job name, defaults to "gin"
+// Job string
+//}
+//
+//// NewPrometheus generates a new set of metrics with a certain subsystem name.
+//func NewPrometheus(subsystem string, customMetricsList ...[]*Metric) *Prometheus {
+// if subsystem == "" {
+// subsystem = "app"
+// }
+//
+// var metricsList []*Metric
+//
+// if len(customMetricsList) > 1 {
+// panic("Too many args. NewPrometheus( string, ).")
+// } else if len(customMetricsList) == 1 {
+// metricsList = customMetricsList[0]
+// }
+// metricsList = append(metricsList, standardMetrics...)
+//
+// p := &Prometheus{
+// MetricsList: metricsList,
+// MetricsPath: defaultMetricPath,
+// ReqCntURLLabelMappingFn: func(c *gin.Context) string {
+// return c.FullPath() // e.g. /user/:id , /user/:id/info
+// },
+// }
+//
+// p.registerMetrics(subsystem)
+//
+// return p
+//}
+//
+//// SetPushGateway sends metrics to a remote pushgateway exposed on pushGatewayURL
+//// every pushIntervalSeconds. Metrics are fetched from metricsURL.
+//func (p *Prometheus) SetPushGateway(pushGatewayURL, metricsURL string, pushIntervalSeconds time.Duration) {
+// p.Ppg.PushGatewayURL = pushGatewayURL
+// p.Ppg.MetricsURL = metricsURL
+// p.Ppg.PushIntervalSeconds = pushIntervalSeconds
+// p.startPushTicker()
+//}
+//
+//// SetPushGatewayJob job name, defaults to "gin".
+//func (p *Prometheus) SetPushGatewayJob(j string) {
+// p.Ppg.Job = j
+//}
+//
+//// SetListenAddress for exposing metrics on address. If not set, it will be exposed at the
+//// same address of the gin engine that is being used.
+//func (p *Prometheus) SetListenAddress(address string) {
+// p.listenAddress = address
+// if p.listenAddress != "" {
+// p.router = gin.Default()
+// }
+//}
+//
+//// SetListenAddressWithRouter for using a separate router to expose metrics. (this keeps things like GET /metrics out of
+//// your content's access log).
+//func (p *Prometheus) SetListenAddressWithRouter(listenAddress string, r *gin.Engine) {
+// p.listenAddress = listenAddress
+// if len(p.listenAddress) > 0 {
+// p.router = r
+// }
+//}
+//
+//// SetMetricsPath set metrics paths.
+//func (p *Prometheus) SetMetricsPath(e *gin.Engine) error {
+//
+// if p.listenAddress != "" {
+// p.router.GET(p.MetricsPath, prometheusHandler())
+// return p.runServer()
+// } else {
+// e.GET(p.MetricsPath, prometheusHandler())
+// return nil
+// }
+//}
+//
+//// SetMetricsPathWithAuth set metrics paths with authentication.
+//func (p *Prometheus) SetMetricsPathWithAuth(e *gin.Engine, accounts gin.Accounts) error {
+//
+// if p.listenAddress != "" {
+// p.router.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
+// return p.runServer()
+// } else {
+// e.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
+// return nil
+// }
+//
+//}
+//
+//func (p *Prometheus) runServer() error {
+// return p.router.Run(p.listenAddress)
+//}
+//
+//func (p *Prometheus) getMetrics() []byte {
+// response, err := http.Get(p.Ppg.MetricsURL)
+// if err != nil {
+// return nil
+// }
+//
+// defer response.Body.Close()
+//
+// body, _ := io.ReadAll(response.Body)
+// return body
+//}
+//
+//var hostname, _ = os.Hostname()
+//
+//func (p *Prometheus) getPushGatewayURL() string {
+// if p.Ppg.Job == "" {
+// p.Ppg.Job = "gin"
+// }
+// return p.Ppg.PushGatewayURL + "/metrics/job/" + p.Ppg.Job + "/instance/" + hostname
+//}
+//
+//func (p *Prometheus) sendMetricsToPushGateway(metrics []byte) {
+// req, err := http.NewRequest("POST", p.getPushGatewayURL(), bytes.NewBuffer(metrics))
+// if err != nil {
+// return
+// }
+//
+// client := &http.Client{}
+// resp, err := client.Do(req)
+// if err != nil {
+// fmt.Println("Error sending to push gateway error:", err.Error())
+// }
+//
+// resp.Body.Close()
+//}
+//
+//func (p *Prometheus) startPushTicker() {
+// ticker := time.NewTicker(time.Second * p.Ppg.PushIntervalSeconds)
+// go func() {
+// for range ticker.C {
+// p.sendMetricsToPushGateway(p.getMetrics())
+// }
+// }()
+//}
+//
+//// NewMetric associates prometheus.Collector based on Metric.Type.
+//func NewMetric(m *Metric, subsystem string) prometheus.Collector {
+// var metric prometheus.Collector
+// switch m.Type {
+// case "counter_vec":
+// metric = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Subsystem: subsystem,
+// Name: m.Name,
+// Help: m.Description,
+// },
+// m.Args,
+// )
+// case "counter":
+// metric = prometheus.NewCounter(
+// prometheus.CounterOpts{
+// Subsystem: subsystem,
+// Name: m.Name,
+// Help: m.Description,
+// },
+// )
+// case "gauge_vec":
+// metric = prometheus.NewGaugeVec(
+// prometheus.GaugeOpts{
+// Subsystem: subsystem,
+// Name: m.Name,
+// Help: m.Description,
+// },
+// m.Args,
+// )
+// case "gauge":
+// metric = prometheus.NewGauge(
+// prometheus.GaugeOpts{
+// Subsystem: subsystem,
+// Name: m.Name,
+// Help: m.Description,
+// },
+// )
+// case "histogram_vec":
+// metric = prometheus.NewHistogramVec(
+// prometheus.HistogramOpts{
+// Subsystem: subsystem,
+// Name: m.Name,
+// Help: m.Description,
+// },
+// m.Args,
+// )
+// case "histogram":
+// metric = prometheus.NewHistogram(
+// prometheus.HistogramOpts{
+// Subsystem: subsystem,
+// Name: m.Name,
+// Help: m.Description,
+// },
+// )
+// case "summary_vec":
+// metric = prometheus.NewSummaryVec(
+// prometheus.SummaryOpts{
+// Subsystem: subsystem,
+// Name: m.Name,
+// Help: m.Description,
+// },
+// m.Args,
+// )
+// case "summary":
+// metric = prometheus.NewSummary(
+// prometheus.SummaryOpts{
+// Subsystem: subsystem,
+// Name: m.Name,
+// Help: m.Description,
+// },
+// )
+// }
+// return metric
+//}
+//
+//func (p *Prometheus) registerMetrics(subsystem string) {
+// for _, metricDef := range p.MetricsList {
+// metric := NewMetric(metricDef, subsystem)
+// if err := prometheus.Register(metric); err != nil {
+// fmt.Println("could not be registered in Prometheus,metricDef.Name:", metricDef.Name, " error:", err.Error())
+// }
+//
+// switch metricDef {
+// case reqCounter:
+// p.reqCnt = metric.(*prometheus.CounterVec)
+// case reqDuration:
+// p.reqDur = metric.(*prometheus.HistogramVec)
+// case resSize:
+// p.resSz = metric.(prometheus.Summary)
+// case reqSize:
+// p.reqSz = metric.(prometheus.Summary)
+// }
+// metricDef.MetricCollector = metric
+// }
+//}
+//
+//// Use adds the middleware to a gin engine.
+//func (p *Prometheus) Use(e *gin.Engine) error {
+// e.Use(p.HandlerFunc())
+// return p.SetMetricsPath(e)
+//}
+//
+//// UseWithAuth adds the middleware to a gin engine with BasicAuth.
+//func (p *Prometheus) UseWithAuth(e *gin.Engine, accounts gin.Accounts) error {
+// e.Use(p.HandlerFunc())
+// return p.SetMetricsPathWithAuth(e, accounts)
+//}
+//
+//// HandlerFunc defines handler function for middleware.
+//func (p *Prometheus) HandlerFunc() gin.HandlerFunc {
+// return func(c *gin.Context) {
+// if c.Request.URL.Path == p.MetricsPath {
+// c.Next()
+// return
+// }
+//
+// start := time.Now()
+// reqSz := computeApproximateRequestSize(c.Request)
+//
+// c.Next()
+//
+// status := strconv.Itoa(c.Writer.Status())
+// elapsed := float64(time.Since(start)) / float64(time.Second)
+// resSz := float64(c.Writer.Size())
+//
+// url := p.ReqCntURLLabelMappingFn(c)
+// if len(p.URLLabelFromContext) > 0 {
+// u, found := c.Get(p.URLLabelFromContext)
+// if !found {
+// u = "unknown"
+// }
+// url = u.(string)
+// }
+// p.reqDur.WithLabelValues(status, c.Request.Method, url).Observe(elapsed)
+// p.reqCnt.WithLabelValues(status, c.Request.Method, c.HandlerName(), c.Request.Host, url).Inc()
+// p.reqSz.Observe(float64(reqSz))
+// p.resSz.Observe(resSz)
+// }
+//}
+//
+//func prometheusHandler() gin.HandlerFunc {
+// h := promhttp.Handler()
+// return func(c *gin.Context) {
+// h.ServeHTTP(c.Writer, c.Request)
+// }
+//}
+//
+//func computeApproximateRequestSize(r *http.Request) int {
+// var s int
+// if r.URL != nil {
+// s = len(r.URL.Path)
+// }
+//
+// s += len(r.Method)
+// s += len(r.Proto)
+// for name, values := range r.Header {
+// s += len(name)
+// for _, value := range values {
+// s += len(value)
+// }
+// }
+// s += len(r.Host)
+//
+// // r.FormData and r.MultipartForm are assumed to be included in r.URL.
+//
+// if r.ContentLength != -1 {
+// s += int(r.ContentLength)
+// }
+// return s
+//}
diff --git a/pkg/common/prommetrics/api.go b/pkg/common/prommetrics/api.go
new file mode 100644
index 000000000..95b5c06b6
--- /dev/null
+++ b/pkg/common/prommetrics/api.go
@@ -0,0 +1,48 @@
+package prommetrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "strconv"
+)
+
+var (
+ apiCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "api_count",
+ Help: "Total number of API calls",
+ },
+ []string{"path", "method", "code"},
+ )
+ httpCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "http_count",
+ Help: "Total number of HTTP calls",
+ },
+ []string{"path", "method", "status"},
+ )
+)
+
+func ApiInit(prometheusPort int) error {
+ apiRegistry := prometheus.NewRegistry()
+ cs := append(
+ baseCollector,
+ apiCounter,
+ httpCounter,
+ )
+ return Init(apiRegistry, prometheusPort, commonPath, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}), cs...)
+}
+
+func APICall(path string, method string, apiCode int) {
+ apiCounter.With(prometheus.Labels{"path": path, "method": method, "code": strconv.Itoa(apiCode)}).Inc()
+}
+
+func HttpCall(path string, method string, status int) {
+ httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc()
+}
+
+//func ApiHandler() http.Handler {
+// return promhttp.InstrumentMetricHandler(
+// apiRegistry, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}),
+// )
+//}
diff --git a/pkg/common/prommetrics/gin_api.go b/pkg/common/prommetrics/gin_api.go
deleted file mode 100644
index 9f2e4c99d..000000000
--- a/pkg/common/prommetrics/gin_api.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prommetrics
-
-import ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
-
-/*
-labels := prometheus.Labels{"label_one": "any", "label_two": "value"}
-ApiCustomCnt.MetricCollector.(*prometheus.CounterVec).With(labels).Inc().
-*/
-var (
- ApiCustomCnt = &ginprom.Metric{
- Name: "custom_total",
- Description: "Custom counter events.",
- Type: "counter_vec",
- Args: []string{"label_one", "label_two"},
- }
-)
diff --git a/pkg/common/prommetrics/grpc_user.go b/pkg/common/prommetrics/grpc_user.go
new file mode 100644
index 000000000..cc2fc42e6
--- /dev/null
+++ b/pkg/common/prommetrics/grpc_user.go
@@ -0,0 +1,10 @@
+package prommetrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ UserRegisterCounter = prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "user_register_total",
+ Help: "The number of user login",
+ })
+)
diff --git a/pkg/common/prommetrics/prommetrics.go b/pkg/common/prommetrics/prommetrics.go
index 47e5d02b8..02e408d63 100644
--- a/pkg/common/prommetrics/prommetrics.go
+++ b/pkg/common/prommetrics/prommetrics.go
@@ -15,44 +15,24 @@
package prommetrics
import (
- gp "github.com/grpc-ecosystem/go-grpc-prometheus"
- config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
- "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
+ "fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
+ "net/http"
)
-func NewGrpcPromObj(cusMetrics []prometheus.Collector) (*prometheus.Registry, *gp.ServerMetrics, error) {
- reg := prometheus.NewRegistry()
- grpcMetrics := gp.NewServerMetrics()
- grpcMetrics.EnableHandlingTimeHistogram()
- cusMetrics = append(cusMetrics, grpcMetrics, collectors.NewGoCollector())
- reg.MustRegister(cusMetrics...)
- return reg, grpcMetrics, nil
-}
+const commonPath = "/metrics"
-func GetGrpcCusMetrics(registerName string, share *config2.Share) []prometheus.Collector {
- switch registerName {
- case share.RpcRegisterName.MessageGateway:
- return []prometheus.Collector{OnlineUserGauge}
- case share.RpcRegisterName.Msg:
- return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
- case "Transfer":
- return []prometheus.Collector{MsgInsertRedisSuccessCounter, MsgInsertRedisFailedCounter, MsgInsertMongoSuccessCounter, MsgInsertMongoFailedCounter, SeqSetFailedCounter}
- case share.RpcRegisterName.Push:
- return []prometheus.Collector{MsgOfflinePushFailedCounter}
- case share.RpcRegisterName.Auth:
- return []prometheus.Collector{UserLoginCounter}
- default:
- return nil
+var (
+ baseCollector = []prometheus.Collector{
+ collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
+ collectors.NewGoCollector(),
}
-}
+)
-func GetGinCusMetrics(name string) []*ginprometheus.Metric {
- switch name {
- case "Api":
- return []*ginprometheus.Metric{ApiCustomCnt}
- default:
- return []*ginprometheus.Metric{ApiCustomCnt}
- }
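+// Init registers the given collectors with the registry and serves the metrics handler at path on prometheusPort.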
+func Init(registry *prometheus.Registry, prometheusPort int, path string, handler http.Handler, cs ...prometheus.Collector) error {
+ registry.MustRegister(cs...)
+ srv := http.NewServeMux()
+ srv.Handle(path, handler)
+ return http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), srv)
}
diff --git a/pkg/common/prommetrics/prommetrics_test.go b/pkg/common/prommetrics/prommetrics_test.go
index 65b05652f..14b1aaff3 100644
--- a/pkg/common/prommetrics/prommetrics_test.go
+++ b/pkg/common/prommetrics/prommetrics_test.go
@@ -14,46 +14,39 @@
package prommetrics
-import (
- "testing"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/stretchr/testify/assert"
-)
-
-func TestNewGrpcPromObj(t *testing.T) {
- // Create a custom metric to pass into the NewGrpcPromObj function.
- customMetric := prometheus.NewCounter(prometheus.CounterOpts{
- Name: "test_metric",
- Help: "This is a test metric.",
- })
- cusMetrics := []prometheus.Collector{customMetric}
-
- // Call NewGrpcPromObj with the custom metrics.
- reg, grpcMetrics, err := NewGrpcPromObj(cusMetrics)
-
- // Assert no error was returned.
- assert.NoError(t, err)
-
- // Assert the registry was correctly initialized.
- assert.NotNil(t, reg)
-
- // Assert the grpcMetrics was correctly initialized.
- assert.NotNil(t, grpcMetrics)
-
- // Assert that the custom metric is registered.
- mfs, err := reg.Gather()
- assert.NoError(t, err)
- assert.NotEmpty(t, mfs) // Ensure some metrics are present.
- found := false
- for _, mf := range mfs {
- if *mf.Name == "test_metric" {
- found = true
- break
- }
- }
- assert.True(t, found, "Custom metric not found in registry")
-}
+//func TestNewGrpcPromObj(t *testing.T) {
+// // Create a custom metric to pass into the NewGrpcPromObj function.
+// customMetric := prometheus.NewCounter(prometheus.CounterOpts{
+// Name: "test_metric",
+// Help: "This is a test metric.",
+// })
+// cusMetrics := []prometheus.Collector{customMetric}
+//
+// // Call NewGrpcPromObj with the custom metrics.
+// reg, grpcMetrics, err := NewGrpcPromObj(cusMetrics)
+//
+// // Assert no error was returned.
+// assert.NoError(t, err)
+//
+// // Assert the registry was correctly initialized.
+// assert.NotNil(t, reg)
+//
+// // Assert the grpcMetrics was correctly initialized.
+// assert.NotNil(t, grpcMetrics)
+//
+// // Assert that the custom metric is registered.
+// mfs, err := reg.Gather()
+// assert.NoError(t, err)
+// assert.NotEmpty(t, mfs) // Ensure some metrics are present.
+// found := false
+// for _, mf := range mfs {
+// if *mf.Name == "test_metric" {
+// found = true
+// break
+// }
+// }
+// assert.True(t, found, "Custom metric not found in registry")
+//}
//func TestGetGrpcCusMetrics(t *testing.T) {
// conf := config2.NewGlobalConfig()
diff --git a/pkg/common/prommetrics/rpc.go b/pkg/common/prommetrics/rpc.go
new file mode 100644
index 000000000..dc16322da
--- /dev/null
+++ b/pkg/common/prommetrics/rpc.go
@@ -0,0 +1,60 @@
+package prommetrics
+
+import (
+ gp "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/config"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "strconv"
+)
+
+const rpcPath = commonPath
+
+var (
+ grpcMetrics *gp.ServerMetrics
+ rpcCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "rpc_count",
+ Help: "Total number of RPC calls",
+ },
+ []string{"name", "path", "code"},
+ )
+)
+
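+// RpcInit registers the base process/Go collectors, the rpc_count counter and the
+// service-specific collectors on a fresh registry, then serves /metrics on prometheusPort.
+// It blocks in ListenAndServe, so callers are expected to run it in a dedicated goroutine.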
+func RpcInit(cs []prometheus.Collector, prometheusPort int) error {
+ reg := prometheus.NewRegistry()
+ cs = append(append(
+ baseCollector,
+ rpcCounter,
+ ), cs...)
+ return Init(reg, prometheusPort, rpcPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
+}
+
+func RPCCall(name string, path string, code int) {
+ rpcCounter.With(prometheus.Labels{"name": name, "path": path, "code": strconv.Itoa(code)}).Inc()
+}
+
+func GetGrpcServerMetrics() *gp.ServerMetrics {
+ if grpcMetrics == nil {
+ grpcMetrics = gp.NewServerMetrics()
+ grpcMetrics.EnableHandlingTimeHistogram()
+ }
+ return grpcMetrics
+}
+
+func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Collector {
+ switch registerName {
+ case share.RpcRegisterName.MessageGateway:
+ return []prometheus.Collector{OnlineUserGauge}
+ case share.RpcRegisterName.Msg:
+ return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
+ case share.RpcRegisterName.Push:
+ return []prometheus.Collector{MsgOfflinePushFailedCounter}
+ case share.RpcRegisterName.Auth:
+ return []prometheus.Collector{UserLoginCounter}
+ case share.RpcRegisterName.User:
+ return []prometheus.Collector{UserRegisterCounter}
+ default:
+ return nil
+ }
+}
diff --git a/pkg/common/prommetrics/transfer.go b/pkg/common/prommetrics/transfer.go
index 197b6f7fc..f0abb8285 100644
--- a/pkg/common/prommetrics/transfer.go
+++ b/pkg/common/prommetrics/transfer.go
@@ -16,6 +16,7 @@ package prommetrics
import (
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
@@ -40,3 +41,16 @@ var (
Help: "The number of failed set seq",
})
)
+
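+// TransferInit exposes the msg-transfer counters together with the base process and Go
+// collectors on /metrics at prometheusPort; like Init it blocks in ListenAndServe.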
+func TransferInit(prometheusPort int) error {
+ reg := prometheus.NewRegistry()
+ cs := append(
+ baseCollector,
+ MsgInsertRedisSuccessCounter,
+ MsgInsertRedisFailedCounter,
+ MsgInsertMongoSuccessCounter,
+ MsgInsertMongoFailedCounter,
+ SeqSetFailedCounter,
+ )
+ return Init(reg, prometheusPort, commonPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
+}
diff --git a/pkg/common/startrpc/start.go b/pkg/common/startrpc/start.go
index 069c92012..4091a5f6e 100644
--- a/pkg/common/startrpc/start.go
+++ b/pkg/common/startrpc/start.go
@@ -17,9 +17,9 @@ package startrpc
import (
"context"
"fmt"
- config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/utils/datautil"
- "github.com/prometheus/client_golang/prometheus"
+ "google.golang.org/grpc/status"
"net"
"net/http"
"os"
@@ -29,7 +29,6 @@ import (
"syscall"
"time"
- grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/tools/discovery"
@@ -38,14 +37,13 @@ import (
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"github.com/openimsdk/tools/utils/network"
- "github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// Start rpc server.
-func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusConfig *config2.Prometheus, listenIP,
- registerIP string, rpcPorts []int, index int, rpcRegisterName string, share *config2.Share, config T, rpcFn func(ctx context.Context,
+func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusConfig *config.Prometheus, listenIP,
+ registerIP string, rpcPorts []int, index int, rpcRegisterName string, share *config.Share, config T, rpcFn func(ctx context.Context,
config T, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error, options ...grpc.ServerOption) error {
rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
@@ -77,13 +75,18 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
return err
}
- var reg *prometheus.Registry
- var metric *grpcprometheus.ServerMetrics
+ //var reg *prometheus.Registry
+ //var metric *grpcprometheus.ServerMetrics
if prometheusConfig.Enable {
- cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
- reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics)
- options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
- grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
+ //cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
+ //reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics)
+ //options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
+ // grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
+ options = append(
+ options, mw.GrpcServer(),
+ prommetricsUnaryInterceptor(rpcRegisterName),
+ prommetricsStreamInterceptor(rpcRegisterName),
+ )
} else {
options = append(options, mw.GrpcServer())
}
@@ -122,13 +125,18 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
netDone <- struct{}{}
return
}
- metric.InitializeMetrics(srv)
- // Create a HTTP server for prometheus.
- httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
- if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
- netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
+ cs := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
+ if err := prommetrics.RpcInit(cs, prometheusPort); err != nil && err != http.ErrServerClosed {
+ netErr = errs.WrapMsg(err, fmt.Sprintf("rpc %s prometheus start err: %d", rpcRegisterName, prometheusPort))
netDone <- struct{}{}
}
+ //metric.InitializeMetrics(srv)
+ // Create a HTTP server for prometheus.
+ //httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
+ //if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+ // netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
+ // netDone <- struct{}{}
+ //}
}()
}
@@ -158,7 +166,6 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
}
return nil
case <-netDone:
- close(netDone)
return netErr
}
}
@@ -176,3 +183,25 @@ func gracefulStopWithCtx(ctx context.Context, f func()) error {
return nil
}
}
+
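+// prommetricsUnaryInterceptor counts every unary call via prommetrics.RPCCall, labelled with
+// the service name, the full method path and the gRPC status code (0 on success, -1 when the
+// error carries no gRPC status).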
+func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
+ getCode := func(err error) int {
+ if err == nil {
+ return 0
+ }
+ rpcErr, ok := err.(interface{ GRPCStatus() *status.Status })
+ if !ok {
+ return -1
+ }
+ return int(rpcErr.GRPCStatus().Code())
+ }
+ return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+ resp, err := handler(ctx, req)
+ prommetrics.RPCCall(rpcRegisterName, info.FullMethod, getCode(err))
+ return resp, err
+ })
+}
+
+func prommetricsStreamInterceptor(rpcRegisterName string) grpc.ServerOption {
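+	// Streaming RPCs are not measured yet; an empty interceptor chain is registered as a placeholder.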
+ return grpc.ChainStreamInterceptor()
+}
diff --git a/pkg/common/storage/cache/cachekey/conversation.go b/pkg/common/storage/cache/cachekey/conversation.go
index aea4ceec6..d19fcc576 100644
--- a/pkg/common/storage/cache/cachekey/conversation.go
+++ b/pkg/common/storage/cache/cachekey/conversation.go
@@ -23,6 +23,7 @@ const (
SuperGroupRecvMsgNotNotifyUserIDsKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS:"
SuperGroupRecvMsgNotNotifyUserIDsHashKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS_HASH:"
ConversationNotReceiveMessageUserIDsKey = "CONVERSATION_NOT_RECEIVE_MESSAGE_USER_IDS:"
+ ConversationUserMaxKey = "CONVERSATION_USER_MAX:"
)
func GetConversationKey(ownerUserID, conversationID string) string {
@@ -56,3 +57,7 @@ func GetConversationNotReceiveMessageUserIDsKey(conversationID string) string {
func GetUserConversationIDsHashKey(ownerUserID string) string {
return ConversationIDsHashKey + ownerUserID
}
+
+func GetConversationUserMaxVersionKey(userID string) string {
+ return ConversationUserMaxKey + userID
+}
diff --git a/pkg/common/storage/cache/cachekey/friend.go b/pkg/common/storage/cache/cachekey/friend.go
index 9691b1f5c..8a053ca32 100644
--- a/pkg/common/storage/cache/cachekey/friend.go
+++ b/pkg/common/storage/cache/cachekey/friend.go
@@ -19,6 +19,8 @@ const (
TwoWayFriendsIDsKey = "COMMON_FRIENDS_IDS:"
FriendKey = "FRIEND_INFO:"
IsFriendKey = "IS_FRIEND:" // local cache key
+ //FriendSyncSortUserIDsKey = "FRIEND_SYNC_SORT_USER_IDS:"
+ FriendMaxVersionKey = "FRIEND_MAX_VERSION:"
)
func GetFriendIDsKey(ownerUserID string) string {
@@ -33,6 +35,14 @@ func GetFriendKey(ownerUserID, friendUserID string) string {
return FriendKey + ownerUserID + "-" + friendUserID
}
+func GetFriendMaxVersionKey(ownerUserID string) string {
+ return FriendMaxVersionKey + ownerUserID
+}
+
func GetIsFriendKey(possibleFriendUserID, userID string) string {
return IsFriendKey + possibleFriendUserID + "-" + userID
}
+
+//func GetFriendSyncSortUserIDsKey(ownerUserID string, count int) string {
+// return FriendSyncSortUserIDsKey + strconv.Itoa(count) + ":" + ownerUserID
+//}
diff --git a/pkg/common/storage/cache/cachekey/group.go b/pkg/common/storage/cache/cachekey/group.go
index 681121ecb..2ef42c0ff 100644
--- a/pkg/common/storage/cache/cachekey/group.go
+++ b/pkg/common/storage/cache/cachekey/group.go
@@ -28,6 +28,8 @@ const (
JoinedGroupsKey = "JOIN_GROUPS_KEY:"
GroupMemberNumKey = "GROUP_MEMBER_NUM_CACHE:"
GroupRoleLevelMemberIDsKey = "GROUP_ROLE_LEVEL_MEMBER_IDS:"
+ GroupMemberMaxVersionKey = "GROUP_MEMBER_MAX_VERSION:"
+ GroupJoinMaxVersionKey = "GROUP_JOIN_MAX_VERSION:"
)
func GetGroupInfoKey(groupID string) string {
@@ -57,3 +59,11 @@ func GetGroupMemberNumKey(groupID string) string {
func GetGroupRoleLevelMemberIDsKey(groupID string, roleLevel int32) string {
return GroupRoleLevelMemberIDsKey + groupID + "-" + strconv.Itoa(int(roleLevel))
}
+
+func GetGroupMemberMaxVersionKey(groupID string) string {
+ return GroupMemberMaxVersionKey + groupID
+}
+
+func GetJoinGroupMaxVersionKey(userID string) string {
+ return GroupJoinMaxVersionKey + userID
+}
diff --git a/pkg/common/storage/cache/cachekey/online.go b/pkg/common/storage/cache/cachekey/online.go
new file mode 100644
index 000000000..164e5f2f4
--- /dev/null
+++ b/pkg/common/storage/cache/cachekey/online.go
@@ -0,0 +1,13 @@
+package cachekey
+
+import "time"
+
+const (
+ OnlineKey = "ONLINE:"
+ OnlineChannel = "online_change"
+ OnlineExpire = time.Hour / 2
+)
+
+func GetOnlineKey(userID string) string {
+ return OnlineKey + userID
+}
diff --git a/pkg/common/storage/cache/cachekey/seq.go b/pkg/common/storage/cache/cachekey/seq.go
index 3f0ce98a4..b32e78300 100644
--- a/pkg/common/storage/cache/cachekey/seq.go
+++ b/pkg/common/storage/cache/cachekey/seq.go
@@ -1,38 +1,30 @@
-// Copyright © 2024 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package cachekey
const (
- maxSeq = "MAX_SEQ:"
- minSeq = "MIN_SEQ:"
- conversationUserMinSeq = "CON_USER_MIN_SEQ:"
- hasReadSeq = "HAS_READ_SEQ:"
+ MallocSeq = "MALLOC_SEQ:"
+ MallocMinSeqLock = "MALLOC_MIN_SEQ:"
+
+ SeqUserMaxSeq = "SEQ_USER_MAX:"
+ SeqUserMinSeq = "SEQ_USER_MIN:"
+ SeqUserReadSeq = "SEQ_USER_READ:"
)
-func GetMaxSeqKey(conversationID string) string {
- return maxSeq + conversationID
+func GetMallocSeqKey(conversationID string) string {
+ return MallocSeq + conversationID
+}
+
+func GetMallocMinSeqKey(conversationID string) string {
+ return MallocMinSeqLock + conversationID
}
-func GetMinSeqKey(conversationID string) string {
- return minSeq + conversationID
+func GetSeqUserMaxSeqKey(conversationID string, userID string) string {
+ return SeqUserMaxSeq + conversationID + ":" + userID
}
-func GetHasReadSeqKey(conversationID string, userID string) string {
- return hasReadSeq + userID + ":" + conversationID
+func GetSeqUserMinSeqKey(conversationID string, userID string) string {
+ return SeqUserMinSeq + conversationID + ":" + userID
}
-func GetConversationUserMinSeqKey(conversationID, userID string) string {
- return conversationUserMinSeq + conversationID + "u:" + userID
+func GetSeqUserReadSeqKey(conversationID string, userID string) string {
+ return SeqUserReadSeq + conversationID + ":" + userID
}
diff --git a/pkg/common/storage/cache/cachekey/user.go b/pkg/common/storage/cache/cachekey/user.go
index 7d06d4f75..473ca1b12 100644
--- a/pkg/common/storage/cache/cachekey/user.go
+++ b/pkg/common/storage/cache/cachekey/user.go
@@ -17,7 +17,6 @@ package cachekey
const (
UserInfoKey = "USER_INFO:"
UserGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:"
- olineStatusKey = "ONLINE_STATUS:"
)
func GetUserInfoKey(userID string) string {
@@ -27,7 +26,3 @@ func GetUserInfoKey(userID string) string {
func GetUserGlobalRecvMsgOptKey(userID string) string {
return UserGlobalRecvMsgOptKey + userID
}
-
-func GetOnlineStatusKey(modKey string) string {
- return olineStatusKey + modKey
-}
diff --git a/pkg/common/storage/cache/conversation.go b/pkg/common/storage/cache/conversation.go
index f34fd599f..bc1761483 100644
--- a/pkg/common/storage/cache/conversation.go
+++ b/pkg/common/storage/cache/conversation.go
@@ -54,4 +54,8 @@ type ConversationCache interface {
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache
+
+ DelConversationVersionUserIDs(userIDs ...string) ConversationCache
+
+ FindMaxConversationUserVersion(ctx context.Context, userID string) (*relationtb.VersionLog, error)
}
diff --git a/pkg/common/storage/cache/friend.go b/pkg/common/storage/cache/friend.go
index acff829f8..b451d3675 100644
--- a/pkg/common/storage/cache/friend.go
+++ b/pkg/common/storage/cache/friend.go
@@ -32,4 +32,16 @@ type FriendCache interface {
DelFriend(ownerUserID, friendUserID string) FriendCache
// Delete friends when friends' info changed
DelFriends(ownerUserID string, friendUserIDs []string) FriendCache
+
+ DelOwner(friendUserID string, ownerUserIDs []string) FriendCache
+
+ DelMaxFriendVersion(ownerUserIDs ...string) FriendCache
+
+ //DelSortFriendUserIDs(ownerUserIDs ...string) FriendCache
+
+ //FindSortFriendUserIDs(ctx context.Context, ownerUserID string) ([]string, error)
+
+ //FindFriendIncrVersion(ctx context.Context, ownerUserID string, version uint, limit int) (*relationtb.VersionLog, error)
+
+ FindMaxFriendVersion(ctx context.Context, ownerUserID string) (*relationtb.VersionLog, error)
}
diff --git a/pkg/common/storage/cache/group.go b/pkg/common/storage/cache/group.go
index 53e2cd1c7..1ec046295 100644
--- a/pkg/common/storage/cache/group.go
+++ b/pkg/common/storage/cache/group.go
@@ -36,7 +36,6 @@ type GroupCache interface {
DelGroupMembersHash(groupID string) GroupCache
GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error)
- GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (groupMemberIDs map[string][]string, err error)
DelGroupMemberIDs(groupID string) GroupCache
@@ -46,7 +45,6 @@ type GroupCache interface {
GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *model.GroupMember, err error)
GetGroupMembersInfo(ctx context.Context, groupID string, userID []string) (groupMembers []*model.GroupMember, err error)
GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error)
- GetGroupMembersPage(ctx context.Context, groupID string, userID []string, showNumber, pageNumber int32) (total uint32, groupMembers []*model.GroupMember, err error)
FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error)
GetGroupRoleLevelMemberIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)
@@ -59,4 +57,13 @@ type GroupCache interface {
GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*model.GroupMember, error)
GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error)
DelGroupsMemberNum(groupID ...string) GroupCache
+
+ //FindSortGroupMemberUserIDs(ctx context.Context, groupID string) ([]string, error)
+ //FindSortJoinGroupIDs(ctx context.Context, userID string) ([]string, error)
+
+ DelMaxGroupMemberVersion(groupIDs ...string) GroupCache
+ DelMaxJoinGroupVersion(userIDs ...string) GroupCache
+ FindMaxGroupMemberVersion(ctx context.Context, groupID string) (*model.VersionLog, error)
+ BatchFindMaxGroupMemberVersion(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error)
+ FindMaxJoinGroupVersion(ctx context.Context, userID string) (*model.VersionLog, error)
}
diff --git a/pkg/common/storage/cache/online.go b/pkg/common/storage/cache/online.go
new file mode 100644
index 000000000..7669c8a11
--- /dev/null
+++ b/pkg/common/storage/cache/online.go
@@ -0,0 +1,8 @@
+package cache
+
+import "context"
+
+type OnlineCache interface {
+ GetOnline(ctx context.Context, userID string) ([]int32, error)
+ SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
+}
diff --git a/pkg/common/storage/cache/redis/batch.go b/pkg/common/storage/cache/redis/batch.go
new file mode 100644
index 000000000..4d65c5929
--- /dev/null
+++ b/pkg/common/storage/cache/redis/batch.go
@@ -0,0 +1,96 @@
+package redis
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/dtm-labs/rockscache"
+ "github.com/openimsdk/tools/log"
+ "github.com/redis/go-redis/v9"
+ "golang.org/x/sync/singleflight"
+ "time"
+ "unsafe"
+)
+
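+// getRocksCacheRedisClient recovers the underlying redis.UniversalClient from a
+// rockscache.Client by mirroring its private field layout (rdb, Options, singleflight.Group)
+// through unsafe.Pointer; it breaks if the upstream struct layout changes.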
+func getRocksCacheRedisClient(cli *rockscache.Client) redis.UniversalClient {
+ type Client struct {
+ rdb redis.UniversalClient
+ _ rockscache.Options
+ _ singleflight.Group
+ }
+ return (*Client)(unsafe.Pointer(cli)).rdb
+}
+
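+// batchGetCache2 batch-loads ids through rockscache: cache keys are grouped by redis cluster
+// slot, misses in each slot are fetched together via fn and marshalled into the cache, and every
+// hit is unmarshalled into *V; idKey maps an id to its cache key and vId maps a value back to its id.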
+func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscache.Client, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
+ if len(ids) == 0 {
+ return nil, nil
+ }
+ findKeys := make([]string, 0, len(ids))
+ keyId := make(map[string]K)
+ for _, id := range ids {
+ key := idKey(id)
+ if _, ok := keyId[key]; ok {
+ continue
+ }
+ keyId[key] = id
+ findKeys = append(findKeys, key)
+ }
+ slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), findKeys)
+ if err != nil {
+ return nil, err
+ }
+ result := make([]*V, 0, len(findKeys))
+ for _, keys := range slotKeys {
+ indexCache, err := rcClient.FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
+ queryIds := make([]K, 0, len(idx))
+ idIndex := make(map[K]int)
+ for _, index := range idx {
+ id := keyId[keys[index]]
+ idIndex[id] = index
+ queryIds = append(queryIds, id)
+ }
+ values, err := fn(ctx, queryIds)
+ if err != nil {
+ log.ZError(ctx, "batchGetCache query database failed", err, "keys", keys, "queryIds", queryIds)
+ return nil, err
+ }
+ if len(values) == 0 {
+ return map[int]string{}, nil
+ }
+ cacheIndex := make(map[int]string)
+ for _, value := range values {
+ id := vId(value)
+ index, ok := idIndex[id]
+ if !ok {
+ continue
+ }
+ bs, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ cacheIndex[index] = string(bs)
+ }
+ return cacheIndex, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ for index, data := range indexCache {
+ if data == "" {
+ continue
+ }
+ var value V
+ if err := json.Unmarshal([]byte(data), &value); err != nil {
+ return nil, err
+ }
+ if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
+ cb.BatchCache(keyId[keys[index]])
+ }
+ result = append(result, &value)
+ }
+ }
+ return result, nil
+}
+
+type BatchCacheCallback[K comparable] interface {
+ BatchCache(id K)
+}
diff --git a/pkg/common/storage/cache/redis/batch_handler.go b/pkg/common/storage/cache/redis/batch_handler.go
index 95f669904..f9923e198 100644
--- a/pkg/common/storage/cache/redis/batch_handler.go
+++ b/pkg/common/storage/cache/redis/batch_handler.go
@@ -23,7 +23,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
- "github.com/openimsdk/tools/mw/specialerror"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"
"time"
@@ -119,6 +118,7 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin
v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
t, err = fn(ctx)
if err != nil {
+ log.ZError(ctx, "getCache query database failed", err, "key", key)
return "", err
}
bs, err := json.Marshal(t)
@@ -147,30 +147,30 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin
return t, nil
}
-func batchGetCache[T any, K comparable](
- ctx context.Context,
- rcClient *rockscache.Client,
- expire time.Duration,
- keys []K,
- keyFn func(key K) string,
- fns func(ctx context.Context, key K) (T, error),
-) ([]T, error) {
- if len(keys) == 0 {
- return nil, nil
- }
- res := make([]T, 0, len(keys))
- for _, key := range keys {
- val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
- return fns(ctx, key)
- })
- if err != nil {
- if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
- continue
- }
- return nil, errs.Wrap(err)
- }
- res = append(res, val)
- }
-
- return res, nil
-}
+//func batchGetCache[T any, K comparable](
+// ctx context.Context,
+// rcClient *rockscache.Client,
+// expire time.Duration,
+// keys []K,
+// keyFn func(key K) string,
+// fns func(ctx context.Context, key K) (T, error),
+//) ([]T, error) {
+// if len(keys) == 0 {
+// return nil, nil
+// }
+// res := make([]T, 0, len(keys))
+// for _, key := range keys {
+// val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
+// return fns(ctx, key)
+// })
+// if err != nil {
+// if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
+// continue
+// }
+// return nil, errs.Wrap(err)
+// }
+// res = append(res, val)
+// }
+//
+// return res, nil
+//}
diff --git a/pkg/common/storage/cache/redis/batch_test.go b/pkg/common/storage/cache/redis/batch_test.go
new file mode 100644
index 000000000..bbb6d76f1
--- /dev/null
+++ b/pkg/common/storage/cache/redis/batch_test.go
@@ -0,0 +1,55 @@
+package redis
+
+import (
+ "context"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/config"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
+ "github.com/openimsdk/tools/db/mongoutil"
+ "github.com/openimsdk/tools/db/redisutil"
+ "testing"
+)
+
+func TestName(t *testing.T) {
+ //var rocks rockscache.Client
+ //rdb := getRocksCacheRedisClient(&rocks)
+ //t.Log(rdb == nil)
+
+ ctx := context.Background()
+ rdb, err := redisutil.NewRedisClient(ctx, (&config.Redis{
+ Address: []string{"172.16.8.48:16379"},
+ Password: "openIM123",
+ DB: 3,
+ }).Build())
+ if err != nil {
+ panic(err)
+ }
+ mgocli, err := mongoutil.NewMongoDB(ctx, (&config.Mongo{
+ Address: []string{"172.16.8.48:37017"},
+ Database: "openim_v3",
+ Username: "openIM",
+ Password: "openIM123",
+ MaxPoolSize: 100,
+ MaxRetry: 1,
+ }).Build())
+ if err != nil {
+ panic(err)
+ }
+ //userMgo, err := mgo.NewUserMongo(mgocli.GetDB())
+ //if err != nil {
+ // panic(err)
+ //}
+ //rock := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
+ mgoSeqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
+ if err != nil {
+ panic(err)
+ }
+ seqUser := NewSeqUserCacheRedis(rdb, mgoSeqUser)
+
+ res, err := seqUser.GetUserReadSeqs(ctx, "2110910952", []string{"sg_2920732023", "sg_345762580"})
+ if err != nil {
+ panic(err)
+ }
+
+ t.Log(res)
+
+}
diff --git a/pkg/common/storage/cache/redis/conversation.go b/pkg/common/storage/cache/redis/conversation.go
index 8c0393dd5..95e680afb 100644
--- a/pkg/common/storage/cache/redis/conversation.go
+++ b/pkg/common/storage/cache/redis/conversation.go
@@ -95,6 +95,10 @@ func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID strin
return cachekey.GetUserConversationIDsHashKey(ownerUserID)
}
+func (c *ConversationRedisCache) getConversationUserMaxVersionKey(ownerUserID string) string {
+ return cachekey.GetConversationUserMaxVersionKey(ownerUserID)
+}
+
func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) {
return getCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), c.expireTime, func(ctx context.Context) ([]string, error) {
return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
@@ -160,10 +164,12 @@ func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversati
}
func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*model.Conversation, error) {
- return batchGetCache(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
+ return batchGetCache2(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
return c.getConversationKey(ownerUserID, conversationID)
- }, func(ctx context.Context, conversationID string) (*model.Conversation, error) {
- return c.conversationDB.Take(ctx, ownerUserID, conversationID)
+ }, func(conversation *model.Conversation) string {
+ return conversation.ConversationID
+ }, func(ctx context.Context, conversationIDs []string) ([]*model.Conversation, error) {
+ return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
})
}
@@ -233,6 +239,19 @@ func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(convers
for _, conversationID := range conversationIDs {
cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
}
+ return cache
+}
+func (c *ConversationRedisCache) DelConversationVersionUserIDs(userIDs ...string) cache.ConversationCache {
+ cache := c.CloneConversationCache()
+ for _, userID := range userIDs {
+ cache.AddKeys(c.getConversationUserMaxVersionKey(userID))
+ }
return cache
}
+
+func (c *ConversationRedisCache) FindMaxConversationUserVersion(ctx context.Context, userID string) (*model.VersionLog, error) {
+ return getCache(ctx, c.rcClient, c.getConversationUserMaxVersionKey(userID), c.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
+ return c.conversationDB.FindConversationUserVersion(ctx, userID, 0, 0)
+ })
+}
diff --git a/pkg/common/storage/cache/redis/friend.go b/pkg/common/storage/cache/redis/friend.go
index f76e5ff6b..be4687794 100644
--- a/pkg/common/storage/cache/redis/friend.go
+++ b/pkg/common/storage/cache/redis/friend.go
@@ -16,6 +16,8 @@ package redis
import (
"context"
+ "time"
+
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
@@ -25,7 +27,6 @@ import (
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"
- "time"
)
const (
@@ -38,6 +39,7 @@ type FriendCacheRedis struct {
friendDB database.Friend
expireTime time.Duration
rcClient *rockscache.Client
+ syncCount int
}
// NewFriendCacheRedis creates a new instance of FriendCacheRedis.
@@ -68,6 +70,10 @@ func (f *FriendCacheRedis) getFriendIDsKey(ownerUserID string) string {
return cachekey.GetFriendIDsKey(ownerUserID)
}
+func (f *FriendCacheRedis) getFriendMaxVersionKey(ownerUserID string) string {
+ return cachekey.GetFriendMaxVersionKey(ownerUserID)
+}
+
// getTwoWayFriendsIDsKey returns the key for storing two-way friend IDs in the cache.
func (f *FriendCacheRedis) getTwoWayFriendsIDsKey(ownerUserID string) string {
return cachekey.GetTwoWayFriendsIDsKey(ownerUserID)
@@ -151,3 +157,30 @@ func (f *FriendCacheRedis) DelFriends(ownerUserID string, friendUserIDs []string
return newFriendCache
}
+
+func (f *FriendCacheRedis) DelOwner(friendUserID string, ownerUserIDs []string) cache.FriendCache {
+ newFriendCache := f.CloneFriendCache()
+
+ for _, ownerUserID := range ownerUserIDs {
+ key := f.getFriendKey(ownerUserID, friendUserID)
+		newFriendCache.AddKeys(key) // AddKeys marks the key for deletion
+ }
+
+ return newFriendCache
+}
+
+func (f *FriendCacheRedis) DelMaxFriendVersion(ownerUserIDs ...string) cache.FriendCache {
+ newFriendCache := f.CloneFriendCache()
+ for _, ownerUserID := range ownerUserIDs {
+ key := f.getFriendMaxVersionKey(ownerUserID)
+		newFriendCache.AddKeys(key) // AddKeys marks the key for deletion
+ }
+
+ return newFriendCache
+}
+
+func (f *FriendCacheRedis) FindMaxFriendVersion(ctx context.Context, ownerUserID string) (*model.VersionLog, error) {
+ return getCache(ctx, f.rcClient, f.getFriendMaxVersionKey(ownerUserID), f.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
+ return f.friendDB.FindIncrVersion(ctx, ownerUserID, 0, 0)
+ })
+}
diff --git a/pkg/common/storage/cache/redis/group.go b/pkg/common/storage/cache/redis/group.go
index 2de03906f..736111df3 100644
--- a/pkg/common/storage/cache/redis/group.go
+++ b/pkg/common/storage/cache/redis/group.go
@@ -17,6 +17,8 @@ package redis
import (
"context"
"fmt"
+ "time"
+
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
@@ -27,9 +29,7 @@ import (
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
- "github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"
- "time"
)
const (
@@ -111,34 +111,20 @@ func (g *GroupCacheRedis) getGroupRoleLevelMemberIDsKey(groupID string, roleLeve
return cachekey.GetGroupRoleLevelMemberIDsKey(groupID, roleLevel)
}
-func (g *GroupCacheRedis) GetGroupIndex(group *model.Group, keys []string) (int, error) {
- key := g.getGroupInfoKey(group.GroupID)
- for i, _key := range keys {
- if _key == key {
- return i, nil
- }
- }
-
- return 0, errIndex
+func (g *GroupCacheRedis) getGroupMemberMaxVersionKey(groupID string) string {
+ return cachekey.GetGroupMemberMaxVersionKey(groupID)
}
-func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *model.GroupMember, keys []string) (int, error) {
- key := g.getGroupMemberInfoKey(groupMember.GroupID, groupMember.UserID)
- for i, _key := range keys {
- if _key == key {
- return i, nil
- }
- }
+func (g *GroupCacheRedis) getJoinGroupMaxVersionKey(userID string) string {
+ return cachekey.GetJoinGroupMaxVersionKey(userID)
+}
- return 0, errIndex
+func (g *GroupCacheRedis) getGroupID(group *model.Group) string {
+ return group.GroupID
}
func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error) {
- return batchGetCache(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
- return g.getGroupInfoKey(groupID)
- }, func(ctx context.Context, groupID string) (*model.Group, error) {
- return g.groupDB.Take(ctx, groupID)
- })
+ return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, g.getGroupInfoKey, g.getGroupID, g.groupDB.Find)
}
func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error) {
@@ -226,19 +212,6 @@ func (g *GroupCacheRedis) GetGroupMemberIDs(ctx context.Context, groupID string)
})
}
-func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (map[string][]string, error) {
- m := make(map[string][]string)
- for _, groupID := range groupIDs {
- userIDs, err := g.GetGroupMemberIDs(ctx, groupID)
- if err != nil {
- return nil, err
- }
- m[groupID] = userIDs
- }
-
- return m, nil
-}
-
func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) cache.GroupCache {
cache := g.CloneGroupCache()
cache.AddKeys(g.getGroupMemberIDsKey(groupID))
@@ -246,9 +219,17 @@ func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) cache.GroupCache {
return cache
}
+func (g *GroupCacheRedis) findUserJoinedGroupID(ctx context.Context, userID string) ([]string, error) {
+ groupIDs, err := g.groupMemberDB.FindUserJoinedGroupID(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ return g.groupDB.FindJoinSortGroupID(ctx, groupIDs)
+}
+
func (g *GroupCacheRedis) GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error) {
return getCache(ctx, g.rcClient, g.getJoinedGroupsKey(userID), g.expireTime, func(ctx context.Context) ([]string, error) {
- return g.groupMemberDB.FindUserJoinedGroupID(ctx, userID)
+ return g.findUserJoinedGroupID(ctx, userID)
})
}
@@ -270,33 +251,15 @@ func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userI
}
func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
- return batchGetCache(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
+ return batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
return g.getGroupMemberInfoKey(groupID, userID)
- }, func(ctx context.Context, userID string) (*model.GroupMember, error) {
- return g.groupMemberDB.Take(ctx, groupID, userID)
+ }, func(member *model.GroupMember) string {
+ return member.UserID
+ }, func(ctx context.Context, userIDs []string) ([]*model.GroupMember, error) {
+ return g.groupMemberDB.Find(ctx, groupID, userIDs)
})
}
-func (g *GroupCacheRedis) GetGroupMembersPage(
- ctx context.Context,
- groupID string,
- userIDs []string,
- showNumber, pageNumber int32,
-) (total uint32, groupMembers []*model.GroupMember, err error) {
- groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
- if err != nil {
- return 0, nil, err
- }
- if userIDs != nil {
- userIDs = datautil.BothExist(userIDs, groupMemberIDs)
- } else {
- userIDs = groupMemberIDs
- }
- groupMembers, err = g.GetGroupMembersInfo(ctx, groupID, datautil.Paginate(userIDs, int(showNumber), int(showNumber)))
-
- return uint32(len(userIDs)), groupMembers, err
-}
-
func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error) {
groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
if err != nil {
@@ -306,14 +269,6 @@ func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID st
return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}
-func (g *GroupCacheRedis) GetAllGroupMemberInfo(ctx context.Context, groupID string) ([]*model.GroupMember, error) {
- groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
- if err != nil {
- return nil, err
- }
- return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
-}
-
func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) cache.GroupCache {
keys := make([]string, 0, len(userIDs))
for _, userID := range userIDs {
@@ -393,16 +348,66 @@ func (g *GroupCacheRedis) GetGroupRolesLevelMemberInfo(ctx context.Context, grou
return g.GetGroupMembersInfo(ctx, groupID, userIDs)
}
-func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) (_ []*model.GroupMember, err error) {
+func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error) {
if len(groupIDs) == 0 {
+ var err error
groupIDs, err = g.GetJoinedGroupIDs(ctx, userID)
if err != nil {
return nil, err
}
}
- return batchGetCache(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
+ return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
return g.getGroupMemberInfoKey(groupID, userID)
- }, func(ctx context.Context, groupID string) (*model.GroupMember, error) {
- return g.groupMemberDB.Take(ctx, groupID, userID)
+ }, func(member *model.GroupMember) string {
+ return member.GroupID
+ }, func(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
+ return g.groupMemberDB.FindInGroup(ctx, userID, groupIDs)
+ })
+}
+
+func (g *GroupCacheRedis) DelMaxGroupMemberVersion(groupIDs ...string) cache.GroupCache {
+ keys := make([]string, 0, len(groupIDs))
+ for _, groupID := range groupIDs {
+ keys = append(keys, g.getGroupMemberMaxVersionKey(groupID))
+ }
+ cache := g.CloneGroupCache()
+ cache.AddKeys(keys...)
+ return cache
+}
+
+func (g *GroupCacheRedis) DelMaxJoinGroupVersion(userIDs ...string) cache.GroupCache {
+ keys := make([]string, 0, len(userIDs))
+ for _, userID := range userIDs {
+ keys = append(keys, g.getJoinGroupMaxVersionKey(userID))
+ }
+ cache := g.CloneGroupCache()
+ cache.AddKeys(keys...)
+ return cache
+}
+
+func (g *GroupCacheRedis) FindMaxGroupMemberVersion(ctx context.Context, groupID string) (*model.VersionLog, error) {
+ return getCache(ctx, g.rcClient, g.getGroupMemberMaxVersionKey(groupID), g.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
+ return g.groupMemberDB.FindMemberIncrVersion(ctx, groupID, 0, 0)
+ })
+}
+
+func (g *GroupCacheRedis) BatchFindMaxGroupMemberVersion(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error) {
+ return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs,
+ func(groupID string) string {
+ return g.getGroupMemberMaxVersionKey(groupID)
+ }, func(versionLog *model.VersionLog) string {
+ return versionLog.DID
+ }, func(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error) {
+			// versions and limits are zero-valued slices with the same length as groupIDs: version 0 and limit 0 fetch the full version log
+ versions := make([]uint, len(groupIDs))
+ limits := make([]int, len(groupIDs))
+
+ return g.groupMemberDB.BatchFindMemberIncrVersion(ctx, groupIDs, versions, limits)
+ })
+}
+
+func (g *GroupCacheRedis) FindMaxJoinGroupVersion(ctx context.Context, userID string) (*model.VersionLog, error) {
+ return getCache(ctx, g.rcClient, g.getJoinGroupMaxVersionKey(userID), g.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
+ return g.groupMemberDB.FindJoinIncrVersion(ctx, userID, 0, 0)
})
}
diff --git a/pkg/common/storage/cache/redis/msg.go b/pkg/common/storage/cache/redis/msg.go
index 2d21cfe13..30f367bb7 100644
--- a/pkg/common/storage/cache/redis/msg.go
+++ b/pkg/common/storage/cache/redis/msg.go
@@ -183,5 +183,4 @@ func (c *msgCache) GetMessagesBySeq(ctx context.Context, conversationID string,
return nil, nil, err
}
return seqMsgs, failedSeqs, nil
-
}
diff --git a/pkg/common/storage/cache/redis/online.go b/pkg/common/storage/cache/redis/online.go
new file mode 100644
index 000000000..dc6a5f775
--- /dev/null
+++ b/pkg/common/storage/cache/redis/online.go
@@ -0,0 +1,89 @@
+package redis
+
+import (
+ "context"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+ "github.com/openimsdk/tools/errs"
+ "github.com/redis/go-redis/v9"
+ "strconv"
+ "time"
+)
+
+func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
+ return &userOnline{
+ rdb: rdb,
+ expire: cachekey.OnlineExpire,
+ channelName: cachekey.OnlineChannel,
+ }
+}
+
+type userOnline struct {
+ rdb redis.UniversalClient
+ expire time.Duration
+ channelName string
+}
+
+func (s *userOnline) getUserOnlineKey(userID string) string {
+ return cachekey.GetOnlineKey(userID)
+}
+
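+// GetOnline returns the platform IDs whose expiry score has not yet passed.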
+func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) {
+ members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{
+ Min: strconv.FormatInt(time.Now().Unix(), 10),
+ Max: "+inf",
+ }).Result()
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ platformIDs := make([]int32, 0, len(members))
+ for _, member := range members {
+ val, err := strconv.Atoi(member)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ platformIDs = append(platformIDs, int32(val))
+ }
+ return platformIDs, nil
+}
+
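+// SetUserOnline atomically maintains the user's online platform IDs in a sorted set scored by
+// expiry time: stale members and the offline platforms are removed, the online platforms are
+// (re)added with a fresh deadline, and the resulting platform list plus the userID is published
+// on the online channel only when the membership actually changed.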
+func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
+ script := `
+ local key = KEYS[1]
+ local score = ARGV[3]
+ local num1 = redis.call("ZCARD", key)
+ redis.call("ZREMRANGEBYSCORE", key, "-inf", ARGV[2])
+ for i = 5, tonumber(ARGV[4])+4 do
+ redis.call("ZREM", key, ARGV[i])
+ end
+ local num2 = redis.call("ZCARD", key)
+ for i = 5+tonumber(ARGV[4]), #ARGV do
+ redis.call("ZADD", key, score, ARGV[i])
+ end
+ redis.call("EXPIRE", key, ARGV[1])
+ local num3 = redis.call("ZCARD", key)
+ local change = (num1 ~= num2) or (num2 ~= num3)
+ if change then
+ local members = redis.call("ZRANGE", key, 0, -1)
+ table.insert(members, KEYS[2])
+ redis.call("PUBLISH", KEYS[3], table.concat(members, ":"))
+ return 1
+ else
+ return 0
+ end
+`
+ now := time.Now()
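+	// ARGV layout consumed by the script: [1]=key TTL in seconds, [2]=expiry cutoff (now),
+	// [3]=score for the new members (now+expire), [4]=number of offline platform IDs,
+	// followed by the offline platform IDs and then the online platform IDs.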
+ argv := make([]any, 0, 2+len(online)+len(offline))
+ argv = append(argv, int32(s.expire/time.Second), now.Unix(), now.Add(s.expire).Unix(), int32(len(offline)))
+ for _, platformID := range offline {
+ argv = append(argv, platformID)
+ }
+ for _, platformID := range online {
+ argv = append(argv, platformID)
+ }
+ keys := []string{s.getUserOnlineKey(userID), userID, s.channelName}
+ if err := s.rdb.Eval(ctx, script, keys, argv).Err(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/pkg/common/storage/cache/redis/redis_shard_manager.go b/pkg/common/storage/cache/redis/redis_shard_manager.go
index 98d70dabf..17e5fecf6 100644
--- a/pkg/common/storage/cache/redis/redis_shard_manager.go
+++ b/pkg/common/storage/cache/redis/redis_shard_manager.go
@@ -2,6 +2,7 @@ package redis
import (
"context"
+ "github.com/dtm-labs/rockscache"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
@@ -109,7 +110,7 @@ func (rsm *RedisShardManager) ProcessKeysBySlot(
func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) {
slots := make(map[int64][]string)
clusterClient, isCluster := redisClient.(*redis.ClusterClient)
- if isCluster {
+ if isCluster && len(keys) > 1 {
pipe := clusterClient.Pipeline()
cmds := make([]*redis.IntCmd, len(keys))
for i, key := range keys {
@@ -195,3 +196,16 @@ func ProcessKeysBySlot(
}
return nil
}
+
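+// DeleteCacheBySlot tags rockscache keys as deleted, grouping them by cluster slot when more
+// than one key is involved so that each TagAsDeletedBatch2 call stays on a single node.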
+func DeleteCacheBySlot(ctx context.Context, rcClient *rockscache.Client, keys []string) error {
+ switch len(keys) {
+ case 0:
+ return nil
+ case 1:
+ return rcClient.TagAsDeletedBatch2(ctx, keys)
+ default:
+ return ProcessKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), keys, func(ctx context.Context, slot int64, keys []string) error {
+ return rcClient.TagAsDeletedBatch2(ctx, keys)
+ })
+ }
+}
diff --git a/pkg/common/storage/cache/redis/seq.go b/pkg/common/storage/cache/redis/seq.go
deleted file mode 100644
index 76dd921a5..000000000
--- a/pkg/common/storage/cache/redis/seq.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package redis
-
-import (
- "context"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
- "github.com/openimsdk/tools/errs"
- "github.com/openimsdk/tools/utils/stringutil"
- "github.com/redis/go-redis/v9"
-)
-
-func NewSeqCache(rdb redis.UniversalClient) cache.SeqCache {
- return &seqCache{rdb: rdb}
-}
-
-type seqCache struct {
- rdb redis.UniversalClient
-}
-
-func (c *seqCache) getMaxSeqKey(conversationID string) string {
- return cachekey.GetMaxSeqKey(conversationID)
-}
-
-func (c *seqCache) getMinSeqKey(conversationID string) string {
- return cachekey.GetMinSeqKey(conversationID)
-}
-
-func (c *seqCache) getHasReadSeqKey(conversationID string, userID string) string {
- return cachekey.GetHasReadSeqKey(conversationID, userID)
-}
-
-func (c *seqCache) getConversationUserMinSeqKey(conversationID, userID string) string {
- return cachekey.GetConversationUserMinSeqKey(conversationID, userID)
-}
-
-func (c *seqCache) setSeq(ctx context.Context, conversationID string, seq int64, getkey func(conversationID string) string) error {
- return errs.Wrap(c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err())
-}
-
-func (c *seqCache) getSeq(ctx context.Context, conversationID string, getkey func(conversationID string) string) (int64, error) {
- val, err := c.rdb.Get(ctx, getkey(conversationID)).Int64()
- if err != nil {
- return 0, errs.Wrap(err)
- }
- return val, nil
-}
-
-func (c *seqCache) getSeqs(ctx context.Context, items []string, getkey func(s string) string) (m map[string]int64, err error) {
- m = make(map[string]int64, len(items))
- for i, v := range items {
- res, err := c.rdb.Get(ctx, getkey(v)).Result()
- if err != nil && err != redis.Nil {
- return nil, errs.Wrap(err)
- }
- val := stringutil.StringToInt64(res)
- if val != 0 {
- m[items[i]] = val
- }
- }
-
- return m, nil
-}
-
-func (c *seqCache) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
- return c.setSeq(ctx, conversationID, maxSeq, c.getMaxSeqKey)
-}
-
-func (c *seqCache) GetMaxSeqs(ctx context.Context, conversationIDs []string) (m map[string]int64, err error) {
- return c.getSeqs(ctx, conversationIDs, c.getMaxSeqKey)
-}
-
-func (c *seqCache) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
- return c.getSeq(ctx, conversationID, c.getMaxSeqKey)
-}
-
-func (c *seqCache) SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error {
- return c.setSeq(ctx, conversationID, minSeq, c.getMinSeqKey)
-}
-
-func (c *seqCache) setSeqs(ctx context.Context, seqs map[string]int64, getkey func(key string) string) error {
- for conversationID, seq := range seqs {
- if err := c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err(); err != nil {
- return errs.Wrap(err)
- }
- }
- return nil
-}
-
-func (c *seqCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
- return c.setSeqs(ctx, seqs, c.getMinSeqKey)
-}
-
-func (c *seqCache) GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
- return c.getSeqs(ctx, conversationIDs, c.getMinSeqKey)
-}
-
-func (c *seqCache) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
- return c.getSeq(ctx, conversationID, c.getMinSeqKey)
-}
-
-func (c *seqCache) GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
- val, err := c.rdb.Get(ctx, c.getConversationUserMinSeqKey(conversationID, userID)).Int64()
- if err != nil {
- return 0, errs.Wrap(err)
- }
- return val, nil
-}
-
-func (c *seqCache) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (m map[string]int64, err error) {
- return c.getSeqs(ctx, userIDs, func(userID string) string {
- return c.getConversationUserMinSeqKey(conversationID, userID)
- })
-}
-
-func (c *seqCache) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
- return errs.Wrap(c.rdb.Set(ctx, c.getConversationUserMinSeqKey(conversationID, userID), minSeq, 0).Err())
-}
-
-func (c *seqCache) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
- return c.setSeqs(ctx, seqs, func(userID string) string {
- return c.getConversationUserMinSeqKey(conversationID, userID)
- })
-}
-
-func (c *seqCache) SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error) {
- return c.setSeqs(ctx, seqs, func(conversationID string) string {
- return c.getConversationUserMinSeqKey(conversationID, userID)
- })
-}
-
-func (c *seqCache) SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
- return errs.Wrap(c.rdb.Set(ctx, c.getHasReadSeqKey(conversationID, userID), hasReadSeq, 0).Err())
-}
-
-func (c *seqCache) SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error {
- return c.setSeqs(ctx, hasReadSeqs, func(userID string) string {
- return c.getHasReadSeqKey(conversationID, userID)
- })
-}
-
-func (c *seqCache) UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error {
- return c.setSeqs(ctx, hasReadSeqs, func(conversationID string) string {
- return c.getHasReadSeqKey(conversationID, userID)
- })
-}
-
-func (c *seqCache) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
- return c.getSeqs(ctx, conversationIDs, func(conversationID string) string {
- return c.getHasReadSeqKey(conversationID, userID)
- })
-}
-
-func (c *seqCache) GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error) {
- val, err := c.rdb.Get(ctx, c.getHasReadSeqKey(conversationID, userID)).Int64()
- if err != nil {
- return 0, err
- }
- return val, nil
-}
diff --git a/pkg/common/storage/cache/redis/seq_conversation.go b/pkg/common/storage/cache/redis/seq_conversation.go
new file mode 100644
index 000000000..7fe849193
--- /dev/null
+++ b/pkg/common/storage/cache/redis/seq_conversation.go
@@ -0,0 +1,333 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/dtm-labs/rockscache"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+ "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
+ "github.com/openimsdk/tools/errs"
+ "github.com/openimsdk/tools/log"
+ "github.com/redis/go-redis/v9"
+ "time"
+)
+
+func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache {
+ return &seqConversationCacheRedis{
+ rdb: rdb,
+ mgo: mgo,
+ lockTime: time.Second * 3,
+ dataTime: time.Hour * 24 * 365,
+ minSeqExpireTime: time.Hour,
+ rocks: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
+ }
+}
+
+type seqConversationCacheRedis struct {
+ rdb redis.UniversalClient
+ mgo database.SeqConversation
+ rocks *rockscache.Client
+ lockTime time.Duration
+ dataTime time.Duration
+ minSeqExpireTime time.Duration
+}
+
+func (s *seqConversationCacheRedis) getMinSeqKey(conversationID string) string {
+ return cachekey.GetMallocMinSeqKey(conversationID)
+}
+
+func (s *seqConversationCacheRedis) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
+ return s.SetMinSeqs(ctx, map[string]int64{conversationID: seq})
+}
+
+func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
+ return getCache(ctx, s.rocks, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
+ return s.mgo.GetMinSeq(ctx, conversationID)
+ })
+}
+
+func (s *seqConversationCacheRedis) getSingleMaxSeq(ctx context.Context, conversationID string) (map[string]int64, error) {
+ seq, err := s.GetMaxSeq(ctx, conversationID)
+ if err != nil {
+ return nil, err
+ }
+ return map[string]int64{conversationID: seq}, nil
+}
+
+func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error {
+ result := make([]*redis.StringCmd, len(keys))
+ pipe := s.rdb.Pipeline()
+ for i, key := range keys {
+ result[i] = pipe.HGet(ctx, key, "CURR")
+ }
+ if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
+ return errs.Wrap(err)
+ }
+ var notFoundKey []string
+ for i, r := range result {
+ req, err := r.Int64()
+ if err == nil {
+ seqs[keyConversationID[keys[i]]] = req
+ } else if errors.Is(err, redis.Nil) {
+ notFoundKey = append(notFoundKey, keys[i])
+ } else {
+ return errs.Wrap(err)
+ }
+ }
+ for _, key := range notFoundKey {
+ conversationID := keyConversationID[key]
+ seq, err := s.GetMaxSeq(ctx, conversationID)
+ if err != nil {
+ return err
+ }
+ seqs[conversationID] = seq
+ }
+ return nil
+}
+
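+// GetMaxSeqs reads the cached max seq ("CURR") for many conversations with one pipelined HGET
+// per cluster slot, falling back to GetMaxSeq for any conversation not yet in Redis.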
+func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
+ switch len(conversationIDs) {
+ case 0:
+ return map[string]int64{}, nil
+ case 1:
+ return s.getSingleMaxSeq(ctx, conversationIDs[0])
+ }
+ keys := make([]string, 0, len(conversationIDs))
+ keyConversationID := make(map[string]string, len(conversationIDs))
+ for _, conversationID := range conversationIDs {
+ key := s.getSeqMallocKey(conversationID)
+ if _, ok := keyConversationID[key]; ok {
+ continue
+ }
+ keys = append(keys, key)
+ keyConversationID[key] = conversationID
+ }
+ if len(keys) == 1 {
+ return s.getSingleMaxSeq(ctx, conversationIDs[0])
+ }
+ slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
+ if err != nil {
+ return nil, err
+ }
+ seqs := make(map[string]int64, len(conversationIDs))
+ for _, keys := range slotKeys {
+ if err := s.batchGetMaxSeq(ctx, keys, keyConversationID, seqs); err != nil {
+ return nil, err
+ }
+ }
+ return seqs, nil
+}
+
+func (s *seqConversationCacheRedis) getSeqMallocKey(conversationID string) string {
+ return cachekey.GetMallocSeqKey(conversationID)
+}
+
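+// setSeq writes the freshly allocated [currSeq, lastSeq] range back to the hash, but only when
+// the value stored under "LOCK" still matches owner, or when the key has expired entirely.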
+func (s *seqConversationCacheRedis) setSeq(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) (int64, error) {
+ if lastSeq < currSeq {
+		return 0, errs.New("lastSeq must be greater than or equal to currSeq")
+ }
+	// 0: success, the lock was still held by this owner and has been released
+	// 1: written anyway, the key had expired and nobody else re-acquired the lock
+	// 2: locked by someone else, nothing was written
+ script := `
+local key = KEYS[1]
+local lockValue = ARGV[1]
+local dataSecond = ARGV[2]
+local curr_seq = tonumber(ARGV[3])
+local last_seq = tonumber(ARGV[4])
+if redis.call("EXISTS", key) == 0 then
+ redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq)
+ redis.call("EXPIRE", key, dataSecond)
+ return 1
+end
+if redis.call("HGET", key, "LOCK") ~= lockValue then
+ return 2
+end
+redis.call("HDEL", key, "LOCK")
+redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq)
+redis.call("EXPIRE", key, dataSecond)
+return 0
+`
+ result, err := s.rdb.Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq).Int64()
+ if err != nil {
+ return 0, errs.Wrap(err)
+ }
+ return result, nil
+}
+
+// malloc: size=0 only reads the current seq, size>0 allocates size sequence numbers
+func (s *seqConversationCacheRedis) malloc(ctx context.Context, key string, size int64) ([]int64, error) {
+	// 0: success, the cached range covered the request
+	// 1: key not found, the lock was acquired and the caller must load a range from the database
+	// 2: locked by another caller, retry later
+	// 3: the request exceeds the cached range, the lock was acquired and a new range must be allocated
+ script := `
+local key = KEYS[1]
+local size = tonumber(ARGV[1])
+local lockSecond = ARGV[2]
+local dataSecond = ARGV[3]
+local result = {}
+if redis.call("EXISTS", key) == 0 then
+ local lockValue = math.random(0, 999999999)
+ redis.call("HSET", key, "LOCK", lockValue)
+ redis.call("EXPIRE", key, lockSecond)
+ table.insert(result, 1)
+ table.insert(result, lockValue)
+ return result
+end
+if redis.call("HEXISTS", key, "LOCK") == 1 then
+ table.insert(result, 2)
+ return result
+end
+local curr_seq = tonumber(redis.call("HGET", key, "CURR"))
+local last_seq = tonumber(redis.call("HGET", key, "LAST"))
+if size == 0 then
+ redis.call("EXPIRE", key, dataSecond)
+ table.insert(result, 0)
+ table.insert(result, curr_seq)
+ table.insert(result, last_seq)
+ return result
+end
+local max_seq = curr_seq + size
+if max_seq > last_seq then
+ local lockValue = math.random(0, 999999999)
+ redis.call("HSET", key, "LOCK", lockValue)
+ redis.call("HSET", key, "CURR", last_seq)
+ redis.call("EXPIRE", key, lockSecond)
+ table.insert(result, 3)
+ table.insert(result, curr_seq)
+ table.insert(result, last_seq)
+ table.insert(result, lockValue)
+ return result
+end
+redis.call("HSET", key, "CURR", max_seq)
+redis.call("EXPIRE", key, dataSecond)
+table.insert(result, 0)
+table.insert(result, curr_seq)
+table.insert(result, last_seq)
+return result
+`
+ result, err := s.rdb.Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second)).Int64Slice()
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ return result, nil
+}
+
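+// wait blocks for a quarter of a second, or until ctx is done, before the caller retries a locked key.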
+func (s *seqConversationCacheRedis) wait(ctx context.Context) error {
+ timer := time.NewTimer(time.Second / 4)
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
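+// setSeqRetry writes the seq range allocated from MongoDB back into the cache via setSeq, retrying up to 10
+// times when the write returns an error.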
+func (s *seqConversationCacheRedis) setSeqRetry(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) {
+ for i := 0; i < 10; i++ {
+ state, err := s.setSeq(ctx, key, owner, currSeq, lastSeq)
+ if err != nil {
+ log.ZError(ctx, "set seq cache failed", err, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq, "count", i+1)
+ if err := s.wait(ctx); err != nil {
+ return
+ }
+ continue
+ }
+ switch state {
+ case 0: // ideal state
+ case 1:
+ log.ZWarn(ctx, "set seq cache lock not found", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
+ case 2:
+			log.ZWarn(ctx, "set seq cache lock is held by someone else, value not written", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
+ default:
+ log.ZError(ctx, "set seq cache lock unknown state", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
+ }
+ return
+ }
+	log.ZError(ctx, "set seq cache still failed after retries", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
+}
+
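+// getMallocSize returns how many seqs to reserve in MongoDB for one allocation: the requested size plus a
+// pre-allocation buffer (100 for group conversations, 50 otherwise), so later allocations are served from cache.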
+func (s *seqConversationCacheRedis) getMallocSize(conversationID string, size int64) int64 {
+ if size == 0 {
+ return 0
+ }
+ var basicSize int64
+ if msgprocessor.IsGroupConversationID(conversationID) {
+ basicSize = 100
+ } else {
+ basicSize = 50
+ }
+ basicSize += size
+ return basicSize
+}
+
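+// Malloc reserves size seqs for the conversation and returns the seq value before the allocation; size == 0
+// only reads the current max seq. It falls back to MongoDB when the cached range is missing or exhausted.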
+func (s *seqConversationCacheRedis) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
+ if size < 0 {
+		return 0, errs.New("size must not be negative")
+ }
+ key := s.getSeqMallocKey(conversationID)
+ for i := 0; i < 10; i++ {
+ states, err := s.malloc(ctx, key, size)
+ if err != nil {
+ return 0, err
+ }
+ switch states[0] {
+ case 0: // success
+ return states[1], nil
+ case 1: // not found
+ mallocSize := s.getMallocSize(conversationID, size)
+ seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
+ if err != nil {
+ return 0, err
+ }
+ s.setSeqRetry(ctx, key, states[1], seq+size, seq+mallocSize)
+ return seq, nil
+ case 2: // locked
+ if err := s.wait(ctx); err != nil {
+ return 0, err
+ }
+ continue
+ case 3: // exceeded cache max value
+ currSeq := states[1]
+ lastSeq := states[2]
+ mallocSize := s.getMallocSize(conversationID, size)
+ seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
+ if err != nil {
+ return 0, err
+ }
+ if lastSeq == seq {
+ s.setSeqRetry(ctx, key, states[3], currSeq+size, seq+mallocSize)
+ return currSeq, nil
+ } else {
+				log.ZWarn(ctx, "seq allocated from mongo does not equal cached last seq", nil, "conversationID", conversationID, "currSeq", currSeq, "lastSeq", lastSeq, "mallocSeq", seq)
+ s.setSeqRetry(ctx, key, states[3], seq+size, seq+mallocSize)
+ return seq, nil
+ }
+ default:
+ log.ZError(ctx, "malloc seq unknown state", nil, "state", states[0], "conversationID", conversationID, "size", size)
+ return 0, errs.New(fmt.Sprintf("unknown state: %d", states[0]))
+ }
+ }
+	log.ZError(ctx, "malloc seq still failed after retries", nil, "conversationID", conversationID, "size", size)
+	return 0, errs.New("malloc seq timed out waiting for the lock", "conversationID", conversationID, "size", size)
+}
+
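+// GetMaxSeq returns the conversation's current max seq by calling Malloc with size 0.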
+func (s *seqConversationCacheRedis) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
+ return s.Malloc(ctx, conversationID, 0)
+}
+
+func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
+ keys := make([]string, 0, len(seqs))
+ for conversationID, seq := range seqs {
+ keys = append(keys, s.getMinSeqKey(conversationID))
+ if err := s.mgo.SetMinSeq(ctx, conversationID, seq); err != nil {
+ return err
+ }
+ }
+ return DeleteCacheBySlot(ctx, s.rocks, keys)
+}
diff --git a/pkg/common/storage/cache/redis/seq_conversation_test.go b/pkg/common/storage/cache/redis/seq_conversation_test.go
new file mode 100644
index 000000000..1a40624b8
--- /dev/null
+++ b/pkg/common/storage/cache/redis/seq_conversation_test.go
@@ -0,0 +1,109 @@
+package redis
+
+import (
+ "context"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
+ "github.com/redis/go-redis/v9"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func newTestSeq() *seqConversationCacheRedis {
+ mgocli, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))
+ if err != nil {
+ panic(err)
+ }
+ model, err := mgo.NewSeqConversationMongo(mgocli.Database("openim_v3"))
+ if err != nil {
+ panic(err)
+ }
+ opt := &redis.Options{
+ Addr: "172.16.8.48:16379",
+ Password: "openIM123",
+ DB: 1,
+ }
+ rdb := redis.NewClient(opt)
+ if err := rdb.Ping(context.Background()).Err(); err != nil {
+ panic(err)
+ }
+ return NewSeqConversationCacheRedis(rdb, model).(*seqConversationCacheRedis)
+}
+
+func TestSeq(t *testing.T) {
+ ts := newTestSeq()
+ var (
+ wg sync.WaitGroup
+ speed atomic.Int64
+ )
+
+ const count = 128
+ wg.Add(count)
+ for i := 0; i < count; i++ {
+ index := i + 1
+ go func() {
+ defer wg.Done()
+ var size int64 = 10
+			cID := strconv.Itoa(index)
+ for i := 1; ; i++ {
+ //first, err := ts.mgo.Malloc(context.Background(), cID, size) // mongo
+ first, err := ts.Malloc(context.Background(), cID, size) // redis
+ if err != nil {
+ t.Logf("[%d-%d] %s %s", index, i, cID, err)
+ return
+ }
+ speed.Add(size)
+ _ = first
+ //t.Logf("[%d] %d -> %d", i, first+1, first+size)
+ }
+ }()
+ }
+
+ done := make(chan struct{})
+
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+
+ ticker := time.NewTicker(time.Second)
+
+ for {
+ select {
+ case <-done:
+ ticker.Stop()
+ return
+ case <-ticker.C:
+ value := speed.Swap(0)
+ t.Logf("speed: %d/s", value)
+ }
+ }
+}
+
+func TestDel(t *testing.T) {
+ ts := newTestSeq()
+ for i := 1; i < 100; i++ {
+ var size int64 = 100
+ first, err := ts.Malloc(context.Background(), "100", size)
+ if err != nil {
+ t.Logf("[%d] %s", i, err)
+ return
+ }
+ t.Logf("[%d] %d -> %d", i, first+1, first+size)
+ time.Sleep(time.Second)
+ }
+}
+
+func TestSeqMalloc(t *testing.T) {
+ ts := newTestSeq()
+ t.Log(ts.GetMaxSeq(context.Background(), "100"))
+}
+
+func TestMinSeq(t *testing.T) {
+ ts := newTestSeq()
+ t.Log(ts.GetMinSeq(context.Background(), "10000000"))
+}
diff --git a/pkg/common/storage/cache/redis/seq_user.go b/pkg/common/storage/cache/redis/seq_user.go
new file mode 100644
index 000000000..edbc66b21
--- /dev/null
+++ b/pkg/common/storage/cache/redis/seq_user.go
@@ -0,0 +1,185 @@
+package redis
+
+import (
+ "context"
+ "github.com/dtm-labs/rockscache"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+ "github.com/openimsdk/tools/errs"
+ "github.com/redis/go-redis/v9"
+ "strconv"
+ "time"
+)
+
+func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser {
+ return &seqUserCacheRedis{
+ rdb: rdb,
+ mgo: mgo,
+ readSeqWriteRatio: 100,
+ expireTime: time.Hour * 24 * 7,
+ readExpireTime: time.Hour * 24 * 30,
+ rocks: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
+ }
+}
+
+type seqUserCacheRedis struct {
+ rdb redis.UniversalClient
+ mgo database.SeqUser
+ rocks *rockscache.Client
+ expireTime time.Duration
+ readExpireTime time.Duration
+ readSeqWriteRatio int64
+}
+
+func (s *seqUserCacheRedis) getSeqUserMaxSeqKey(conversationID string, userID string) string {
+ return cachekey.GetSeqUserMaxSeqKey(conversationID, userID)
+}
+
+func (s *seqUserCacheRedis) getSeqUserMinSeqKey(conversationID string, userID string) string {
+ return cachekey.GetSeqUserMinSeqKey(conversationID, userID)
+}
+
+func (s *seqUserCacheRedis) getSeqUserReadSeqKey(conversationID string, userID string) string {
+ return cachekey.GetSeqUserReadSeqKey(conversationID, userID)
+}
+
+func (s *seqUserCacheRedis) GetUserMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
+ return getCache(ctx, s.rocks, s.getSeqUserMaxSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
+ return s.mgo.GetUserMaxSeq(ctx, conversationID, userID)
+ })
+}
+
+func (s *seqUserCacheRedis) SetUserMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
+ if err := s.mgo.SetUserMaxSeq(ctx, conversationID, userID, seq); err != nil {
+ return err
+ }
+ return s.rocks.TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
+}
+
+func (s *seqUserCacheRedis) GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
+ return getCache(ctx, s.rocks, s.getSeqUserMinSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
+ return s.mgo.GetUserMinSeq(ctx, conversationID, userID)
+ })
+}
+
+func (s *seqUserCacheRedis) SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
+ return s.SetUserMinSeqs(ctx, userID, map[string]int64{conversationID: seq})
+}
+
+func (s *seqUserCacheRedis) GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
+ return getCache(ctx, s.rocks, s.getSeqUserReadSeqKey(conversationID, userID), s.readExpireTime, func(ctx context.Context) (int64, error) {
+ return s.mgo.GetUserReadSeq(ctx, conversationID, userID)
+ })
+}
+
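+// SetUserReadSeq always updates the Redis cache; MongoDB is only written when seq is a multiple of readSeqWriteRatio.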
+func (s *seqUserCacheRedis) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
+ if seq%s.readSeqWriteRatio == 0 {
+ if err := s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil {
+ return err
+ }
+ }
+ if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil {
+ return errs.Wrap(err)
+ }
+ return nil
+}
+
+func (s *seqUserCacheRedis) SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
+ keys := make([]string, 0, len(seqs))
+ for conversationID, seq := range seqs {
+ if err := s.mgo.SetUserMinSeq(ctx, conversationID, userID, seq); err != nil {
+ return err
+ }
+ keys = append(keys, s.getSeqUserMinSeqKey(conversationID, userID))
+ }
+ return DeleteCacheBySlot(ctx, s.rocks, keys)
+}
+
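+// setUserRedisReadSeqs pipelines the read seqs into Redis, grouping keys by cluster slot so that each pipeline
+// only touches keys from a single slot.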
+func (s *seqUserCacheRedis) setUserRedisReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
+ keys := make([]string, 0, len(seqs))
+ keySeq := make(map[string]int64)
+ for conversationID, seq := range seqs {
+ key := s.getSeqUserReadSeqKey(conversationID, userID)
+ keys = append(keys, key)
+ keySeq[key] = seq
+ }
+ slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
+ if err != nil {
+ return err
+ }
+ for _, keys := range slotKeys {
+ pipe := s.rdb.Pipeline()
+ for _, key := range keys {
+ pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10))
+ pipe.Expire(ctx, key, s.readExpireTime)
+ }
+ if _, err := pipe.Exec(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
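+// SetUserReadSeqs batch-updates read seqs: Redis is always written, MongoDB only for seqs that land on a
+// readSeqWriteRatio boundary.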
+func (s *seqUserCacheRedis) SetUserReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
+ if len(seqs) == 0 {
+ return nil
+ }
+ if err := s.setUserRedisReadSeqs(ctx, userID, seqs); err != nil {
+ return err
+ }
+ for conversationID, seq := range seqs {
+ if seq%s.readSeqWriteRatio == 0 {
+ if err := s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (s *seqUserCacheRedis) GetUserReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
+ res, err := batchGetCache2(ctx, s.rocks, s.readExpireTime, conversationIDs, func(conversationID string) string {
+ return s.getSeqUserReadSeqKey(conversationID, userID)
+ }, func(v *readSeqModel) string {
+ return v.ConversationID
+ }, func(ctx context.Context, conversationIDs []string) ([]*readSeqModel, error) {
+ seqs, err := s.mgo.GetUserReadSeqs(ctx, userID, conversationIDs)
+ if err != nil {
+ return nil, err
+ }
+ res := make([]*readSeqModel, 0, len(seqs))
+ for conversationID, seq := range seqs {
+ res = append(res, &readSeqModel{ConversationID: conversationID, Seq: seq})
+ }
+ return res, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ data := make(map[string]int64)
+ for _, v := range res {
+ data[v.ConversationID] = v.Seq
+ }
+ return data, nil
+}
+
+var _ BatchCacheCallback[string] = (*readSeqModel)(nil)
+
+type readSeqModel struct {
+ ConversationID string
+ Seq int64
+}
+
+func (r *readSeqModel) BatchCache(conversationID string) {
+ r.ConversationID = conversationID
+}
+
+func (r *readSeqModel) UnmarshalJSON(bytes []byte) (err error) {
+ r.Seq, err = strconv.ParseInt(string(bytes), 10, 64)
+ return
+}
+
+func (r *readSeqModel) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.FormatInt(r.Seq, 10)), nil
+}
diff --git a/pkg/common/storage/cache/redis/seq_user_test.go b/pkg/common/storage/cache/redis/seq_user_test.go
new file mode 100644
index 000000000..0059c81db
--- /dev/null
+++ b/pkg/common/storage/cache/redis/seq_user_test.go
@@ -0,0 +1,111 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+ mgo2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
+ "github.com/redis/go-redis/v9"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "log"
+ "strconv"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func newTestOnline() *userOnline {
+ opt := &redis.Options{
+ Addr: "172.16.8.48:16379",
+ Password: "openIM123",
+ DB: 0,
+ }
+ rdb := redis.NewClient(opt)
+ if err := rdb.Ping(context.Background()).Err(); err != nil {
+ panic(err)
+ }
+ return &userOnline{rdb: rdb, expire: time.Hour, channelName: "user_online"}
+}
+
+func TestOnline(t *testing.T) {
+ ts := newTestOnline()
+ var count atomic.Int64
+ for i := 0; i < 64; i++ {
+ go func(userID string) {
+ var err error
+ for i := 0; ; i++ {
+ if i%2 == 0 {
+ err = ts.SetUserOnline(context.Background(), userID, []int32{5, 6}, []int32{7, 8, 9})
+ } else {
+ err = ts.SetUserOnline(context.Background(), userID, []int32{1, 2, 3}, []int32{4, 5, 6})
+ }
+ if err != nil {
+ panic(err)
+ }
+ count.Add(1)
+ }
+ }(strconv.Itoa(10000 + i))
+ }
+
+ ticker := time.NewTicker(time.Second)
+ for range ticker.C {
+ t.Log(count.Swap(0))
+ }
+}
+
+func TestGetOnline(t *testing.T) {
+ ts := newTestOnline()
+ ctx := context.Background()
+ pIDs, err := ts.GetOnline(ctx, "10000")
+ if err != nil {
+ panic(err)
+ }
+ t.Log(pIDs)
+}
+
+func TestRecvOnline(t *testing.T) {
+ ts := newTestOnline()
+ ctx := context.Background()
+ pubsub := ts.rdb.Subscribe(ctx, cachekey.OnlineChannel)
+
+ _, err := pubsub.Receive(ctx)
+ if err != nil {
+ log.Fatalf("Could not subscribe: %v", err)
+ }
+
+ ch := pubsub.Channel()
+
+ for msg := range ch {
+ fmt.Printf("Received message from channel %s: %s\n", msg.Channel, msg.Payload)
+ }
+}
+
+func TestGetUserReadSeqs(t *testing.T) {
+ opt := &redis.Options{
+ Addr: "172.16.8.48:16379",
+ Password: "openIM123",
+ DB: 0,
+ }
+ rdb := redis.NewClient(opt)
+
+ mgo, err := mongo.Connect(context.Background(),
+ options.Client().
+ ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").
+ SetConnectTimeout(5*time.Second))
+ if err != nil {
+ panic(err)
+ }
+ model, err := mgo2.NewSeqUserMongo(mgo.Database("openim_v3"))
+ if err != nil {
+ panic(err)
+ }
+ seq := NewSeqUserCacheRedis(rdb, model)
+
+ res, err := seq.GetUserReadSeqs(context.Background(), "2110910952", []string{"sg_345762580", "2000", "3000"})
+ if err != nil {
+ panic(err)
+ }
+ t.Log(res)
+
+}
diff --git a/pkg/common/storage/cache/redis/token.go b/pkg/common/storage/cache/redis/token.go
index 6098a666c..b82259658 100644
--- a/pkg/common/storage/cache/redis/token.go
+++ b/pkg/common/storage/cache/redis/token.go
@@ -21,22 +21,36 @@ import (
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/stringutil"
"github.com/redis/go-redis/v9"
+ "time"
)
type tokenCache struct {
- rdb redis.UniversalClient
+ rdb redis.UniversalClient
+ accessExpire time.Duration
}
-func NewTokenCacheModel(rdb redis.UniversalClient) cache.TokenModel {
- return &tokenCache{
- rdb: rdb,
- }
+func NewTokenCacheModel(rdb redis.UniversalClient, accessExpire int64) cache.TokenModel {
+ c := &tokenCache{rdb: rdb}
+ c.accessExpire = c.getExpireTime(accessExpire)
+ return c
}
-func (c *tokenCache) AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
+func (c *tokenCache) SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
return errs.Wrap(c.rdb.HSet(ctx, cachekey.GetTokenKey(userID, platformID), token, flag).Err())
}
+// SetTokenFlagEx sets the token and flag with an expire time.
+func (c *tokenCache) SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error {
+ key := cachekey.GetTokenKey(userID, platformID)
+ if err := c.rdb.HSet(ctx, key, token, flag).Err(); err != nil {
+ return errs.Wrap(err)
+ }
+ if err := c.rdb.Expire(ctx, key, c.accessExpire).Err(); err != nil {
+ return errs.Wrap(err)
+ }
+ return nil
+}
+
func (c *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) {
m, err := c.rdb.HGetAll(ctx, cachekey.GetTokenKey(userID, platformID)).Result()
if err != nil {
@@ -61,3 +75,7 @@ func (c *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, pla
func (c *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error {
return errs.Wrap(c.rdb.HDel(ctx, cachekey.GetTokenKey(userID, platformID), fields...).Err())
}
+
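+// getExpireTime converts the configured access token expire time, given in days, into a duration.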
+func (c *tokenCache) getExpireTime(t int64) time.Duration {
+ return time.Hour * 24 * time.Duration(t)
+}
diff --git a/pkg/common/storage/cache/redis/user.go b/pkg/common/storage/cache/redis/user.go
index 3de01563b..f6b490730 100644
--- a/pkg/common/storage/cache/redis/user.go
+++ b/pkg/common/storage/cache/redis/user.go
@@ -16,21 +16,14 @@ package redis
import (
"context"
- "encoding/json"
- "errors"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
- "github.com/openimsdk/protocol/constant"
- "github.com/openimsdk/protocol/user"
- "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
- "hash/crc32"
- "strconv"
"time"
)
@@ -61,8 +54,8 @@ func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache,
}
}
-func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string {
- return cachekey.GetOnlineStatusKey(modKey)
+func (u *UserCacheRedis) getUserID(user *model.User) string {
+ return user.UserID
}
func (u *UserCacheRedis) CloneUserCache() cache.UserCache {
@@ -90,11 +83,7 @@ func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userIn
}
func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*model.User, error) {
- return batchGetCache(ctx, u.rcClient, u.expireTime, userIDs, func(userID string) string {
- return u.getUserInfoKey(userID)
- }, func(ctx context.Context, userID string) (*model.User, error) {
- return u.userDB.Take(ctx, userID)
- })
+ return batchGetCache2(ctx, u.rcClient, u.expireTime, userIDs, u.getUserInfoKey, u.getUserID, u.userDB.Find)
}
func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) cache.UserCache {
@@ -130,174 +119,3 @@ func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) cache.UserC
return cache
}
-
-// GetUserStatus get user status.
-func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
- userStatus := make([]*user.OnlineStatus, 0, len(userIDs))
- for _, userID := range userIDs {
- UserIDNum := crc32.ChecksumIEEE([]byte(userID))
- modKey := strconv.Itoa(int(UserIDNum % statusMod))
- var onlineStatus user.OnlineStatus
- key := u.getOnlineStatusKey(modKey)
- result, err := u.rdb.HGet(ctx, key, userID).Result()
- if err != nil {
- if errors.Is(err, redis.Nil) {
- // key or field does not exist
- userStatus = append(userStatus, &user.OnlineStatus{
- UserID: userID,
- Status: constant.Offline,
- PlatformIDs: nil,
- })
-
- continue
- } else {
- return nil, errs.Wrap(err)
- }
- }
- err = json.Unmarshal([]byte(result), &onlineStatus)
- if err != nil {
- return nil, errs.Wrap(err)
- }
- onlineStatus.UserID = userID
- onlineStatus.Status = constant.Online
- userStatus = append(userStatus, &onlineStatus)
- }
-
- return userStatus, nil
-}
-
-// SetUserStatus Set the user status and save it in redis.
-func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
- UserIDNum := crc32.ChecksumIEEE([]byte(userID))
- modKey := strconv.Itoa(int(UserIDNum % statusMod))
- key := u.getOnlineStatusKey(modKey)
- log.ZDebug(ctx, "SetUserStatus args", "userID", userID, "status", status, "platformID", platformID, "modKey", modKey, "key", key)
- isNewKey, err := u.rdb.Exists(ctx, key).Result()
- if err != nil {
- return errs.Wrap(err)
- }
- if isNewKey == 0 {
- if status == constant.Online {
- onlineStatus := user.OnlineStatus{
- UserID: userID,
- Status: constant.Online,
- PlatformIDs: []int32{platformID},
- }
- jsonData, err := json.Marshal(&onlineStatus)
- if err != nil {
- return errs.Wrap(err)
- }
- _, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
- if err != nil {
- return errs.Wrap(err)
- }
- u.rdb.Expire(ctx, key, userOlineStatusExpireTime)
-
- return nil
- }
- }
-
- isNil := false
- result, err := u.rdb.HGet(ctx, key, userID).Result()
- if err != nil {
- if errors.Is(err, redis.Nil) {
- isNil = true
- } else {
- return errs.Wrap(err)
- }
- }
-
- if status == constant.Offline {
- err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key)
- if err != nil {
- return err
- }
- } else {
- err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key)
- if err != nil {
- return errs.Wrap(err)
- }
- }
-
- return nil
-}
-
-func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error {
- if isNil {
- log.ZWarn(ctx, "this user not online,maybe trigger order not right",
- err, "userStatus", status)
-
- return nil
- }
- var onlineStatus user.OnlineStatus
- err = json.Unmarshal([]byte(result), &onlineStatus)
- if err != nil {
- return errs.Wrap(err)
- }
- var newPlatformIDs []int32
- for _, val := range onlineStatus.PlatformIDs {
- if val != platformID {
- newPlatformIDs = append(newPlatformIDs, val)
- }
- }
- if newPlatformIDs == nil {
- _, err = u.rdb.HDel(ctx, key, userID).Result()
- if err != nil {
- return errs.Wrap(err)
- }
- } else {
- onlineStatus.PlatformIDs = newPlatformIDs
- newjsonData, err := json.Marshal(&onlineStatus)
- if err != nil {
- return errs.Wrap(err)
- }
- _, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
- if err != nil {
- return errs.Wrap(err)
- }
- }
-
- return nil
-}
-
-func (u *UserCacheRedis) refreshStatusOnline(ctx context.Context, userID string, platformID int32, isNil bool, err error, result, key string) error {
- var onlineStatus user.OnlineStatus
- if !isNil {
- err := json.Unmarshal([]byte(result), &onlineStatus)
- if err != nil {
- return errs.Wrap(err)
- }
- onlineStatus.PlatformIDs = RemoveRepeatedElementsInList(append(onlineStatus.PlatformIDs, platformID))
- } else {
- onlineStatus.PlatformIDs = append(onlineStatus.PlatformIDs, platformID)
- }
- onlineStatus.Status = constant.Online
- onlineStatus.UserID = userID
- newjsonData, err := json.Marshal(&onlineStatus)
- if err != nil {
- return errs.WrapMsg(err, "json.Marshal failed")
- }
- _, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
- if err != nil {
- return errs.Wrap(err)
- }
-
- return nil
-}
-
-type Comparable interface {
- ~int | ~string | ~float64 | ~int32
-}
-
-func RemoveRepeatedElementsInList[T Comparable](slc []T) []T {
- var result []T
- tempMap := map[T]struct{}{}
- for _, e := range slc {
- if _, found := tempMap[e]; !found {
- tempMap[e] = struct{}{}
- result = append(result, e)
- }
- }
-
- return result
-}
diff --git a/pkg/common/storage/cache/seq.go b/pkg/common/storage/cache/seq.go
deleted file mode 100644
index 091b318c8..000000000
--- a/pkg/common/storage/cache/seq.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package cache
-
-import (
- "context"
-)
-
-type SeqCache interface {
- SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
- GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
- GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
- SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
- SetMinSeqs(ctx context.Context, seqs map[string]int64) error
- GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
- GetMinSeq(ctx context.Context, conversationID string) (int64, error)
- GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
- GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
- SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
- // seqs map: key userID value minSeq
- SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
- // seqs map: key conversationID value minSeq
- SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
- // has read seq
- SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
- // k: user, v: seq
- SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error
- // k: conversation, v :seq
- UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
- GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
- GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
-}
diff --git a/pkg/common/storage/cache/seq_conversation.go b/pkg/common/storage/cache/seq_conversation.go
new file mode 100644
index 000000000..2c893a5e8
--- /dev/null
+++ b/pkg/common/storage/cache/seq_conversation.go
@@ -0,0 +1,12 @@
+package cache
+
+import "context"
+
+type SeqConversationCache interface {
+ Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
+ GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
+ SetMinSeq(ctx context.Context, conversationID string, seq int64) error
+ GetMinSeq(ctx context.Context, conversationID string) (int64, error)
+ GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
+ SetMinSeqs(ctx context.Context, seqs map[string]int64) error
+}
diff --git a/pkg/common/storage/cache/seq_user.go b/pkg/common/storage/cache/seq_user.go
new file mode 100644
index 000000000..61dbc0ab4
--- /dev/null
+++ b/pkg/common/storage/cache/seq_user.go
@@ -0,0 +1,15 @@
+package cache
+
+import "context"
+
+type SeqUser interface {
+ GetUserMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
+ SetUserMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
+ GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
+ SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
+ GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
+ SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
+ SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
+ SetUserReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error
+ GetUserReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
+}
diff --git a/pkg/common/storage/cache/token.go b/pkg/common/storage/cache/token.go
index 55b3321ef..4a0fee087 100644
--- a/pkg/common/storage/cache/token.go
+++ b/pkg/common/storage/cache/token.go
@@ -5,7 +5,9 @@ import (
)
type TokenModel interface {
- AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error
+ SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error
+	// SetTokenFlagEx sets the token and flag with an expire time.
+ SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error
GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error)
SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error
DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error
diff --git a/pkg/common/storage/cache/user.go b/pkg/common/storage/cache/user.go
index 5101c0b6c..69a11635c 100644
--- a/pkg/common/storage/cache/user.go
+++ b/pkg/common/storage/cache/user.go
@@ -17,7 +17,6 @@ package cache
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
- "github.com/openimsdk/protocol/user"
)
type UserCache interface {
@@ -28,6 +27,6 @@ type UserCache interface {
DelUsersInfo(userIDs ...string) UserCache
GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error)
DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache
- GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
- SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
+ //GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
+ //SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
}
diff --git a/pkg/common/storage/controller/auth.go b/pkg/common/storage/controller/auth.go
index 321583743..b725513d9 100644
--- a/pkg/common/storage/controller/auth.go
+++ b/pkg/common/storage/controller/auth.go
@@ -79,5 +79,9 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI
if err != nil {
return "", errs.WrapMsg(err, "token.SignedString")
}
- return tokenString, a.cache.AddTokenFlag(ctx, userID, platformID, tokenString, constant.NormalToken)
+
+ if err = a.cache.SetTokenFlagEx(ctx, userID, platformID, tokenString, constant.NormalToken); err != nil {
+ return "", err
+ }
+ return tokenString, nil
}
diff --git a/pkg/common/storage/controller/conversation.go b/pkg/common/storage/controller/conversation.go
index 18ef3f8ba..c804d1cc5 100644
--- a/pkg/common/storage/controller/conversation.go
+++ b/pkg/common/storage/controller/conversation.go
@@ -66,6 +66,9 @@ type ConversationDatabase interface {
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
// GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
// FindRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) ([]string, error)
+ FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*relationtb.VersionLog, error)
+ FindMaxConversationUserVersionCache(ctx context.Context, userID string) (*relationtb.VersionLog, error)
+ GetOwnerConversation(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (int64, []*relationtb.Conversation, error)
}
func NewConversationDatabase(conversation database.Conversation, cache cache.ConversationCache, tx tx.Tx) ConversationDatabase {
@@ -106,6 +109,7 @@ func (c *conversationDatabase) SetUsersConversationFieldTx(ctx context.Context,
if _, ok := fieldMap["recv_msg_opt"]; ok {
cache = cache.DelConversationNotReceiveMessageUserIDs(conversation.ConversationID)
}
+ cache = cache.DelConversationVersionUserIDs(haveUserIDs...)
}
NotUserIDs := stringutil.DifferenceString(haveUserIDs, userIDs)
log.ZDebug(ctx, "SetUsersConversationFieldTx", "NotUserIDs", NotUserIDs, "haveUserIDs", haveUserIDs, "userIDs", userIDs)
@@ -137,7 +141,7 @@ func (c *conversationDatabase) UpdateUsersConversationField(ctx context.Context,
return err
}
cache := c.cache.CloneConversationCache()
- cache = cache.DelUsersConversation(conversationID, userIDs...)
+ cache = cache.DelUsersConversation(conversationID, userIDs...).DelConversationVersionUserIDs(userIDs...)
if _, ok := args["recv_msg_opt"]; ok {
cache = cache.DelConversationNotReceiveMessageUserIDs(conversationID)
}
@@ -155,13 +159,14 @@ func (c *conversationDatabase) CreateConversation(ctx context.Context, conversat
cache = cache.DelConversationNotReceiveMessageUserIDs(conversation.ConversationID)
userIDs = append(userIDs, conversation.OwnerUserID)
}
- return cache.DelConversationIDs(userIDs...).DelUserConversationIDsHash(userIDs...).ChainExecDel(ctx)
+ return cache.DelConversationIDs(userIDs...).DelUserConversationIDsHash(userIDs...).DelConversationVersionUserIDs(userIDs...).ChainExecDel(ctx)
}
func (c *conversationDatabase) SyncPeerUserPrivateConversationTx(ctx context.Context, conversations []*relationtb.Conversation) error {
return c.tx.Transaction(ctx, func(ctx context.Context) error {
cache := c.cache.CloneConversationCache()
for _, conversation := range conversations {
+ cache = cache.DelConversationVersionUserIDs(conversation.OwnerUserID)
for _, v := range [][2]string{{conversation.OwnerUserID, conversation.UserID}, {conversation.UserID, conversation.OwnerUserID}} {
ownerUserID := v[0]
userID := v[1]
@@ -207,6 +212,7 @@ func (c *conversationDatabase) GetUserAllConversation(ctx context.Context, owner
func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.Conversation) error {
return c.tx.Transaction(ctx, func(ctx context.Context) error {
cache := c.cache.CloneConversationCache()
+ cache = cache.DelConversationVersionUserIDs(ownerUserID)
groupIDs := datautil.Distinct(datautil.Filter(conversations, func(e *relationtb.Conversation) (string, bool) {
return e.GroupID, e.GroupID != ""
}))
@@ -322,3 +328,28 @@ func (c *conversationDatabase) GetConversationIDsNeedDestruct(ctx context.Contex
func (c *conversationDatabase) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) {
return c.cache.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
}
+
+func (c *conversationDatabase) FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*relationtb.VersionLog, error) {
+ return c.conversationDB.FindConversationUserVersion(ctx, userID, version, limit)
+}
+
+func (c *conversationDatabase) FindMaxConversationUserVersionCache(ctx context.Context, userID string) (*relationtb.VersionLog, error) {
+ return c.cache.FindMaxConversationUserVersion(ctx, userID)
+}
+
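+// GetOwnerConversation pages the owner's conversations out of the cache and returns the total conversation count.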
+func (c *conversationDatabase) GetOwnerConversation(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (int64, []*relationtb.Conversation, error) {
+ conversationIDs, err := c.cache.GetUserConversationIDs(ctx, ownerUserID)
+ if err != nil {
+ return 0, nil, err
+ }
+ findConversationIDs := datautil.Paginate(conversationIDs, int(pagination.GetPageNumber()), int(pagination.GetShowNumber()))
+ conversations := make([]*relationtb.Conversation, 0, len(findConversationIDs))
+ for _, conversationID := range findConversationIDs {
+ conversation, err := c.cache.GetConversation(ctx, ownerUserID, conversationID)
+ if err != nil {
+ return 0, nil, err
+ }
+ conversations = append(conversations, conversation)
+ }
+ return int64(len(conversationIDs)), conversations, nil
+}
diff --git a/pkg/common/storage/controller/friend.go b/pkg/common/storage/controller/friend.go
index 1c3d9f139..e402f5980 100644
--- a/pkg/common/storage/controller/friend.go
+++ b/pkg/common/storage/controller/friend.go
@@ -77,6 +77,16 @@ type FriendDatabase interface {
// UpdateFriends updates fields for friends
UpdateFriends(ctx context.Context, ownerUserID string, friendUserIDs []string, val map[string]any) (err error)
+
+ //FindSortFriendUserIDs(ctx context.Context, ownerUserID string) ([]string, error)
+
+ FindFriendIncrVersion(ctx context.Context, ownerUserID string, version uint, limit int) (*model.VersionLog, error)
+
+ FindMaxFriendVersionCache(ctx context.Context, ownerUserID string) (*model.VersionLog, error)
+
+ FindFriendUserID(ctx context.Context, friendUserID string) ([]string, error)
+
+ OwnerIncrVersion(ctx context.Context, ownerUserID string, friendUserIDs []string, state int32) error
}
type friendDatabase struct {
@@ -175,7 +185,7 @@ func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string,
return err
}
newFriendIDs = append(newFriendIDs, ownerUserID)
- cache = cache.DelFriendIDs(newFriendIDs...)
+ cache = cache.DelFriendIDs(newFriendIDs...).DelMaxFriendVersion(newFriendIDs...)
return cache.ChainExecDel(ctx)
})
@@ -278,7 +288,7 @@ func (f *friendDatabase) AgreeFriendRequest(ctx context.Context, friendRequest *
return err
}
}
- return f.cache.DelFriendIDs(friendRequest.ToUserID, friendRequest.FromUserID).ChainExecDel(ctx)
+ return f.cache.DelFriendIDs(friendRequest.ToUserID, friendRequest.FromUserID).DelMaxFriendVersion(friendRequest.ToUserID, friendRequest.FromUserID).ChainExecDel(ctx)
})
}
@@ -287,7 +297,8 @@ func (f *friendDatabase) Delete(ctx context.Context, ownerUserID string, friendU
if err := f.friend.Delete(ctx, ownerUserID, friendUserIDs); err != nil {
return err
}
- return f.cache.DelFriendIDs(append(friendUserIDs, ownerUserID)...).ChainExecDel(ctx)
+ userIds := append(friendUserIDs, ownerUserID)
+ return f.cache.DelFriendIDs(userIds...).DelMaxFriendVersion(userIds...).ChainExecDel(ctx)
}
// UpdateRemark updates the remark for a friend. Zero value for remark is also supported.
@@ -295,7 +306,7 @@ func (f *friendDatabase) UpdateRemark(ctx context.Context, ownerUserID, friendUs
if err := f.friend.UpdateRemark(ctx, ownerUserID, friendUserID, remark); err != nil {
return err
}
- return f.cache.DelFriend(ownerUserID, friendUserID).ChainExecDel(ctx)
+ return f.cache.DelFriend(ownerUserID, friendUserID).DelMaxFriendVersion(ownerUserID).ChainExecDel(ctx)
}
// PageOwnerFriends retrieves the list of friends for the ownerUserID. It does not return an error if the result is empty.
@@ -324,9 +335,6 @@ func (f *friendDatabase) FindFriendsWithError(ctx context.Context, ownerUserID s
if err != nil {
return
}
- if len(friends) != len(friendUserIDs) {
- err = errs.ErrRecordNotFound.Wrap()
- }
return
}
@@ -341,8 +349,37 @@ func (f *friendDatabase) UpdateFriends(ctx context.Context, ownerUserID string,
if len(val) == 0 {
return nil
}
- if err := f.friend.UpdateFriends(ctx, ownerUserID, friendUserIDs, val); err != nil {
+ return f.tx.Transaction(ctx, func(ctx context.Context) error {
+ if err := f.friend.UpdateFriends(ctx, ownerUserID, friendUserIDs, val); err != nil {
+ return err
+ }
+ return f.cache.DelFriends(ownerUserID, friendUserIDs).DelMaxFriendVersion(ownerUserID).ChainExecDel(ctx)
+ })
+}
+
+//func (f *friendDatabase) FindSortFriendUserIDs(ctx context.Context, ownerUserID string) ([]string, error) {
+// return f.cache.FindSortFriendUserIDs(ctx, ownerUserID)
+//}
+
+func (f *friendDatabase) FindFriendIncrVersion(ctx context.Context, ownerUserID string, version uint, limit int) (*model.VersionLog, error) {
+ return f.friend.FindIncrVersion(ctx, ownerUserID, version, limit)
+}
+
+func (f *friendDatabase) FindMaxFriendVersionCache(ctx context.Context, ownerUserID string) (*model.VersionLog, error) {
+ return f.cache.FindMaxFriendVersion(ctx, ownerUserID)
+}
+
+func (f *friendDatabase) FindFriendUserID(ctx context.Context, friendUserID string) ([]string, error) {
+ return f.friend.FindFriendUserID(ctx, friendUserID)
+}
+
+//func (f *friendDatabase) SearchFriend(ctx context.Context, ownerUserID, keyword string, pagination pagination.Pagination) (int64, []*model.Friend, error) {
+// return f.friend.SearchFriend(ctx, ownerUserID, keyword, pagination)
+//}
+
+func (f *friendDatabase) OwnerIncrVersion(ctx context.Context, ownerUserID string, friendUserIDs []string, state int32) error {
+ if err := f.friend.IncrVersion(ctx, ownerUserID, friendUserIDs, state); err != nil {
return err
}
- return f.cache.DelFriends(ownerUserID, friendUserIDs).ChainExecDel(ctx)
+ return f.cache.DelMaxFriendVersion(ownerUserID).ChainExecDel(ctx)
}
diff --git a/pkg/common/storage/controller/group.go b/pkg/common/storage/controller/group.go
index f2a135835..072429ed0 100644
--- a/pkg/common/storage/controller/group.go
+++ b/pkg/common/storage/controller/group.go
@@ -16,17 +16,19 @@ package controller
import (
"context"
+ "time"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/common"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
- "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/db/pagination"
"github.com/openimsdk/tools/db/tx"
+ "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"
)
@@ -106,6 +108,22 @@ type GroupDatabase interface {
CountRangeEverydayTotal(ctx context.Context, start time.Time, end time.Time) (map[string]int64, error)
// DeleteGroupMemberHash deletes the hash entries for group members in specified groups.
DeleteGroupMemberHash(ctx context.Context, groupIDs []string) error
+
+ FindMemberIncrVersion(ctx context.Context, groupID string, version uint, limit int) (*model.VersionLog, error)
+ BatchFindMemberIncrVersion(ctx context.Context, groupIDs []string, versions []uint64, limits []int) (map[string]*model.VersionLog, error)
+ FindJoinIncrVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error)
+ MemberGroupIncrVersion(ctx context.Context, groupID string, userIDs []string, state int32) error
+
+ //FindSortGroupMemberUserIDs(ctx context.Context, groupID string) ([]string, error)
+ //FindSortJoinGroupIDs(ctx context.Context, userID string) ([]string, error)
+
+ FindMaxGroupMemberVersionCache(ctx context.Context, groupID string) (*model.VersionLog, error)
+ BatchFindMaxGroupMemberVersionCache(ctx context.Context, groupIDs []string) (map[string]*model.VersionLog, error)
+ FindMaxJoinGroupVersionCache(ctx context.Context, userID string) (*model.VersionLog, error)
+
+ SearchJoinGroup(ctx context.Context, userID string, keyword string, pagination pagination.Pagination) (int64, []*model.Group, error)
+
+ FindJoinGroupID(ctx context.Context, userID string) ([]string, error)
}
func NewGroupDatabase(
@@ -134,6 +152,10 @@ type groupDatabase struct {
cache cache.GroupCache
}
+func (g *groupDatabase) FindJoinGroupID(ctx context.Context, userID string) ([]string, error) {
+ return g.cache.GetJoinedGroupIDs(ctx, userID)
+}
+
func (g *groupDatabase) FindGroupMembers(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
return g.cache.GetGroupMembersInfo(ctx, groupID, userIDs)
}
@@ -174,7 +196,8 @@ func (g *groupDatabase) CreateGroup(ctx context.Context, groups []*model.Group,
DelGroupMembersHash(group.GroupID).
DelGroupsMemberNum(group.GroupID).
DelGroupMemberIDs(group.GroupID).
- DelGroupAllRoleLevel(group.GroupID)
+ DelGroupAllRoleLevel(group.GroupID).
+ DelMaxGroupMemberVersion(group.GroupID)
}
}
if len(groupMembers) > 0 {
@@ -187,7 +210,9 @@ func (g *groupDatabase) CreateGroup(ctx context.Context, groups []*model.Group,
DelGroupMemberIDs(groupMember.GroupID).
DelJoinedGroupID(groupMember.UserID).
DelGroupMembersInfo(groupMember.GroupID, groupMember.UserID).
- DelGroupAllRoleLevel(groupMember.GroupID)
+ DelGroupAllRoleLevel(groupMember.GroupID).
+ DelMaxJoinGroupVersion(groupMember.UserID).
+ DelMaxGroupMemberVersion(groupMember.GroupID)
}
}
return c.ChainExecDel(ctx)
@@ -219,10 +244,15 @@ func (g *groupDatabase) SearchGroup(ctx context.Context, keyword string, paginat
}
func (g *groupDatabase) UpdateGroup(ctx context.Context, groupID string, data map[string]any) error {
- if err := g.groupDB.UpdateMap(ctx, groupID, data); err != nil {
- return err
- }
- return g.cache.DelGroupsInfo(groupID).ChainExecDel(ctx)
+ return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
+ if err := g.groupDB.UpdateMap(ctx, groupID, data); err != nil {
+ return err
+ }
+ if err := g.groupMemberDB.MemberGroupIncrVersion(ctx, groupID, []string{""}, model.VersionStateUpdate); err != nil {
+ return err
+ }
+ return g.cache.CloneGroupCache().DelGroupsInfo(groupID).DelMaxGroupMemberVersion(groupID).ChainExecDel(ctx)
+ })
}
func (g *groupDatabase) DismissGroup(ctx context.Context, groupID string, deleteMember bool) error {
@@ -244,7 +274,19 @@ func (g *groupDatabase) DismissGroup(ctx context.Context, groupID string, delete
DelGroupsMemberNum(groupID).
DelGroupMembersHash(groupID).
DelGroupAllRoleLevel(groupID).
- DelGroupMembersInfo(groupID, userIDs...)
+ DelGroupMembersInfo(groupID, userIDs...).
+ DelMaxGroupMemberVersion(groupID).
+ DelMaxJoinGroupVersion(userIDs...)
+ for _, userID := range userIDs {
+ if err := g.groupMemberDB.JoinGroupIncrVersion(ctx, userID, []string{groupID}, model.VersionStateDelete); err != nil {
+ return err
+ }
+ }
+ } else {
+ if err := g.groupMemberDB.MemberGroupIncrVersion(ctx, groupID, []string{""}, model.VersionStateUpdate); err != nil {
+ return err
+ }
+ c = c.DelMaxGroupMemberVersion(groupID)
}
return c.DelGroupsInfo(groupID).ChainExecDel(ctx)
})
@@ -316,7 +358,9 @@ func (g *groupDatabase) HandlerGroupRequest(ctx context.Context, groupID string,
DelGroupMemberIDs(groupID).
DelGroupsMemberNum(groupID).
DelJoinedGroupID(member.UserID).
- DelGroupRoleLevel(groupID, []int32{member.RoleLevel})
+ DelGroupRoleLevel(groupID, []int32{member.RoleLevel}).
+ DelMaxJoinGroupVersion(userID).
+ DelMaxGroupMemberVersion(groupID)
if err := c.ChainExecDel(ctx); err != nil {
return err
}
@@ -326,17 +370,21 @@ func (g *groupDatabase) HandlerGroupRequest(ctx context.Context, groupID string,
}
func (g *groupDatabase) DeleteGroupMember(ctx context.Context, groupID string, userIDs []string) error {
- if err := g.groupMemberDB.Delete(ctx, groupID, userIDs); err != nil {
- return err
- }
- c := g.cache.CloneGroupCache()
- return c.DelGroupMembersHash(groupID).
- DelGroupMemberIDs(groupID).
- DelGroupsMemberNum(groupID).
- DelJoinedGroupID(userIDs...).
- DelGroupMembersInfo(groupID, userIDs...).
- DelGroupAllRoleLevel(groupID).
- ChainExecDel(ctx)
+ return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
+ if err := g.groupMemberDB.Delete(ctx, groupID, userIDs); err != nil {
+ return err
+ }
+ c := g.cache.CloneGroupCache()
+ return c.DelGroupMembersHash(groupID).
+ DelGroupMemberIDs(groupID).
+ DelGroupsMemberNum(groupID).
+ DelJoinedGroupID(userIDs...).
+ DelGroupMembersInfo(groupID, userIDs...).
+ DelGroupAllRoleLevel(groupID).
+ DelMaxGroupMemberVersion(groupID).
+ DelMaxJoinGroupVersion(userIDs...).
+ ChainExecDel(ctx)
+ })
}
func (g *groupDatabase) MapGroupMemberUserID(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error) {
@@ -357,29 +405,35 @@ func (g *groupDatabase) MapGroupMemberNum(ctx context.Context, groupIDs []string
func (g *groupDatabase) TransferGroupOwner(ctx context.Context, groupID string, oldOwnerUserID, newOwnerUserID string, roleLevel int32) error {
return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
- if err := g.groupMemberDB.UpdateRoleLevel(ctx, groupID, oldOwnerUserID, roleLevel); err != nil {
- return err
- }
- if err := g.groupMemberDB.UpdateRoleLevel(ctx, groupID, newOwnerUserID, constant.GroupOwner); err != nil {
+ if err := g.groupMemberDB.UpdateUserRoleLevels(ctx, groupID, oldOwnerUserID, roleLevel, newOwnerUserID, constant.GroupOwner); err != nil {
return err
}
c := g.cache.CloneGroupCache()
return c.DelGroupMembersInfo(groupID, oldOwnerUserID, newOwnerUserID).
DelGroupAllRoleLevel(groupID).
- DelGroupMembersHash(groupID).ChainExecDel(ctx)
+ DelGroupMembersHash(groupID).
+ DelMaxGroupMemberVersion(groupID).
+ DelGroupMemberIDs(groupID).
+ ChainExecDel(ctx)
})
}
func (g *groupDatabase) UpdateGroupMember(ctx context.Context, groupID string, userID string, data map[string]any) error {
- if err := g.groupMemberDB.Update(ctx, groupID, userID, data); err != nil {
- return err
- }
- c := g.cache.CloneGroupCache()
- c = c.DelGroupMembersInfo(groupID, userID)
- if g.groupMemberDB.IsUpdateRoleLevel(data) {
- c = c.DelGroupAllRoleLevel(groupID)
+ if len(data) == 0 {
+ return nil
}
- return c.ChainExecDel(ctx)
+ return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
+ if err := g.groupMemberDB.Update(ctx, groupID, userID, data); err != nil {
+ return err
+ }
+ c := g.cache.CloneGroupCache()
+ c = c.DelGroupMembersInfo(groupID, userID)
+ if g.groupMemberDB.IsUpdateRoleLevel(data) {
+ c = c.DelGroupAllRoleLevel(groupID).DelGroupMemberIDs(groupID)
+ }
+ c = c.DelMaxGroupMemberVersion(groupID)
+ return c.ChainExecDel(ctx)
+ })
}
func (g *groupDatabase) UpdateGroupMembers(ctx context.Context, data []*common.BatchUpdateGroupMember) error {
@@ -390,9 +444,9 @@ func (g *groupDatabase) UpdateGroupMembers(ctx context.Context, data []*common.B
return err
}
if g.groupMemberDB.IsUpdateRoleLevel(item.Map) {
- c = c.DelGroupAllRoleLevel(item.GroupID)
+ c = c.DelGroupAllRoleLevel(item.GroupID).DelGroupMemberIDs(item.GroupID)
}
- c = c.DelGroupMembersInfo(item.GroupID, item.UserID).DelGroupMembersHash(item.GroupID)
+ c = c.DelGroupMembersInfo(item.GroupID, item.UserID).DelMaxGroupMemberVersion(item.GroupID).DelGroupMembersHash(item.GroupID)
}
return c.ChainExecDel(ctx)
})
@@ -443,3 +497,71 @@ func (g *groupDatabase) DeleteGroupMemberHash(ctx context.Context, groupIDs []st
}
return c.ChainExecDel(ctx)
}
+
+func (g *groupDatabase) FindMemberIncrVersion(ctx context.Context, groupID string, version uint, limit int) (*model.VersionLog, error) {
+ return g.groupMemberDB.FindMemberIncrVersion(ctx, groupID, version, limit)
+}
+
+func (g *groupDatabase) BatchFindMemberIncrVersion(ctx context.Context, groupIDs []string, versions []uint64, limits []int) (map[string]*model.VersionLog, error) {
+ if len(groupIDs) == 0 {
+		return nil, errs.Wrap(errs.New("groupIDs is empty"))
+ }
+
+ // convert []uint64 to []uint
+ var uintVersions []uint
+ for _, version := range versions {
+ uintVersions = append(uintVersions, uint(version))
+ }
+
+ versionLogs, err := g.groupMemberDB.BatchFindMemberIncrVersion(ctx, groupIDs, uintVersions, limits)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+
+ groupMemberIncrVersionsMap := datautil.SliceToMap(versionLogs, func(e *model.VersionLog) string {
+ return e.DID
+ })
+
+ return groupMemberIncrVersionsMap, nil
+}
+
+func (g *groupDatabase) FindJoinIncrVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error) {
+ return g.groupMemberDB.FindJoinIncrVersion(ctx, userID, version, limit)
+}
+
+func (g *groupDatabase) FindMaxGroupMemberVersionCache(ctx context.Context, groupID string) (*model.VersionLog, error) {
+ return g.cache.FindMaxGroupMemberVersion(ctx, groupID)
+}
+
+func (g *groupDatabase) BatchFindMaxGroupMemberVersionCache(ctx context.Context, groupIDs []string) (map[string]*model.VersionLog, error) {
+ if len(groupIDs) == 0 {
+		return nil, errs.Wrap(errs.New("groupIDs is empty in cache"))
+ }
+ versionLogs, err := g.cache.BatchFindMaxGroupMemberVersion(ctx, groupIDs)
+ if err != nil {
+ return nil, errs.Wrap(err)
+ }
+ maxGroupMemberVersionsMap := datautil.SliceToMap(versionLogs, func(e *model.VersionLog) string {
+ return e.DID
+ })
+ return maxGroupMemberVersionsMap, nil
+}
+
+func (g *groupDatabase) FindMaxJoinGroupVersionCache(ctx context.Context, userID string) (*model.VersionLog, error) {
+ return g.cache.FindMaxJoinGroupVersion(ctx, userID)
+}
+
+func (g *groupDatabase) SearchJoinGroup(ctx context.Context, userID string, keyword string, pagination pagination.Pagination) (int64, []*model.Group, error) {
+ groupIDs, err := g.cache.GetJoinedGroupIDs(ctx, userID)
+ if err != nil {
+ return 0, nil, err
+ }
+ return g.groupDB.SearchJoin(ctx, groupIDs, keyword, pagination)
+}
+
+func (g *groupDatabase) MemberGroupIncrVersion(ctx context.Context, groupID string, userIDs []string, state int32) error {
+ if err := g.groupMemberDB.MemberGroupIncrVersion(ctx, groupID, userIDs, state); err != nil {
+ return err
+ }
+ return g.cache.DelMaxGroupMemberVersion(groupID).ChainExecDel(ctx)
+}
diff --git a/pkg/common/storage/controller/msg.go b/pkg/common/storage/controller/msg.go
index 8eb9e8e6f..4ea74ef69 100644
--- a/pkg/common/storage/controller/msg.go
+++ b/pkg/common/storage/controller/msg.go
@@ -69,29 +69,22 @@ type CommonMsgDatabase interface {
DeleteUserMsgsBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) error
// DeleteMsgsPhysicalBySeqs physically deletes messages by emptying them based on sequence numbers.
DeleteMsgsPhysicalBySeqs(ctx context.Context, conversationID string, seqs []int64) error
- SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
+ //SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
- SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
SetMinSeqs(ctx context.Context, seqs map[string]int64) error
- GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
- GetMinSeq(ctx context.Context, conversationID string) (int64, error)
- GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
- GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
- SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
- SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error)
SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
- GetMongoMaxAndMinSeq(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo int64, err error)
- GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error)
+ //GetMongoMaxAndMinSeq(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo int64, err error)
+ //GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error)
SetSendMsgStatus(ctx context.Context, id string, status int32) error
GetSendMsgStatus(ctx context.Context, id string) (int32, error)
- SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int32, msgData []*sdkws.MsgData, err error)
+ SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*sdkws.MsgData, err error)
FindOneByDocIDs(ctx context.Context, docIDs []string, seqs map[string]int64) (map[string]*sdkws.MsgData, error)
// to mq
@@ -108,7 +101,7 @@ type CommonMsgDatabase interface {
DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error)
}
-func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seq cache.SeqCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
+func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil {
return nil, err
@@ -128,29 +121,20 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seq cach
return &commonMsgDatabase{
msgDocDatabase: msgDocModel,
msg: msg,
- seq: seq,
+ seqUser: seqUser,
+ seqConversation: seqConversation,
producer: producerToRedis,
producerToMongo: producerToMongo,
producerToPush: producerToPush,
}, nil
}
-//func InitCommonMsgDatabase(rdb redis.UniversalClient, database *mongo.Database, config *tools.CronTaskConfig) (CommonMsgDatabase, error) {
-// msgDocModel, err := database.NewMsgMongo(database)
-// if err != nil {
-// return nil, err
-// }
-// //todo MsgCacheTimeout
-// msg := cache.NewMsgCache(rdb, 86400, config.RedisConfig.EnablePipeline)
-// seq := cache.NewSeqCache(rdb)
-// return NewCommonMsgDatabase(msgDocModel, msg, seq, &config.KafkaConfig)
-//}
-
type commonMsgDatabase struct {
msgDocDatabase database.Msg
msgTable model.MsgDocModel
msg cache.MsgCache
- seq cache.SeqCache
+ seqConversation cache.SeqConversationCache
+ seqUser cache.SeqUser
producer *kafka.Producer
producerToMongo *kafka.Producer
producerToPush *kafka.Producer
@@ -348,12 +332,16 @@ func (db *commonMsgDatabase) DeleteMessagesFromCache(ctx context.Context, conver
return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs)
}
-func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
- currentMaxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
- if err != nil && errs.Unwrap(err) != redis.Nil {
- log.ZError(ctx, "storage.seq.GetMaxSeq", err)
- return 0, false, err
+func (db *commonMsgDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error {
+ for userID, seq := range userSeqMap {
+ if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil {
+ return err
+ }
}
+ return nil
+}
+
+func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
lenList := len(msgs)
if int64(lenList) > db.msgTable.GetSingleGocMsgNum() {
return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap()
@@ -361,9 +349,12 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
if lenList < 1 {
return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap()
}
- if errs.Unwrap(err) == redis.Nil {
- isNew = true
+ currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs)))
+ if err != nil {
+ log.ZError(ctx, "storage.seq.Malloc", err)
+ return 0, false, err
}
+ isNew = currentMaxSeq == 0
lastMaxSeq := currentMaxSeq
userSeqMap := make(map[string]int64)
for _, m := range msgs {
@@ -379,14 +370,7 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
} else {
prommetrics.MsgInsertRedisSuccessCounter.Inc()
}
-
- err = db.seq.SetMaxSeq(ctx, conversationID, currentMaxSeq)
- if err != nil {
- log.ZError(ctx, "storage.seq.SetMaxSeq error", err, "conversationID", conversationID)
- prommetrics.SeqSetFailedCounter.Inc()
- }
-
- err = db.seq.SetHasReadSeqs(ctx, conversationID, userSeqMap)
+ err = db.setHasReadSeqs(ctx, conversationID, userSeqMap)
if err != nil {
log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID)
prommetrics.SeqSetFailedCounter.Inc()
@@ -514,12 +498,12 @@ func (db *commonMsgDatabase) getMsgBySeqsRange(ctx context.Context, userID strin
// "userMinSeq" can be set as the same value as the conversation's "maxSeq" at the moment they join the group.
// This ensures that their message retrieval starts from the point they joined.
func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (int64, int64, []*sdkws.MsgData, error) {
- userMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
+ userMinSeq, err := db.seqUser.GetUserMinSeq(ctx, conversationID, userID)
if err != nil && errs.Unwrap(err) != redis.Nil {
return 0, 0, nil, err
}
- minSeq, err := db.seq.GetMinSeq(ctx, conversationID)
- if err != nil && errs.Unwrap(err) != redis.Nil {
+ minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
+ if err != nil {
return 0, 0, nil, err
}
if userMinSeq > minSeq {
@@ -530,8 +514,8 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
log.ZWarn(ctx, "minSeq > end", errs.New("minSeq>end"), "minSeq", minSeq, "end", end)
return 0, 0, nil, nil
}
- maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
- if err != nil && errs.Unwrap(err) != redis.Nil {
+ maxSeq, err := db.seqConversation.GetMaxSeq(ctx, conversationID)
+ if err != nil {
return 0, 0, nil, err
}
log.ZDebug(ctx, "GetMsgBySeqsRange", "userMinSeq", userMinSeq, "conMinSeq", minSeq, "conMaxSeq", maxSeq, "userMaxSeq", userMaxSeq)
@@ -571,11 +555,8 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
var successMsgs []*sdkws.MsgData
log.ZDebug(ctx, "GetMsgBySeqsRange", "first seqs", seqs, "newBegin", newBegin, "newEnd", newEnd)
cachedMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, seqs)
- if err != nil {
- if err != redis.Nil {
-
- log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
- }
+ if err != nil && !errors.Is(err, redis.Nil) {
+ log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
}
successMsgs = append(successMsgs, cachedMsgs...)
log.ZDebug(ctx, "get msgs from cache", "cachedMsgs", cachedMsgs)
@@ -595,16 +576,16 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
}
func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) (int64, int64, []*sdkws.MsgData, error) {
- userMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
+ userMinSeq, err := db.seqUser.GetUserMinSeq(ctx, conversationID, userID)
if err != nil && errs.Unwrap(err) != redis.Nil {
return 0, 0, nil, err
}
- minSeq, err := db.seq.GetMinSeq(ctx, conversationID)
- if err != nil && errs.Unwrap(err) != redis.Nil {
+ minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
+ if err != nil {
return 0, 0, nil, err
}
- maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
- if err != nil && errs.Unwrap(err) != redis.Nil {
+ maxSeq, err := db.seqConversation.GetMaxSeq(ctx, conversationID)
+ if err != nil {
return 0, 0, nil, err
}
if userMinSeq < minSeq {
@@ -648,7 +629,7 @@ func (db *commonMsgDatabase) DeleteConversationMsgsAndSetMinSeq(ctx context.Cont
if minSeq == 0 {
return nil
}
- return db.seq.SetMinSeq(ctx, conversationID, minSeq)
+ return db.seqConversation.SetMinSeq(ctx, conversationID, minSeq)
}
func (db *commonMsgDatabase) UserMsgsDestruct(ctx context.Context, userID string, conversationID string, destructTime int64, lastMsgDestructTime time.Time) (seqs []int64, err error) {
@@ -693,12 +674,12 @@ func (db *commonMsgDatabase) UserMsgsDestruct(ctx context.Context, userID string
log.ZDebug(ctx, "UserMsgsDestruct", "conversationID", conversationID, "userID", userID, "seqs", seqs)
if len(seqs) > 0 {
userMinSeq := seqs[len(seqs)-1] + 1
- currentUserMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
- if err != nil && errs.Unwrap(err) != redis.Nil {
+ currentUserMinSeq, err := db.seqUser.GetUserMinSeq(ctx, conversationID, userID)
+ if err != nil {
return nil, err
}
if currentUserMinSeq < userMinSeq {
- if err := db.seq.SetConversationUserMinSeq(ctx, conversationID, userID, userMinSeq); err != nil {
+ if err := db.seqUser.SetUserMinSeq(ctx, conversationID, userID, userMinSeq); err != nil {
return nil, err
}
}
@@ -796,89 +777,40 @@ func (db *commonMsgDatabase) DeleteUserMsgsBySeqs(ctx context.Context, userID st
return nil
}
-func (db *commonMsgDatabase) DeleteMsgsBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
- return nil
-}
-
-func (db *commonMsgDatabase) CleanUpUserConversationsMsgs(ctx context.Context, user string, conversationIDs []string) {
- for _, conversationID := range conversationIDs {
- maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
- if err != nil {
- if err == redis.Nil {
- log.ZDebug(ctx, "max seq is nil", "conversationID", conversationID)
- } else {
- log.ZError(ctx, "get max seq failed", err, "conversationID", conversationID)
- }
- continue
- }
- if err := db.seq.SetMinSeq(ctx, conversationID, maxSeq+1); err != nil {
- log.ZError(ctx, "set min seq failed", err, "conversationID", conversationID, "minSeq", maxSeq+1)
- }
- }
-}
-
-func (db *commonMsgDatabase) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
- return db.seq.SetMaxSeq(ctx, conversationID, maxSeq)
-}
-
func (db *commonMsgDatabase) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
- return db.seq.GetMaxSeqs(ctx, conversationIDs)
+ return db.seqConversation.GetMaxSeqs(ctx, conversationIDs)
}
func (db *commonMsgDatabase) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
- return db.seq.GetMaxSeq(ctx, conversationID)
+ return db.seqConversation.GetMaxSeq(ctx, conversationID)
}
func (db *commonMsgDatabase) SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error {
- return db.seq.SetMinSeq(ctx, conversationID, minSeq)
+ return db.seqConversation.SetMinSeq(ctx, conversationID, minSeq)
}
func (db *commonMsgDatabase) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
- return db.seq.SetMinSeqs(ctx, seqs)
-}
-
-func (db *commonMsgDatabase) GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
- return db.seq.GetMinSeqs(ctx, conversationIDs)
-}
-
-func (db *commonMsgDatabase) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
- return db.seq.GetMinSeq(ctx, conversationID)
-}
-
-func (db *commonMsgDatabase) GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
- return db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
-}
-
-func (db *commonMsgDatabase) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error) {
- return db.seq.GetConversationUserMinSeqs(ctx, conversationID, userIDs)
-}
-
-func (db *commonMsgDatabase) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
- return db.seq.SetConversationUserMinSeq(ctx, conversationID, userID, minSeq)
-}
-
-func (db *commonMsgDatabase) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
- return db.seq.SetConversationUserMinSeqs(ctx, conversationID, seqs)
+ return db.seqConversation.SetMinSeqs(ctx, seqs)
}
func (db *commonMsgDatabase) SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
- return db.seq.SetUserConversationsMinSeqs(ctx, userID, seqs)
+ return db.seqUser.SetUserMinSeqs(ctx, userID, seqs)
}
func (db *commonMsgDatabase) UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error {
- return db.seq.UserSetHasReadSeqs(ctx, userID, hasReadSeqs)
+ return db.seqUser.SetUserReadSeqs(ctx, userID, hasReadSeqs)
}
func (db *commonMsgDatabase) SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
- return db.seq.SetHasReadSeq(ctx, userID, conversationID, hasReadSeq)
+ return db.seqUser.SetUserReadSeq(ctx, conversationID, userID, hasReadSeq)
}
func (db *commonMsgDatabase) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
- return db.seq.GetHasReadSeqs(ctx, userID, conversationIDs)
+ return db.seqUser.GetUserReadSeqs(ctx, userID, conversationIDs)
}
func (db *commonMsgDatabase) GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error) {
- return db.seq.GetHasReadSeq(ctx, userID, conversationID)
+ return db.seqUser.GetUserReadSeq(ctx, conversationID, userID)
}
func (db *commonMsgDatabase) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
@@ -894,11 +826,11 @@ func (db *commonMsgDatabase) GetConversationMinMaxSeqInMongoAndCache(ctx context
if err != nil {
return
}
- minSeqCache, err = db.seq.GetMinSeq(ctx, conversationID)
+ minSeqCache, err = db.seqConversation.GetMinSeq(ctx, conversationID)
if err != nil {
return
}
- maxSeqCache, err = db.seq.GetMaxSeq(ctx, conversationID)
+ maxSeqCache, err = db.seqConversation.GetMaxSeq(ctx, conversationID)
if err != nil {
return
}
@@ -946,7 +878,7 @@ func (db *commonMsgDatabase) RangeGroupSendCount(
return db.msgDocDatabase.RangeGroupSendCount(ctx, start, end, ase, pageNumber, showNumber)
}
-func (db *commonMsgDatabase) SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int32, msgData []*sdkws.MsgData, err error) {
+func (db *commonMsgDatabase) SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*sdkws.MsgData, err error) {
var totalMsgs []*sdkws.MsgData
total, msgs, err := db.msgDocDatabase.SearchMessage(ctx, req)
if err != nil {
@@ -1010,33 +942,8 @@ func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, d
}
}
-//func (db *commonMsgDatabase) ClearMsg(ctx context.Context, ts int64) (err error) {
-// var (
-// docNum int
-// msgNum int
-// start = time.Now()
-// )
-// for {
-// msgs, err := db.msgDocDatabase.GetBeforeMsg(ctx, ts, 100)
-// if err != nil {
-// return err
-// }
-// if len(msgs) == 0 {
-// return nil
-// }
-// for _, msg := range msgs {
-// num, err := db.deleteOneMsg(ctx, ts, msg)
-// if err != nil {
-// return err
-// }
-// docNum++
-// msgNum += num
-// }
-// }
-//}
-
func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID string, seq int64) error {
- dbSeq, err := db.seq.GetMinSeq(ctx, conversationID)
+ dbSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
if err != nil {
if errors.Is(errs.Unwrap(err), redis.Nil) {
return nil
@@ -1046,5 +953,5 @@ func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID strin
if dbSeq >= seq {
return nil
}
- return db.seq.SetMinSeq(ctx, conversationID, seq)
+ return db.seqConversation.SetMinSeq(ctx, conversationID, seq)
}
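
The msg-controller hunks above split the old SeqCache into two collaborators: seqConversation (per-conversation min/max sequence, with Malloc for batch allocation) and seqUser (per-user read and min sequence). From the call site in BatchInsertChat2Cache, Malloc(ctx, conversationID, n) appears to reserve n sequence numbers in one step and return the value before the reservation, so the batch takes the contiguous range (prev, prev+n] and isNew falls out of prev == 0 instead of probing for redis.Nil. A minimal in-memory sketch of that assumed contract (the real implementation lives in the storage cache layer added elsewhere in this PR):

```go
package example // illustrative sketch, not part of the patch

import (
	"context"
	"sync"
)

// Toy, process-local stand-in for SeqConversationCache.Malloc, shown only to
// illustrate the allocation contract BatchInsertChat2Cache now relies on.
type memSeq struct {
	mu  sync.Mutex
	max map[string]int64
}

func newMemSeq() *memSeq { return &memSeq{max: make(map[string]int64)} }

func (s *memSeq) Malloc(_ context.Context, conversationID string, size int64) (int64, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	prev := s.max[conversationID]       // 0 for a brand-new conversation, hence isNew = prev == 0
	s.max[conversationID] = prev + size // reserve the contiguous range (prev, prev+size]
	return prev, nil                    // the batch is assigned prev+1 .. prev+size
}
```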
diff --git a/pkg/common/storage/controller/s3.go b/pkg/common/storage/controller/s3.go
index b0ad61203..9b56661a5 100644
--- a/pkg/common/storage/controller/s3.go
+++ b/pkg/common/storage/controller/s3.go
@@ -16,13 +16,15 @@ package controller
import (
"context"
- redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"path/filepath"
"time"
+ redisCache "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+ "github.com/openimsdk/tools/db/pagination"
"github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cont"
"github.com/redis/go-redis/v9"
@@ -38,20 +40,27 @@ type S3Database interface {
SetObject(ctx context.Context, info *model.Object) error
StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error)
FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error)
+ FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error)
+ DeleteObject(ctx context.Context, name string) error
+ DeleteSpecifiedData(ctx context.Context, engine string, name string) error
+ FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error)
+ DelS3Key(ctx context.Context, engine string, keys ...string) error
}
func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj database.ObjectInfo) S3Database {
return &s3Database{
- s3: cont.New(redis2.NewS3Cache(rdb, s3), s3),
- cache: redis2.NewObjectCacheRedis(rdb, obj),
- db: obj,
+ s3: cont.New(redisCache.NewS3Cache(rdb, s3), s3),
+ cache: redisCache.NewObjectCacheRedis(rdb, obj),
+ s3cache: redisCache.NewS3Cache(rdb, s3),
+ db: obj,
}
}
type s3Database struct {
- s3 *cont.Controller
- cache cache.ObjectCache
- db database.ObjectInfo
+ s3 *cont.Controller
+ cache cache.ObjectCache
+ s3cache cont.S3Cache
+ db database.ObjectInfo
}
func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
@@ -111,3 +120,22 @@ func (s *s3Database) StatObject(ctx context.Context, name string) (*s3.ObjectInf
func (s *s3Database) FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error) {
return s.s3.FormData(ctx, name, size, contentType, duration)
}
+func (s *s3Database) FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) {
+
+ return s.db.FindByExpires(ctx, duration, pagination)
+}
+
+func (s *s3Database) DeleteObject(ctx context.Context, name string) error {
+ return s.s3.DeleteObject(ctx, name)
+}
+func (s *s3Database) DeleteSpecifiedData(ctx context.Context, engine string, name string) error {
+ return s.db.Delete(ctx, engine, name)
+}
+
+func (s *s3Database) FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error) {
+ return s.db.FindNotDelByS3(ctx, key, duration)
+}
+
+func (s *s3Database) DelS3Key(ctx context.Context, engine string, keys ...string) error {
+ return s.s3cache.DelS3Key(ctx, engine, keys...)
+}
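
The new S3Database methods above cover what an expiry sweep needs: FindByExpires pages through object records older than a cutoff, FindNotDelByS3 reports whether any live record still references the same S3 key, and DeleteSpecifiedData, DeleteObject and DelS3Key remove the metadata row, the stored object and the cached key respectively. A hedged sketch of how a cleanup job might chain them; the loop shape, the pagination value and the obj.Key / obj.Name field names are assumptions rather than the shipped cron code:

```go
package example // illustrative sketch, not part of the patch

import (
	"context"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
	"github.com/openimsdk/tools/db/pagination"
)

// Sketch of an expiry sweep (assumed caller shape, not the shipped cron task).
func sweepExpired(ctx context.Context, s3db controller.S3Database, engine string,
	cutoff time.Time, pag pagination.Pagination) error {
	_, objs, err := s3db.FindByExpires(ctx, cutoff, pag)
	if err != nil {
		return err
	}
	for _, obj := range objs {
		// Only remove the underlying S3 object when no live record still references its key.
		refs, err := s3db.FindNotDelByS3(ctx, obj.Key, cutoff)
		if err != nil {
			return err
		}
		if refs == 0 {
			if err := s3db.DeleteObject(ctx, obj.Key); err != nil {
				return err
			}
			if err := s3db.DelS3Key(ctx, engine, obj.Key); err != nil {
				return err
			}
		}
		if err := s3db.DeleteSpecifiedData(ctx, engine, obj.Name); err != nil {
			return err
		}
	}
	return nil
}
```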
diff --git a/pkg/common/storage/controller/third.go b/pkg/common/storage/controller/third.go
index 344501466..a9c2ae403 100644
--- a/pkg/common/storage/controller/third.go
+++ b/pkg/common/storage/controller/third.go
@@ -16,9 +16,10 @@ package controller
import (
"context"
+ "time"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
- "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/db/pagination"
diff --git a/pkg/common/storage/controller/user.go b/pkg/common/storage/controller/user.go
index 09dc2db22..533eac78f 100644
--- a/pkg/common/storage/controller/user.go
+++ b/pkg/common/storage/controller/user.go
@@ -60,18 +60,8 @@ type UserDatabase interface {
CountTotal(ctx context.Context, before *time.Time) (int64, error)
// CountRangeEverydayTotal Get the user increment in the range
CountRangeEverydayTotal(ctx context.Context, start time.Time, end time.Time) (map[string]int64, error)
- // SubscribeUsersStatus Subscribe a user's presence status
- SubscribeUsersStatus(ctx context.Context, userID string, userIDs []string) error
- // UnsubscribeUsersStatus unsubscribe a user's presence status
- UnsubscribeUsersStatus(ctx context.Context, userID string, userIDs []string) error
- // GetAllSubscribeList Get a list of all subscriptions
- GetAllSubscribeList(ctx context.Context, userID string) ([]string, error)
- // GetSubscribedList Get all subscribed lists
- GetSubscribedList(ctx context.Context, userID string) ([]string, error)
- // GetUserStatus Get the online status of the user
- GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
- // SetUserStatus Set the user status and store the user status in redis
- SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
+
+ SortQuery(ctx context.Context, userIDName map[string]string, asc bool) ([]*model.User, error)
// CRUD user command
AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error
@@ -82,14 +72,13 @@ type UserDatabase interface {
}
type userDatabase struct {
- tx tx.Tx
- userDB database.User
- cache cache.UserCache
- mongoDB database.SubscribeUser
+ tx tx.Tx
+ userDB database.User
+ cache cache.UserCache
}
-func NewUserDatabase(userDB database.User, cache cache.UserCache, tx tx.Tx, mongoDB database.SubscribeUser) UserDatabase {
- return &userDatabase{userDB: userDB, cache: cache, tx: tx, mongoDB: mongoDB}
+func NewUserDatabase(userDB database.User, cache cache.UserCache, tx tx.Tx) UserDatabase {
+ return &userDatabase{userDB: userDB, cache: cache, tx: tx}
}
func (u *userDatabase) InitOnce(ctx context.Context, users []*model.User) error {
@@ -121,6 +110,7 @@ func (u *userDatabase) InitOnce(ctx context.Context, users []*model.User) error
// FindWithError Get the information of the specified user and return an error if the userID is not found.
func (u *userDatabase) FindWithError(ctx context.Context, userIDs []string) (users []*model.User, err error) {
+ userIDs = datautil.Distinct(userIDs)
users, err = u.cache.GetUsersInfo(ctx, userIDs)
if err != nil {
return
@@ -197,7 +187,7 @@ func (u *userDatabase) GetAllUserID(ctx context.Context, pagination pagination.P
}
func (u *userDatabase) GetUserByID(ctx context.Context, userID string) (user *model.User, err error) {
- return u.userDB.Take(ctx, userID)
+ return u.cache.GetUserInfo(ctx, userID)
}
// CountTotal Get the total number of users.
@@ -210,45 +200,8 @@ func (u *userDatabase) CountRangeEverydayTotal(ctx context.Context, start time.T
return u.userDB.CountRangeEverydayTotal(ctx, start, end)
}
-// SubscribeUsersStatus Subscribe or unsubscribe a user's presence status.
-func (u *userDatabase) SubscribeUsersStatus(ctx context.Context, userID string, userIDs []string) error {
- err := u.mongoDB.AddSubscriptionList(ctx, userID, userIDs)
- return err
-}
-
-// UnsubscribeUsersStatus unsubscribe a user's presence status.
-func (u *userDatabase) UnsubscribeUsersStatus(ctx context.Context, userID string, userIDs []string) error {
- err := u.mongoDB.UnsubscriptionList(ctx, userID, userIDs)
- return err
-}
-
-// GetAllSubscribeList Get a list of all subscriptions.
-func (u *userDatabase) GetAllSubscribeList(ctx context.Context, userID string) ([]string, error) {
- list, err := u.mongoDB.GetAllSubscribeList(ctx, userID)
- if err != nil {
- return nil, err
- }
- return list, nil
-}
-
-// GetSubscribedList Get all subscribed lists.
-func (u *userDatabase) GetSubscribedList(ctx context.Context, userID string) ([]string, error) {
- list, err := u.mongoDB.GetSubscribedList(ctx, userID)
- if err != nil {
- return nil, err
- }
- return list, nil
-}
-
-// GetUserStatus get user status.
-func (u *userDatabase) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
- onlineStatusList, err := u.cache.GetUserStatus(ctx, userIDs)
- return onlineStatusList, err
-}
-
-// SetUserStatus Set the user status and save it in redis.
-func (u *userDatabase) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
- return u.cache.SetUserStatus(ctx, userID, status, platformID)
+func (u *userDatabase) SortQuery(ctx context.Context, userIDName map[string]string, asc bool) ([]*model.User, error) {
+ return u.userDB.SortQuery(ctx, userIDName, asc)
}
func (u *userDatabase) AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error {
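
Two behavioural notes on the user-controller hunks above: FindWithError now deduplicates the incoming IDs with datautil.Distinct before the cache lookup, and GetUserByID reads through the cache instead of calling userDB.Take directly. The sketch below illustrates the read-through behaviour that change assumes from UserCache, using a plain map in place of Redis, so it is illustrative only:

```go
package example // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
)

// Toy read-through lookup: serve from the local cache when possible, fall
// back to Mongo via database.User.Take, and repopulate for the next caller.
func getUserInfo(ctx context.Context, local map[string]*model.User, db database.User, userID string) (*model.User, error) {
	if u, ok := local[userID]; ok { // cache hit
		return u, nil
	}
	u, err := db.Take(ctx, userID) // cache miss: read from Mongo
	if err != nil {
		return nil, err
	}
	local[userID] = u // populate for subsequent lookups
	return u, nil
}
```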
diff --git a/pkg/common/storage/database/conversation.go b/pkg/common/storage/database/conversation.go
index 46aa02d98..85f3dd668 100644
--- a/pkg/common/storage/database/conversation.go
+++ b/pkg/common/storage/database/conversation.go
@@ -22,7 +22,6 @@ import (
type Conversation interface {
Create(ctx context.Context, conversations []*model.Conversation) (err error)
- Delete(ctx context.Context, groupIDs []string) (err error)
UpdateByMap(ctx context.Context, userIDs []string, conversationID string, args map[string]any) (rows int64, err error)
Update(ctx context.Context, conversation *model.Conversation) (err error)
Find(ctx context.Context, ownerUserID string, conversationIDs []string) (conversations []*model.Conversation, err error)
@@ -39,4 +38,5 @@ type Conversation interface {
GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*model.Conversation, error)
GetConversationIDsNeedDestruct(ctx context.Context) ([]*model.Conversation, error)
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
+ FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error)
}
diff --git a/pkg/common/storage/database/friend.go b/pkg/common/storage/database/friend.go
index 33d9c17bc..b596411fc 100644
--- a/pkg/common/storage/database/friend.go
+++ b/pkg/common/storage/database/friend.go
@@ -16,6 +16,7 @@ package database
import (
"context"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/pagination"
)
@@ -46,4 +47,14 @@ type Friend interface {
FindFriendUserIDs(ctx context.Context, ownerUserID string) (friendUserIDs []string, err error)
// UpdateFriends update friends' fields
UpdateFriends(ctx context.Context, ownerUserID string, friendUserIDs []string, val map[string]any) (err error)
+
+ FindIncrVersion(ctx context.Context, ownerUserID string, version uint, limit int) (*model.VersionLog, error)
+
+ FindFriendUserID(ctx context.Context, friendUserID string) ([]string, error)
+
+ //SearchFriend(ctx context.Context, ownerUserID, keyword string, pagination pagination.Pagination) (int64, []*model.Friend, error)
+
+ FindOwnerFriendUserIds(ctx context.Context, ownerUserID string, limit int) ([]string, error)
+
+ IncrVersion(ctx context.Context, ownerUserID string, friendUserIDs []string, state int32) error
}
diff --git a/pkg/common/storage/database/group.go b/pkg/common/storage/database/group.go
index 712db09d2..7ef22f6c9 100644
--- a/pkg/common/storage/database/group.go
+++ b/pkg/common/storage/database/group.go
@@ -32,4 +32,8 @@ type Group interface {
CountTotal(ctx context.Context, before *time.Time) (count int64, err error)
// Get Group total quantity every day
CountRangeEverydayTotal(ctx context.Context, start time.Time, end time.Time) (map[string]int64, error)
+
+ FindJoinSortGroupID(ctx context.Context, groupIDs []string) ([]string, error)
+
+ SearchJoin(ctx context.Context, groupIDs []string, keyword string, pagination pagination.Pagination) (int64, []*model.Group, error)
}
diff --git a/pkg/common/storage/database/group_member.go b/pkg/common/storage/database/group_member.go
index f57f2c317..0ddf0654c 100644
--- a/pkg/common/storage/database/group_member.go
+++ b/pkg/common/storage/database/group_member.go
@@ -16,6 +16,7 @@ package database
import (
"context"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/pagination"
)
@@ -25,8 +26,11 @@ type GroupMember interface {
Delete(ctx context.Context, groupID string, userIDs []string) (err error)
Update(ctx context.Context, groupID string, userID string, data map[string]any) (err error)
UpdateRoleLevel(ctx context.Context, groupID string, userID string, roleLevel int32) error
+ UpdateUserRoleLevels(ctx context.Context, groupID string, firstUserID string, firstUserRoleLevel int32, secondUserID string, secondUserRoleLevel int32) error
FindMemberUserID(ctx context.Context, groupID string) (userIDs []string, err error)
Take(ctx context.Context, groupID string, userID string) (groupMember *model.GroupMember, err error)
+ Find(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error)
+ FindInGroup(ctx context.Context, userID string, groupIDs []string) ([]*model.GroupMember, error)
TakeOwner(ctx context.Context, groupID string) (groupMember *model.GroupMember, err error)
SearchMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (total int64, groupList []*model.GroupMember, err error)
FindRoleLevelUserIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)
@@ -34,4 +38,9 @@ type GroupMember interface {
TakeGroupMemberNum(ctx context.Context, groupID string) (count int64, err error)
FindUserManagedGroupID(ctx context.Context, userID string) (groupIDs []string, err error)
IsUpdateRoleLevel(data map[string]any) bool
+ JoinGroupIncrVersion(ctx context.Context, userID string, groupIDs []string, state int32) error
+ MemberGroupIncrVersion(ctx context.Context, groupID string, userIDs []string, state int32) error
+ FindMemberIncrVersion(ctx context.Context, groupID string, version uint, limit int) (*model.VersionLog, error)
+ BatchFindMemberIncrVersion(ctx context.Context, groupIDs []string, versions []uint, limits []int) ([]*model.VersionLog, error)
+ FindJoinIncrVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error)
}
diff --git a/pkg/common/storage/database/mgo/black.go b/pkg/common/storage/database/mgo/black.go
index cf74cfab1..4a7a35e6f 100644
--- a/pkg/common/storage/database/mgo/black.go
+++ b/pkg/common/storage/database/mgo/black.go
@@ -27,7 +27,7 @@ import (
)
func NewBlackMongo(db *mongo.Database) (database.Black, error) {
- coll := db.Collection("black")
+ coll := db.Collection(database.BlackName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "owner_user_id", Value: 1},
diff --git a/pkg/common/storage/database/mgo/conversation.go b/pkg/common/storage/database/mgo/conversation.go
index 9c35f841b..3d505f1d3 100644
--- a/pkg/common/storage/database/mgo/conversation.go
+++ b/pkg/common/storage/database/mgo/conversation.go
@@ -16,6 +16,7 @@ package mgo
import (
"context"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"
@@ -29,7 +30,7 @@ import (
)
func NewConversationMongo(db *mongo.Database) (*ConversationMgo, error) {
- coll := db.Collection("conversation")
+ coll := db.Collection(database.ConversationName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "owner_user_id", Value: 1},
@@ -40,40 +41,71 @@ func NewConversationMongo(db *mongo.Database) (*ConversationMgo, error) {
if err != nil {
return nil, errs.Wrap(err)
}
- return &ConversationMgo{coll: coll}, nil
+ version, err := NewVersionLog(db.Collection(database.ConversationVersionName))
+ if err != nil {
+ return nil, err
+ }
+ return &ConversationMgo{version: version, coll: coll}, nil
}
type ConversationMgo struct {
- coll *mongo.Collection
+ version database.VersionLog
+ coll *mongo.Collection
}
func (c *ConversationMgo) Create(ctx context.Context, conversations []*model.Conversation) (err error) {
- return mongoutil.InsertMany(ctx, c.coll, conversations)
-}
-
-func (c *ConversationMgo) Delete(ctx context.Context, groupIDs []string) (err error) {
- return mongoutil.DeleteMany(ctx, c.coll, bson.M{"group_id": bson.M{"$in": groupIDs}})
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.InsertMany(ctx, c.coll, conversations)
+ }, func() error {
+ userConversation := make(map[string][]string)
+ for _, conversation := range conversations {
+ userConversation[conversation.OwnerUserID] = append(userConversation[conversation.OwnerUserID], conversation.ConversationID)
+ }
+ for userID, conversationIDs := range userConversation {
+ if err := c.version.IncrVersion(ctx, userID, conversationIDs, model.VersionStateInsert); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
}
-func (c *ConversationMgo) UpdateByMap(ctx context.Context, userIDs []string, conversationID string, args map[string]any) (rows int64, err error) {
- if len(args) == 0 {
+func (c *ConversationMgo) UpdateByMap(ctx context.Context, userIDs []string, conversationID string, args map[string]any) (int64, error) {
+ if len(args) == 0 || len(userIDs) == 0 {
return 0, nil
}
filter := bson.M{
"conversation_id": conversationID,
+ "owner_user_id": bson.M{"$in": userIDs},
}
- if len(userIDs) > 0 {
- filter["owner_user_id"] = bson.M{"$in": userIDs}
- }
- res, err := mongoutil.UpdateMany(ctx, c.coll, filter, bson.M{"$set": args})
+ var rows int64
+ err := mongoutil.IncrVersion(func() error {
+ res, err := mongoutil.UpdateMany(ctx, c.coll, filter, bson.M{"$set": args})
+ if err != nil {
+ return err
+ }
+ rows = res.ModifiedCount
+ return nil
+ }, func() error {
+ for _, userID := range userIDs {
+ if err := c.version.IncrVersion(ctx, userID, []string{conversationID}, model.VersionStateUpdate); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
if err != nil {
return 0, err
}
- return res.ModifiedCount, nil
+ return rows, nil
}
func (c *ConversationMgo) Update(ctx context.Context, conversation *model.Conversation) (err error) {
- return mongoutil.UpdateOne(ctx, c.coll, bson.M{"owner_user_id": conversation.OwnerUserID, "conversation_id": conversation.ConversationID}, bson.M{"$set": conversation}, true)
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.UpdateOne(ctx, c.coll, bson.M{"owner_user_id": conversation.OwnerUserID, "conversation_id": conversation.ConversationID}, bson.M{"$set": conversation}, true)
+ }, func() error {
+ return c.version.IncrVersion(ctx, conversation.OwnerUserID, []string{conversation.ConversationID}, model.VersionStateUpdate)
+ })
}
func (c *ConversationMgo) Find(ctx context.Context, ownerUserID string, conversationIDs []string) (conversations []*model.Conversation, err error) {
@@ -177,3 +209,7 @@ func (c *ConversationMgo) GetConversationNotReceiveMessageUserIDs(ctx context.Co
options.Find().SetProjection(bson.M{"_id": 0, "owner_user_id": 1}),
)
}
+
+func (c *ConversationMgo) FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error) {
+ return c.version.FindChangeLog(ctx, userID, version, limit)
+}
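
Every conversation write above is wrapped in mongoutil.IncrVersion, pairing the primary write with a per-owner version-log bump so that FindConversationUserVersion can later page the change history back to clients. The call sites only rely on the sequencing sketched below; the actual helper may add error wrapping or session handling on top of it:

```go
package example // illustrative sketch, not part of the patch

// Minimal sketch of the contract these call sites assume from
// mongoutil.IncrVersion: run each step in order and stop at the first
// failure, so the version log is only bumped after the data write succeeds.
func incrVersion(fns ...func() error) error {
	for _, fn := range fns {
		if err := fn(); err != nil {
			return err
		}
	}
	return nil
}
```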
diff --git a/pkg/common/storage/database/mgo/friend.go b/pkg/common/storage/database/mgo/friend.go
index ffa006d01..76c82bac2 100644
--- a/pkg/common/storage/database/mgo/friend.go
+++ b/pkg/common/storage/database/mgo/friend.go
@@ -18,6 +18,8 @@ import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "time"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/pagination"
@@ -28,12 +30,13 @@ import (
// FriendMgo implements Friend using MongoDB as the storage backend.
type FriendMgo struct {
- coll *mongo.Collection
+ coll *mongo.Collection
+ owner database.VersionLog
}
// NewFriendMongo creates a new instance of FriendMgo with the provided MongoDB database.
func NewFriendMongo(db *mongo.Database) (database.Friend, error) {
- coll := db.Collection("friend")
+ coll := db.Collection(database.FriendName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "owner_user_id", Value: 1},
@@ -44,12 +47,41 @@ func NewFriendMongo(db *mongo.Database) (database.Friend, error) {
if err != nil {
return nil, err
}
- return &FriendMgo{coll: coll}, nil
+ owner, err := NewVersionLog(db.Collection(database.FriendVersionName))
+ if err != nil {
+ return nil, err
+ }
+ return &FriendMgo{coll: coll, owner: owner}, nil
+}
+
+func (f *FriendMgo) friendSort() any {
+ return bson.D{{"is_pinned", -1}, {"_id", 1}}
}
// Create inserts multiple friend records.
func (f *FriendMgo) Create(ctx context.Context, friends []*model.Friend) error {
- return mongoutil.InsertMany(ctx, f.coll, friends)
+ for i, friend := range friends {
+ if friend.ID.IsZero() {
+ friends[i].ID = primitive.NewObjectID()
+ }
+ if friend.CreateTime.IsZero() {
+ friends[i].CreateTime = time.Now()
+ }
+ }
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.InsertMany(ctx, f.coll, friends)
+ }, func() error {
+ mp := make(map[string][]string)
+ for _, friend := range friends {
+ mp[friend.OwnerUserID] = append(mp[friend.OwnerUserID], friend.FriendUserID)
+ }
+ for ownerUserID, friendUserIDs := range mp {
+ if err := f.owner.IncrVersion(ctx, ownerUserID, friendUserIDs, model.VersionStateInsert); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
}
// Delete removes specified friends of the owner user.
@@ -58,11 +90,15 @@ func (f *FriendMgo) Delete(ctx context.Context, ownerUserID string, friendUserID
"owner_user_id": ownerUserID,
"friend_user_id": bson.M{"$in": friendUserIDs},
}
- return mongoutil.DeleteOne(ctx, f.coll, filter)
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.DeleteOne(ctx, f.coll, filter)
+ }, func() error {
+ return f.owner.IncrVersion(ctx, ownerUserID, friendUserIDs, model.VersionStateDelete)
+ })
}
// UpdateByMap updates specific fields of a friend document using a map.
-func (f *FriendMgo) UpdateByMap(ctx context.Context, ownerUserID string, friendUserID string, args map[string]interface{}) error {
+func (f *FriendMgo) UpdateByMap(ctx context.Context, ownerUserID string, friendUserID string, args map[string]any) error {
if len(args) == 0 {
return nil
}
@@ -70,30 +106,61 @@ func (f *FriendMgo) UpdateByMap(ctx context.Context, ownerUserID string, friendU
"owner_user_id": ownerUserID,
"friend_user_id": friendUserID,
}
- return mongoutil.UpdateOne(ctx, f.coll, filter, bson.M{"$set": args}, true)
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.UpdateOne(ctx, f.coll, filter, bson.M{"$set": args}, true)
+ }, func() error {
+ var friendUserIDs []string
+ if f.IsUpdateIsPinned(args) {
+ friendUserIDs = []string{model.VersionSortChangeID, friendUserID}
+ } else {
+ friendUserIDs = []string{friendUserID}
+ }
+ return f.owner.IncrVersion(ctx, ownerUserID, friendUserIDs, model.VersionStateUpdate)
+ })
}
-// Update modifies multiple friend documents.
-// func (f *FriendMgo) Update(ctx context.Context, friends []*relation.Friend) error {
-// filter := bson.M{
-// "owner_user_id": ownerUserID,
-// "friend_user_id": friendUserID,
-// }
-// return mgotool.UpdateMany(ctx, f.coll, filter, friends)
-// }
-
// UpdateRemark updates the remark for a specific friend.
func (f *FriendMgo) UpdateRemark(ctx context.Context, ownerUserID, friendUserID, remark string) error {
return f.UpdateByMap(ctx, ownerUserID, friendUserID, map[string]any{"remark": remark})
}
+func (f *FriendMgo) fillTime(friends ...*model.Friend) {
+ for i, friend := range friends {
+ if friend.CreateTime.IsZero() {
+ friends[i].CreateTime = friend.ID.Timestamp()
+ }
+ }
+}
+
+func (f *FriendMgo) findOne(ctx context.Context, filter any) (*model.Friend, error) {
+ friend, err := mongoutil.FindOne[*model.Friend](ctx, f.coll, filter)
+ if err != nil {
+ return nil, err
+ }
+ f.fillTime(friend)
+ return friend, nil
+}
+
+func (f *FriendMgo) find(ctx context.Context, filter any) ([]*model.Friend, error) {
+ friends, err := mongoutil.Find[*model.Friend](ctx, f.coll, filter)
+ if err != nil {
+ return nil, err
+ }
+ f.fillTime(friends...)
+ return friends, nil
+}
+
+func (f *FriendMgo) findPage(ctx context.Context, filter any, pagination pagination.Pagination, opts ...*options.FindOptions) (int64, []*model.Friend, error) {
+ return mongoutil.FindPage[*model.Friend](ctx, f.coll, filter, pagination, opts...)
+}
+
// Take retrieves a single friend document. Returns an error if not found.
func (f *FriendMgo) Take(ctx context.Context, ownerUserID, friendUserID string) (*model.Friend, error) {
filter := bson.M{
"owner_user_id": ownerUserID,
"friend_user_id": friendUserID,
}
- return mongoutil.FindOne[*model.Friend](ctx, f.coll, filter)
+ return f.findOne(ctx, filter)
}
// FindUserState finds the friendship status between two users.
@@ -104,7 +171,7 @@ func (f *FriendMgo) FindUserState(ctx context.Context, userID1, userID2 string)
{"owner_user_id": userID2, "friend_user_id": userID1},
},
}
- return mongoutil.Find[*model.Friend](ctx, f.coll, filter)
+ return f.find(ctx, filter)
}
// FindFriends retrieves a list of friends for a given owner. Missing friends do not cause an error.
@@ -113,7 +180,7 @@ func (f *FriendMgo) FindFriends(ctx context.Context, ownerUserID string, friendU
"owner_user_id": ownerUserID,
"friend_user_id": bson.M{"$in": friendUserIDs},
}
- return mongoutil.Find[*model.Friend](ctx, f.coll, filter)
+ return f.find(ctx, filter)
}
// FindReversalFriends finds users who have added the specified user as a friend.
@@ -122,30 +189,38 @@ func (f *FriendMgo) FindReversalFriends(ctx context.Context, friendUserID string
"owner_user_id": bson.M{"$in": ownerUserIDs},
"friend_user_id": friendUserID,
}
- return mongoutil.Find[*model.Friend](ctx, f.coll, filter)
+ return f.find(ctx, filter)
}
// FindOwnerFriends retrieves a paginated list of friends for a given owner.
func (f *FriendMgo) FindOwnerFriends(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (int64, []*model.Friend, error) {
filter := bson.M{"owner_user_id": ownerUserID}
- return mongoutil.FindPage[*model.Friend](ctx, f.coll, filter, pagination)
+ opt := options.Find().SetSort(f.friendSort())
+ return f.findPage(ctx, filter, pagination, opt)
+}
+
+func (f *FriendMgo) FindOwnerFriendUserIds(ctx context.Context, ownerUserID string, limit int) ([]string, error) {
+ filter := bson.M{"owner_user_id": ownerUserID}
+ opt := options.Find().SetProjection(bson.M{"_id": 0, "friend_user_id": 1}).SetSort(f.friendSort()).SetLimit(int64(limit))
+ return mongoutil.Find[string](ctx, f.coll, filter, opt)
}
// FindInWhoseFriends finds users who have added the specified user as a friend, with pagination.
func (f *FriendMgo) FindInWhoseFriends(ctx context.Context, friendUserID string, pagination pagination.Pagination) (int64, []*model.Friend, error) {
filter := bson.M{"friend_user_id": friendUserID}
- return mongoutil.FindPage[*model.Friend](ctx, f.coll, filter, pagination)
+ opt := options.Find().SetSort(f.friendSort())
+ return f.findPage(ctx, filter, pagination, opt)
}
// FindFriendUserIDs retrieves a list of friend user IDs for a given owner.
func (f *FriendMgo) FindFriendUserIDs(ctx context.Context, ownerUserID string) ([]string, error) {
filter := bson.M{"owner_user_id": ownerUserID}
- return mongoutil.Find[string](ctx, f.coll, filter, options.Find().SetProjection(bson.M{"_id": 0, "friend_user_id": 1}))
+ return mongoutil.Find[string](ctx, f.coll, filter, options.Find().SetProjection(bson.M{"_id": 0, "friend_user_id": 1}).SetSort(f.friendSort()))
}
func (f *FriendMgo) UpdateFriends(ctx context.Context, ownerUserID string, friendUserIDs []string, val map[string]any) error {
// Ensure there are IDs to update
- if len(friendUserIDs) == 0 {
+ if len(friendUserIDs) == 0 || len(val) == 0 {
return nil // Or return an error if you expect there to always be IDs
}
@@ -158,7 +233,38 @@ func (f *FriendMgo) UpdateFriends(ctx context.Context, ownerUserID string, frien
// Create an update document
update := bson.M{"$set": val}
- // Perform the update operation for all matching documents
- _, err := mongoutil.UpdateMany(ctx, f.coll, filter, update)
- return err
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.Ignore(mongoutil.UpdateMany(ctx, f.coll, filter, update))
+ }, func() error {
+ var userIDs []string
+ if f.IsUpdateIsPinned(val) {
+ userIDs = append([]string{model.VersionSortChangeID}, friendUserIDs...)
+ } else {
+ userIDs = friendUserIDs
+ }
+ return f.owner.IncrVersion(ctx, ownerUserID, userIDs, model.VersionStateUpdate)
+ })
+}
+
+func (f *FriendMgo) FindIncrVersion(ctx context.Context, ownerUserID string, version uint, limit int) (*model.VersionLog, error) {
+ return f.owner.FindChangeLog(ctx, ownerUserID, version, limit)
+}
+
+func (f *FriendMgo) FindFriendUserID(ctx context.Context, friendUserID string) ([]string, error) {
+ filter := bson.M{
+ "friend_user_id": friendUserID,
+ }
+ return mongoutil.Find[string](ctx, f.coll, filter, options.Find().SetProjection(bson.M{"_id": 0, "owner_user_id": 1}).SetSort(f.friendSort()))
+}
+
+func (f *FriendMgo) IncrVersion(ctx context.Context, ownerUserID string, friendUserIDs []string, state int32) error {
+ return f.owner.IncrVersion(ctx, ownerUserID, friendUserIDs, state)
+}
+
+func (f *FriendMgo) IsUpdateIsPinned(data map[string]any) bool {
+ if data == nil {
+ return false
+ }
+ _, ok := data["is_pinned"]
+ return ok
}
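
fillTime above leans on the fact that a MongoDB ObjectID embeds its creation time, so friend documents written before CreateTime existed still report a sensible value. A standalone illustration of that driver behaviour:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	id := primitive.NewObjectID()
	// The leading four bytes of an ObjectID encode a Unix timestamp, which
	// Timestamp() returns as a time.Time with second precision; fillTime uses
	// this to backfill CreateTime on documents written before the field existed.
	fmt.Println(id.Timestamp())
}
```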
diff --git a/pkg/common/storage/database/mgo/friend_request.go b/pkg/common/storage/database/mgo/friend_request.go
index 0d60b213d..4eed2f4a2 100644
--- a/pkg/common/storage/database/mgo/friend_request.go
+++ b/pkg/common/storage/database/mgo/friend_request.go
@@ -27,7 +27,7 @@ import (
)
func NewFriendRequestMongo(db *mongo.Database) (database.FriendRequest, error) {
- coll := db.Collection("friend_request")
+ coll := db.Collection(database.FriendRequestName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "from_user_id", Value: 1},
diff --git a/pkg/common/storage/database/mgo/group.go b/pkg/common/storage/database/mgo/group.go
index 48d24560b..3be7883af 100644
--- a/pkg/common/storage/database/mgo/group.go
+++ b/pkg/common/storage/database/mgo/group.go
@@ -30,7 +30,7 @@ import (
)
func NewGroupMongo(db *mongo.Database) (database.Group, error) {
- coll := db.Collection("group")
+ coll := db.Collection(database.GroupName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "group_id", Value: 1},
@@ -47,6 +47,10 @@ type GroupMgo struct {
coll *mongo.Collection
}
+func (g *GroupMgo) sortGroup() any {
+ return bson.D{{"group_name", 1}, {"create_time", 1}}
+}
+
func (g *GroupMgo) Create(ctx context.Context, groups []*model.Group) (err error) {
return mongoutil.InsertMany(ctx, g.coll, groups)
}
@@ -126,3 +130,32 @@ func (g *GroupMgo) CountRangeEverydayTotal(ctx context.Context, start time.Time,
}
return res, nil
}
+
+func (g *GroupMgo) FindJoinSortGroupID(ctx context.Context, groupIDs []string) ([]string, error) {
+ if len(groupIDs) < 2 {
+ return groupIDs, nil
+ }
+ filter := bson.M{
+ "group_id": bson.M{"$in": groupIDs},
+ "status": bson.M{"$ne": constant.GroupStatusDismissed},
+ }
+ opt := options.Find().SetSort(g.sortGroup()).SetProjection(bson.M{"_id": 0, "group_id": 1})
+ return mongoutil.Find[string](ctx, g.coll, filter, opt)
+}
+
+func (g *GroupMgo) SearchJoin(ctx context.Context, groupIDs []string, keyword string, pagination pagination.Pagination) (int64, []*model.Group, error) {
+ if len(groupIDs) == 0 {
+ return 0, nil, nil
+ }
+ filter := bson.M{
+ "group_id": bson.M{"$in": groupIDs},
+ "status": bson.M{"$ne": constant.GroupStatusDismissed},
+ }
+ if keyword != "" {
+ filter["group_name"] = bson.M{"$regex": keyword}
+ }
+ // Define the sorting options
+ opts := options.Find().SetSort(g.sortGroup())
+ // Perform the search with pagination and sorting
+ return mongoutil.FindPage[*model.Group](ctx, g.coll, filter, pagination, opts)
+}
diff --git a/pkg/common/storage/database/mgo/group_member.go b/pkg/common/storage/database/mgo/group_member.go
index ccca386e5..2fdf2003b 100644
--- a/pkg/common/storage/database/mgo/group_member.go
+++ b/pkg/common/storage/database/mgo/group_member.go
@@ -16,8 +16,10 @@ package mgo
import (
"context"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/tools/log"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/db/mongoutil"
@@ -29,7 +31,7 @@ import (
)
func NewGroupMember(db *mongo.Database) (database.GroupMember, error) {
- coll := db.Collection("group_member")
+ coll := db.Collection(database.GroupMemberName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "group_id", Value: 1},
@@ -40,15 +42,53 @@ func NewGroupMember(db *mongo.Database) (database.GroupMember, error) {
if err != nil {
return nil, errs.Wrap(err)
}
- return &GroupMemberMgo{coll: coll}, nil
+ member, err := NewVersionLog(db.Collection(database.GroupMemberVersionName))
+ if err != nil {
+ return nil, err
+ }
+ join, err := NewVersionLog(db.Collection(database.GroupJoinVersionName))
+ if err != nil {
+ return nil, err
+ }
+ return &GroupMemberMgo{coll: coll, member: member, join: join}, nil
}
type GroupMemberMgo struct {
- coll *mongo.Collection
+ coll *mongo.Collection
+ member database.VersionLog
+ join database.VersionLog
+}
+
+func (g *GroupMemberMgo) memberSort() any {
+ return bson.D{{"role_level", -1}, {"create_time", 1}}
}
func (g *GroupMemberMgo) Create(ctx context.Context, groupMembers []*model.GroupMember) (err error) {
- return mongoutil.InsertMany(ctx, g.coll, groupMembers)
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.InsertMany(ctx, g.coll, groupMembers)
+ }, func() error {
+ gms := make(map[string][]string)
+ for _, member := range groupMembers {
+ gms[member.GroupID] = append(gms[member.GroupID], member.UserID)
+ }
+ for groupID, userIDs := range gms {
+ if err := g.member.IncrVersion(ctx, groupID, userIDs, model.VersionStateInsert); err != nil {
+ return err
+ }
+ }
+ return nil
+ }, func() error {
+ gms := make(map[string][]string)
+ for _, member := range groupMembers {
+ gms[member.UserID] = append(gms[member.UserID], member.GroupID)
+ }
+ for userID, groupIDs := range gms {
+ if err := g.join.IncrVersion(ctx, userID, groupIDs, model.VersionStateInsert); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
}
func (g *GroupMemberMgo) Delete(ctx context.Context, groupID string, userIDs []string) (err error) {
@@ -56,24 +96,83 @@ func (g *GroupMemberMgo) Delete(ctx context.Context, groupID string, userIDs []s
if len(userIDs) > 0 {
filter["user_id"] = bson.M{"$in": userIDs}
}
- return mongoutil.DeleteMany(ctx, g.coll, filter)
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.DeleteMany(ctx, g.coll, filter)
+ }, func() error {
+ if len(userIDs) == 0 {
+ return g.member.Delete(ctx, groupID)
+ } else {
+ return g.member.IncrVersion(ctx, groupID, userIDs, model.VersionStateDelete)
+ }
+ }, func() error {
+ for _, userID := range userIDs {
+ if err := g.join.IncrVersion(ctx, userID, []string{groupID}, model.VersionStateDelete); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
}
func (g *GroupMemberMgo) UpdateRoleLevel(ctx context.Context, groupID string, userID string, roleLevel int32) error {
- return g.Update(ctx, groupID, userID, bson.M{"role_level": roleLevel})
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.UpdateOne(ctx, g.coll, bson.M{"group_id": groupID, "user_id": userID},
+ bson.M{"$set": bson.M{"role_level": roleLevel}}, true)
+ }, func() error {
+ return g.member.IncrVersion(ctx, groupID, []string{model.VersionSortChangeID, userID}, model.VersionStateUpdate)
+ })
+}
+func (g *GroupMemberMgo) UpdateUserRoleLevels(ctx context.Context, groupID string, firstUserID string, firstUserRoleLevel int32, secondUserID string, secondUserRoleLevel int32) error {
+ return mongoutil.IncrVersion(func() error {
+ if err := mongoutil.UpdateOne(ctx, g.coll, bson.M{"group_id": groupID, "user_id": firstUserID},
+ bson.M{"$set": bson.M{"role_level": firstUserRoleLevel}}, true); err != nil {
+ return err
+ }
+ if err := mongoutil.UpdateOne(ctx, g.coll, bson.M{"group_id": groupID, "user_id": secondUserID},
+ bson.M{"$set": bson.M{"role_level": secondUserRoleLevel}}, true); err != nil {
+ return err
+ }
+ return nil
+ }, func() error {
+ return g.member.IncrVersion(ctx, groupID, []string{model.VersionSortChangeID, firstUserID, secondUserID}, model.VersionStateUpdate)
+ })
}
func (g *GroupMemberMgo) Update(ctx context.Context, groupID string, userID string, data map[string]any) (err error) {
- return mongoutil.UpdateOne(ctx, g.coll, bson.M{"group_id": groupID, "user_id": userID}, bson.M{"$set": data}, true)
+ if len(data) == 0 {
+ return nil
+ }
+ return mongoutil.IncrVersion(func() error {
+ return mongoutil.UpdateOne(ctx, g.coll, bson.M{"group_id": groupID, "user_id": userID}, bson.M{"$set": data}, true)
+ }, func() error {
+ var userIDs []string
+ if g.IsUpdateRoleLevel(data) {
+ userIDs = []string{model.VersionSortChangeID, userID}
+ } else {
+ userIDs = []string{userID}
+ }
+ return g.member.IncrVersion(ctx, groupID, userIDs, model.VersionStateUpdate)
+ })
}
-func (g *GroupMemberMgo) Find(ctx context.Context, groupIDs []string, userIDs []string, roleLevels []int32) (groupMembers []*model.GroupMember, err error) {
- // TODO implement me
- panic("implement me")
+func (g *GroupMemberMgo) FindMemberUserID(ctx context.Context, groupID string) (userIDs []string, err error) {
+ return mongoutil.Find[string](ctx, g.coll, bson.M{"group_id": groupID}, options.Find().SetProjection(bson.M{"_id": 0, "user_id": 1}).SetSort(g.memberSort()))
}
-func (g *GroupMemberMgo) FindMemberUserID(ctx context.Context, groupID string) (userIDs []string, err error) {
- return mongoutil.Find[string](ctx, g.coll, bson.M{"group_id": groupID}, options.Find().SetProjection(bson.M{"_id": 0, "user_id": 1}))
+func (g *GroupMemberMgo) Find(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
+ filter := bson.M{"group_id": groupID}
+ if len(userIDs) > 0 {
+ filter["user_id"] = bson.M{"$in": userIDs}
+ }
+ return mongoutil.Find[*model.GroupMember](ctx, g.coll, filter)
+}
+
+func (g *GroupMemberMgo) FindInGroup(ctx context.Context, userID string, groupIDs []string) ([]*model.GroupMember, error) {
+ filter := bson.M{"user_id": userID}
+ if len(groupIDs) > 0 {
+ filter["group_id"] = bson.M{"$in": groupIDs}
+ }
+ return mongoutil.Find[*model.GroupMember](ctx, g.coll, filter)
}
func (g *GroupMemberMgo) Take(ctx context.Context, groupID string, userID string) (groupMember *model.GroupMember, err error) {
@@ -88,13 +187,13 @@ func (g *GroupMemberMgo) FindRoleLevelUserIDs(ctx context.Context, groupID strin
return mongoutil.Find[string](ctx, g.coll, bson.M{"group_id": groupID, "role_level": roleLevel}, options.Find().SetProjection(bson.M{"_id": 0, "user_id": 1}))
}
-func (g *GroupMemberMgo) SearchMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (total int64, groupList []*model.GroupMember, err error) {
+func (g *GroupMemberMgo) SearchMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (int64, []*model.GroupMember, error) {
filter := bson.M{"group_id": groupID, "nickname": bson.M{"$regex": keyword}}
- return mongoutil.FindPage[*model.GroupMember](ctx, g.coll, filter, pagination)
+ return mongoutil.FindPage[*model.GroupMember](ctx, g.coll, filter, pagination, options.Find().SetSort(g.memberSort()))
}
func (g *GroupMemberMgo) FindUserJoinedGroupID(ctx context.Context, userID string) (groupIDs []string, err error) {
- return mongoutil.Find[string](ctx, g.coll, bson.M{"user_id": userID}, options.Find().SetProjection(bson.M{"_id": 0, "group_id": 1}))
+ return mongoutil.Find[string](ctx, g.coll, bson.M{"user_id": userID}, options.Find().SetProjection(bson.M{"_id": 0, "group_id": 1}).SetSort(g.memberSort()))
}
func (g *GroupMemberMgo) TakeGroupMemberNum(ctx context.Context, groupID string) (count int64, err error) {
@@ -118,3 +217,26 @@ func (g *GroupMemberMgo) IsUpdateRoleLevel(data map[string]any) bool {
_, ok := data["role_level"]
return ok
}
+
+func (g *GroupMemberMgo) JoinGroupIncrVersion(ctx context.Context, userID string, groupIDs []string, state int32) error {
+ return g.join.IncrVersion(ctx, userID, groupIDs, state)
+}
+
+func (g *GroupMemberMgo) MemberGroupIncrVersion(ctx context.Context, groupID string, userIDs []string, state int32) error {
+ return g.member.IncrVersion(ctx, groupID, userIDs, state)
+}
+
+func (g *GroupMemberMgo) FindMemberIncrVersion(ctx context.Context, groupID string, version uint, limit int) (*model.VersionLog, error) {
+ log.ZDebug(ctx, "find member incr version", "groupID", groupID, "version", version)
+ return g.member.FindChangeLog(ctx, groupID, version, limit)
+}
+
+func (g *GroupMemberMgo) BatchFindMemberIncrVersion(ctx context.Context, groupIDs []string, versions []uint, limits []int) ([]*model.VersionLog, error) {
+ log.ZDebug(ctx, "Batch find member incr version", "groupIDs", groupIDs, "versions", versions)
+ return g.member.BatchFindChangeLog(ctx, groupIDs, versions, limits)
+}
+
+func (g *GroupMemberMgo) FindJoinIncrVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error) {
+ log.ZDebug(ctx, "find join incr version", "userID", userID, "version", version)
+ return g.join.FindChangeLog(ctx, userID, version, limit)
+}
diff --git a/pkg/common/storage/database/mgo/group_request.go b/pkg/common/storage/database/mgo/group_request.go
index 4ae778527..b1942b708 100644
--- a/pkg/common/storage/database/mgo/group_request.go
+++ b/pkg/common/storage/database/mgo/group_request.go
@@ -28,7 +28,7 @@ import (
)
func NewGroupRequestMgo(db *mongo.Database) (database.GroupRequest, error) {
- coll := db.Collection("group_request")
+ coll := db.Collection(database.GroupRequestName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "group_id", Value: 1},
diff --git a/pkg/common/storage/database/mgo/log.go b/pkg/common/storage/database/mgo/log.go
index 51715bd77..6ff4c6039 100644
--- a/pkg/common/storage/database/mgo/log.go
+++ b/pkg/common/storage/database/mgo/log.go
@@ -28,7 +28,7 @@ import (
)
func NewLogMongo(db *mongo.Database) (database.Log, error) {
- coll := db.Collection("log")
+ coll := db.Collection(database.LogName)
_, err := coll.Indexes().CreateMany(context.Background(), []mongo.IndexModel{
{
Keys: bson.D{
diff --git a/pkg/common/storage/database/mgo/msg.go b/pkg/common/storage/database/mgo/msg.go
index a7291fcc8..03f47c503 100644
--- a/pkg/common/storage/database/mgo/msg.go
+++ b/pkg/common/storage/database/mgo/msg.go
@@ -3,10 +3,11 @@ package mgo
import (
"context"
"fmt"
+ "time"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/utils/datautil"
- "time"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msg"
@@ -93,6 +94,29 @@ func (m *MsgMgo) FindOneByDocID(ctx context.Context, docID string) (*model.MsgDo
}
func (m *MsgMgo) GetMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID string, seqs []int64) ([]*model.MsgInfoModel, error) {
+ msgs, err := m.getMsgBySeqIndexIn1Doc(ctx, userID, docID, seqs)
+ if err != nil {
+ return nil, err
+ }
+ if len(msgs) == len(seqs) {
+ return msgs, nil
+ }
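+	// Backfill seqs missing from the query result with empty placeholder models so callers get exactly one entry per requested seq.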
+ tmp := make(map[int64]*model.MsgInfoModel)
+ for i, val := range msgs {
+ tmp[val.Msg.Seq] = msgs[i]
+ }
+ res := make([]*model.MsgInfoModel, 0, len(seqs))
+ for _, seq := range seqs {
+ if val, ok := tmp[seq]; ok {
+ res = append(res, val)
+ } else {
+ res = append(res, &model.MsgInfoModel{Msg: &model.MsgDataModel{Seq: seq}})
+ }
+ }
+ return res, nil
+}
+
+func (m *MsgMgo) getMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID string, seqs []int64) ([]*model.MsgInfoModel, error) {
indexs := make([]int64, 0, len(seqs))
for _, seq := range seqs {
indexs = append(indexs, m.model.GetMsgIndex(seq))
@@ -204,7 +228,7 @@ func (m *MsgMgo) GetMsgDocModelByIndex(ctx context.Context, conversationID strin
if sort != 1 && sort != -1 {
return nil, errs.ErrArgs.WrapMsg("mongo sort must be 1 or -1")
}
- opt := options.Find().SetLimit(1).SetSkip(index).SetSort(bson.M{"doc_id": sort}).SetLimit(1)
+ opt := options.Find().SetSkip(index).SetSort(bson.M{"_id": sort}).SetLimit(1)
filter := bson.M{"doc_id": primitive.Regex{Pattern: fmt.Sprintf("^%s:", conversationID)}}
msgs, err := mongoutil.Find[*model.MsgDocModel](ctx, m.coll, filter, opt)
if err != nil {
@@ -254,125 +278,409 @@ func (m *MsgMgo) MarkSingleChatMsgsAsRead(ctx context.Context, userID string, do
return nil
}
-func (m *MsgMgo) SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (int32, []*model.MsgInfoModel, error) {
- where := make(bson.A, 0, 6)
+
+type searchMessageIndex struct {
+ ID primitive.ObjectID `bson:"_id"`
+ Index []int64 `bson:"index"`
+}
+
+func (m *MsgMgo) searchMessageIndex(ctx context.Context, filter any, nextID primitive.ObjectID, limit int) ([]searchMessageIndex, error) {
+ var pipeline bson.A
+ if !nextID.IsZero() {
+ pipeline = append(pipeline, bson.M{"$match": bson.M{"_id": bson.M{"$gt": nextID}}})
+ }
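+	// Tag each msg with its position in the msgs array, unwind, re-apply the filter, and collect the matching positions back per document.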
+ pipeline = append(pipeline,
+ bson.M{"$sort": bson.M{"_id": 1}},
+ bson.M{"$match": filter},
+ bson.M{"$limit": limit},
+ bson.M{
+ "$project": bson.M{
+ "_id": 1,
+ "msgs": bson.M{
+ "$map": bson.M{
+ "input": "$msgs",
+ "as": "msg",
+ "in": bson.M{
+ "$mergeObjects": bson.A{
+ "$$msg",
+ bson.M{
+ "_search_temp_index": bson.M{
+ "$indexOfArray": bson.A{
+ "$msgs", "$$msg",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ bson.M{"$unwind": "$msgs"},
+ bson.M{"$match": filter},
+ bson.M{
+ "$project": bson.M{
+ "_id": 1,
+ "msgs._search_temp_index": 1,
+ },
+ },
+ bson.M{
+ "$group": bson.M{
+ "_id": "$_id",
+ "index": bson.M{"$push": "$msgs._search_temp_index"},
+ },
+ },
+ bson.M{"$sort": bson.M{"_id": 1}},
+ )
+ return mongoutil.Aggregate[searchMessageIndex](ctx, m.coll, pipeline)
+}
+
+func (m *MsgMgo) searchMessage(ctx context.Context, req *msg.SearchMessageReq) (int64, []searchMessageIndex, error) {
+ filter := bson.M{}
if req.RecvID != "" {
- where = append(where, bson.M{"msgs.msg.recv_id": req.RecvID})
+ filter["$or"] = bson.A{
+ bson.M{"msgs.msg.recv_id": req.RecvID},
+ bson.M{"msgs.msg.group_id": req.RecvID},
+ }
}
if req.SendID != "" {
- where = append(where, bson.M{"msgs.msg.send_id": req.SendID})
+ filter["msgs.msg.send_id"] = req.SendID
}
if req.ContentType != 0 {
- where = append(where, bson.M{"msgs.msg.content_type": req.ContentType})
+ filter["msgs.msg.content_type"] = req.ContentType
}
if req.SessionType != 0 {
- where = append(where, bson.M{"msgs.msg.session_type": req.SessionType})
+ filter["msgs.msg.session_type"] = req.SessionType
}
if req.SendTime != "" {
sendTime, err := time.Parse(time.DateOnly, req.SendTime)
if err != nil {
return 0, nil, errs.ErrArgs.WrapMsg("invalid sendTime", "req", req.SendTime, "format", time.DateOnly, "cause", err.Error())
}
- where = append(where,
- bson.M{
- "msgs.msg.send_time": bson.M{
- "$gte": sendTime.UnixMilli(),
- },
- },
+ filter["$and"] = bson.A{
+ bson.M{"msgs.msg.send_time": bson.M{
+ "$gte": sendTime.UnixMilli(),
+ }},
bson.M{
"msgs.msg.send_time": bson.M{
"$lt": sendTime.Add(time.Hour * 24).UnixMilli(),
},
},
- )
- }
- pipeline := bson.A{
- bson.M{
- "$unwind": "$msgs",
- },
- }
- if len(where) > 0 {
- pipeline = append(pipeline, bson.M{
- "$match": bson.M{"$and": where},
- })
+ }
}
- pipeline = append(pipeline,
- bson.M{
- "$project": bson.M{
- "_id": 0,
- "msg": "$msgs.msg",
- },
- },
- bson.M{
- "$count": "count",
- },
+
+ var (
+ nextID primitive.ObjectID
+ count int
+ skip = int((req.Pagination.GetPageNumber() - 1) * req.Pagination.GetShowNumber())
)
- count, err := mongoutil.Aggregate[int32](ctx, m.coll, pipeline)
- if err != nil {
- return 0, nil, err
+ const maxDoc = 50
+ data := make([]searchMessageIndex, 0, req.Pagination.GetShowNumber())
+ push := cap(data)
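+	// Page through matching docs maxDoc at a time, skipping the first skip matches, collecting one page of (doc, index) pairs, and counting a little past the page for the returned total.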
+ for i := 0; ; i++ {
+ res, err := m.searchMessageIndex(ctx, filter, nextID, maxDoc)
+ if err != nil {
+ return 0, nil, err
+ }
+ if len(res) > 0 {
+ nextID = res[len(res)-1].ID
+ }
+ for _, r := range res {
+ var dataIndex []int64
+ for _, index := range r.Index {
+ if push > 0 && count >= skip {
+ dataIndex = append(dataIndex, index)
+ push--
+ }
+ count++
+ }
+ if len(dataIndex) > 0 {
+ data = append(data, searchMessageIndex{
+ ID: r.ID,
+ Index: dataIndex,
+ })
+ }
+ }
+ if push <= 0 {
+ push--
+ }
+ if len(res) < maxDoc || push < -10 {
+ return int64(count), data, nil
+ }
}
- if len(count) == 0 || count[0] == 0 {
- return 0, nil, nil
+}
+
+func (m *MsgMgo) getDocRange(ctx context.Context, id primitive.ObjectID, index []int64) ([]*model.MsgInfoModel, error) {
+ if len(index) == 0 {
+ return nil, nil
+ }
+
+ pipeline := bson.A{
+ bson.M{"$match": bson.M{"_id": id}},
+ bson.M{"$project": "$msgs"},
}
- pipeline = pipeline[:len(pipeline)-1]
- pipeline = append(pipeline,
- bson.M{
- "$skip": (req.Pagination.GetPageNumber() - 1) * req.Pagination.GetShowNumber(),
- },
- bson.M{
- "$limit": req.Pagination.GetShowNumber(),
- },
- )
msgs, err := mongoutil.Aggregate[*model.MsgInfoModel](ctx, m.coll, pipeline)
+ if err != nil {
+ return nil, err
+ }
+ return msgs, nil
+}
+
+func (m *MsgMgo) SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (int64, []*model.MsgInfoModel, error) {
+ count, data, err := m.searchMessage(ctx, req)
if err != nil {
return 0, nil, err
}
- for i := range msgs {
- msgInfo := msgs[i]
- if msgInfo == nil || msgInfo.Msg == nil {
- continue
+ var msgs []*model.MsgInfoModel
+ if len(data) > 0 {
+ var n int
+ for _, d := range data {
+ n += len(d.Index)
}
- if msgInfo.Revoke != nil {
- revokeContent := sdkws.MessageRevokedContent{
- RevokerID: msgInfo.Revoke.UserID,
- RevokerRole: msgInfo.Revoke.Role,
- ClientMsgID: msgInfo.Msg.ClientMsgID,
- RevokerNickname: msgInfo.Revoke.Nickname,
- RevokeTime: msgInfo.Revoke.Time,
- SourceMessageSendTime: msgInfo.Msg.SendTime,
- SourceMessageSendID: msgInfo.Msg.SendID,
- SourceMessageSenderNickname: msgInfo.Msg.SenderNickname,
- SessionType: msgInfo.Msg.SessionType,
- Seq: msgInfo.Msg.Seq,
- Ex: msgInfo.Msg.Ex,
- }
- data, err := jsonutil.JsonMarshal(&revokeContent)
- if err != nil {
- return 0, nil, errs.WrapMsg(err, "json.Marshal revokeContent")
- }
- elem := sdkws.NotificationElem{Detail: string(data)}
- content, err := jsonutil.JsonMarshal(&elem)
- if err != nil {
- return 0, nil, errs.WrapMsg(err, "json.Marshal elem")
+ msgs = make([]*model.MsgInfoModel, 0, n)
+ }
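+	// Load each matched doc once and pick out the messages at the recorded indexes.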
+ for _, val := range data {
+ res, err := mongoutil.FindOne[*model.MsgDocModel](ctx, m.coll, bson.M{"_id": val.ID})
+ if err != nil {
+ return 0, nil, err
+ }
+ for _, i := range val.Index {
+ if i >= int64(len(res.Msg)) {
+ continue
}
- msgInfo.Msg.ContentType = constant.MsgRevokeNotification
- msgInfo.Msg.Content = string(content)
+ msgs = append(msgs, res.Msg[i])
}
- msgs = append(msgs, msgInfo)
- }
- //start := (req.Pagination.PageNumber - 1) * req.Pagination.ShowNumber
- //n := int32(len(msgs))
- //if start >= n {
- // return n, []*relation.MsgInfoModel{}, nil
- //}
- //if start+req.Pagination.ShowNumber < n {
- // msgs = msgs[start : start+req.Pagination.ShowNumber]
- //} else {
- // msgs = msgs[start:]
- //}
- return count[0], msgs, nil
+ }
+ return count, msgs, nil
}
+
func (m *MsgMgo) RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) {
var sort int
if ase {
diff --git a/pkg/common/storage/database/mgo/msg_test.go b/pkg/common/storage/database/mgo/msg_test.go
new file mode 100644
index 000000000..5aed4dc51
--- /dev/null
+++ b/pkg/common/storage/database/mgo/msg_test.go
@@ -0,0 +1,75 @@
+package mgo
+
+import (
+ "context"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/protocol/msg"
+ "github.com/openimsdk/protocol/sdkws"
+ "github.com/openimsdk/tools/db/mongoutil"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "math/rand"
+ "strconv"
+ "testing"
+ "time"
+)
+
+func TestName1(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
+ defer cancel()
+ cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
+
+ v := &MsgMgo{
+ coll: cli.Database("openim_v3").Collection("msg3"),
+ }
+
+ req := &msg.SearchMessageReq{
+ //RecvID: "3187706596",
+ //SendID: "7009965934",
+ ContentType: 101,
+ //SendTime: "2024-05-06",
+ //SessionType: 3,
+ Pagination: &sdkws.RequestPagination{
+ PageNumber: 1,
+ ShowNumber: 10,
+ },
+ }
+ total, res, err := v.SearchMessage(ctx, req)
+ if err != nil {
+ panic(err)
+ }
+
+ for i, re := range res {
+ t.Logf("%d => %d | %+v", i+1, re.Msg.Seq, re.Msg.Content)
+ }
+
+ t.Log(total)
+}
+
+func TestName10(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ defer cancel()
+ cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
+
+ v := &MsgMgo{
+ coll: cli.Database("openim_v3").Collection("msg3"),
+ }
+ opt := options.Find().SetLimit(1000)
+
+ res, err := mongoutil.Find[model.MsgDocModel](ctx, v.coll, bson.M{}, opt)
+ if err != nil {
+ panic(err)
+ }
+ ctx = context.Background()
+ for i := 0; i < 100000; i++ {
+ for j := range res {
+ res[j].DocID = strconv.FormatUint(rand.Uint64(), 10) + ":0"
+ }
+ if err := mongoutil.InsertMany(ctx, v.coll, res); err != nil {
+ panic(err)
+ }
+ t.Log("====>", time.Now(), i)
+ }
+
+}
diff --git a/pkg/common/storage/database/mgo/object.go b/pkg/common/storage/database/mgo/object.go
index 8ed7b3a56..4242fbb53 100644
--- a/pkg/common/storage/database/mgo/object.go
+++ b/pkg/common/storage/database/mgo/object.go
@@ -16,10 +16,13 @@ package mgo
import (
"context"
+ "time"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/mongoutil"
+ "github.com/openimsdk/tools/db/pagination"
"github.com/openimsdk/tools/errs"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
@@ -27,7 +30,7 @@ import (
)
func NewS3Mongo(db *mongo.Database) (database.ObjectInfo, error) {
- coll := db.Collection("s3")
+ coll := db.Collection(database.ObjectName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "name", Value: 1},
@@ -68,3 +71,14 @@ func (o *S3Mongo) Take(ctx context.Context, engine string, name string) (*model.
func (o *S3Mongo) Delete(ctx context.Context, engine string, name string) error {
return mongoutil.DeleteOne(ctx, o.coll, bson.M{"name": name, "engine": engine})
}
+func (o *S3Mongo) FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) {
+ return mongoutil.FindPage[*model.Object](ctx, o.coll, bson.M{
+ "create_time": bson.M{"$lt": duration},
+ }, pagination)
+}
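+
+// FindNotDelByS3 counts objects that still reference the given S3 key and were created after the cutoff time.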
+func (o *S3Mongo) FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error) {
+ return mongoutil.Count(ctx, o.coll, bson.M{
+ "key": key,
+ "create_time": bson.M{"$gt": duration},
+ })
+}
diff --git a/pkg/common/storage/database/mgo/seq_conversation.go b/pkg/common/storage/database/mgo/seq_conversation.go
new file mode 100644
index 000000000..7971b7e1a
--- /dev/null
+++ b/pkg/common/storage/database/mgo/seq_conversation.go
@@ -0,0 +1,103 @@
+package mgo
+
+import (
+ "context"
+ "errors"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/tools/db/mongoutil"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+func NewSeqConversationMongo(db *mongo.Database) (database.SeqConversation, error) {
+ coll := db.Collection(database.SeqConversationName)
+ _, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
+ Keys: bson.D{
+ {Key: "conversation_id", Value: 1},
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &seqConversationMongo{coll: coll}, nil
+}
+
+type seqConversationMongo struct {
+ coll *mongo.Collection
+}
+
+func (s *seqConversationMongo) setSeq(ctx context.Context, conversationID string, seq int64, field string) error {
+ filter := map[string]any{
+ "conversation_id": conversationID,
+ }
+ insert := bson.M{
+ "conversation_id": conversationID,
+ "min_seq": 0,
+ "max_seq": 0,
+ }
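+	// Remove the field being written from the $setOnInsert doc; MongoDB rejects updates that touch the same path in both $set and $setOnInsert.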
+ delete(insert, field)
+ update := map[string]any{
+ "$set": bson.M{
+ field: seq,
+ },
+ "$setOnInsert": insert,
+ }
+ opt := options.Update().SetUpsert(true)
+ return mongoutil.UpdateOne(ctx, s.coll, filter, update, false, opt)
+}
+
+func (s *seqConversationMongo) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
+ if size < 0 {
+		return 0, errors.New("size must not be negative")
+ }
+ if size == 0 {
+ return s.GetMaxSeq(ctx, conversationID)
+ }
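+	// Atomically reserve the range [max_seq, max_seq+size) and return its first seq.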
+ filter := map[string]any{"conversation_id": conversationID}
+ update := map[string]any{
+ "$inc": map[string]any{"max_seq": size},
+ "$set": map[string]any{"min_seq": int64(0)},
+ }
+ opt := options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After).SetProjection(map[string]any{"_id": 0, "max_seq": 1})
+ lastSeq, err := mongoutil.FindOneAndUpdate[int64](ctx, s.coll, filter, update, opt)
+ if err != nil {
+ return 0, err
+ }
+ return lastSeq - size, nil
+}
+
+func (s *seqConversationMongo) SetMaxSeq(ctx context.Context, conversationID string, seq int64) error {
+ return s.setSeq(ctx, conversationID, seq, "max_seq")
+}
+
+func (s *seqConversationMongo) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
+ seq, err := mongoutil.FindOne[int64](ctx, s.coll, bson.M{"conversation_id": conversationID}, options.FindOne().SetProjection(map[string]any{"_id": 0, "max_seq": 1}))
+ if err == nil {
+ return seq, nil
+ } else if IsNotFound(err) {
+ return 0, nil
+ } else {
+ return 0, err
+ }
+}
+
+func (s *seqConversationMongo) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
+ seq, err := mongoutil.FindOne[int64](ctx, s.coll, bson.M{"conversation_id": conversationID}, options.FindOne().SetProjection(map[string]any{"_id": 0, "min_seq": 1}))
+ if err == nil {
+ return seq, nil
+ } else if IsNotFound(err) {
+ return 0, nil
+ } else {
+ return 0, err
+ }
+}
+
+func (s *seqConversationMongo) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
+ return s.setSeq(ctx, conversationID, seq, "min_seq")
+}
+
+func (s *seqConversationMongo) GetConversation(ctx context.Context, conversationID string) (*model.SeqConversation, error) {
+ return mongoutil.FindOne[*model.SeqConversation](ctx, s.coll, bson.M{"conversation_id": conversationID})
+}
diff --git a/pkg/common/storage/database/mgo/seq_conversation_test.go b/pkg/common/storage/database/mgo/seq_conversation_test.go
new file mode 100644
index 000000000..42507a693
--- /dev/null
+++ b/pkg/common/storage/database/mgo/seq_conversation_test.go
@@ -0,0 +1,42 @@
+package mgo
+
+import (
+ "context"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "testing"
+ "time"
+)
+
+func Result[V any](val V, err error) V {
+ if err != nil {
+ panic(err)
+ }
+ return val
+}
+
+func Mongodb() *mongo.Database {
+ return Result(
+ mongo.Connect(context.Background(),
+ options.Client().
+ ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").
+ SetConnectTimeout(5*time.Second)),
+ ).Database("openim_v3")
+}
+
+func TestUserSeq(t *testing.T) {
+ uSeq := Result(NewSeqUserMongo(Mongodb())).(*seqUserMongo)
+ t.Log(uSeq.SetUserMinSeq(context.Background(), "1000", "2000", 4))
+}
+
+func TestConversationSeq(t *testing.T) {
+ cSeq := Result(NewSeqConversationMongo(Mongodb())).(*seqConversationMongo)
+ t.Log(cSeq.SetMaxSeq(context.Background(), "2000", 10))
+ t.Log(cSeq.Malloc(context.Background(), "2000", 10))
+ t.Log(cSeq.GetMaxSeq(context.Background(), "2000"))
+}
+
+func TestUserGetUserReadSeqs(t *testing.T) {
+ uSeq := Result(NewSeqUserMongo(Mongodb())).(*seqUserMongo)
+ t.Log(uSeq.GetUserReadSeqs(context.Background(), "2110910952", []string{"sg_345762580", "2000", "3000"}))
+}
diff --git a/pkg/common/storage/database/mgo/seq_user.go b/pkg/common/storage/database/mgo/seq_user.go
new file mode 100644
index 000000000..9faad416a
--- /dev/null
+++ b/pkg/common/storage/database/mgo/seq_user.go
@@ -0,0 +1,119 @@
+package mgo
+
+import (
+ "context"
+ "errors"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/tools/db/mongoutil"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+func NewSeqUserMongo(db *mongo.Database) (database.SeqUser, error) {
+ coll := db.Collection(database.SeqUserName)
+ _, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
+ Keys: bson.D{
+ {Key: "user_id", Value: 1},
+ {Key: "conversation_id", Value: 1},
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &seqUserMongo{coll: coll}, nil
+}
+
+type seqUserMongo struct {
+ coll *mongo.Collection
+}
+
+func (s *seqUserMongo) setSeq(ctx context.Context, conversationID string, userID string, seq int64, field string) error {
+ filter := map[string]any{
+ "user_id": userID,
+ "conversation_id": conversationID,
+ }
+ insert := bson.M{
+ "user_id": userID,
+ "conversation_id": conversationID,
+ "min_seq": 0,
+ "max_seq": 0,
+ "read_seq": 0,
+ }
+ delete(insert, field)
+ update := map[string]any{
+ "$set": bson.M{
+ field: seq,
+ },
+ "$setOnInsert": insert,
+ }
+ opt := options.Update().SetUpsert(true)
+ return mongoutil.UpdateOne(ctx, s.coll, filter, update, false, opt)
+}
+
+func (s *seqUserMongo) getSeq(ctx context.Context, conversationID string, userID string, field string) (int64, error) {
+ filter := map[string]any{
+ "user_id": userID,
+ "conversation_id": conversationID,
+ }
+	opt := options.FindOne().SetProjection(bson.M{"_id": 0, field: 1})
+ seq, err := mongoutil.FindOne[int64](ctx, s.coll, filter, opt)
+ if err == nil {
+ return seq, nil
+ } else if errors.Is(err, mongo.ErrNoDocuments) {
+ return 0, nil
+ } else {
+ return 0, err
+ }
+}
+
+func (s *seqUserMongo) GetUserMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
+ return s.getSeq(ctx, conversationID, userID, "max_seq")
+}
+
+func (s *seqUserMongo) SetUserMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
+ return s.setSeq(ctx, conversationID, userID, seq, "max_seq")
+}
+
+func (s *seqUserMongo) GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
+ return s.getSeq(ctx, conversationID, userID, "min_seq")
+}
+
+func (s *seqUserMongo) SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
+ return s.setSeq(ctx, conversationID, userID, seq, "min_seq")
+}
+
+func (s *seqUserMongo) GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
+ return s.getSeq(ctx, conversationID, userID, "read_seq")
+}
+
+func (s *seqUserMongo) notFoundSet0(seq map[string]int64, conversationIDs []string) {
+ for _, conversationID := range conversationIDs {
+ if _, ok := seq[conversationID]; !ok {
+ seq[conversationID] = 0
+ }
+ }
+}
+
+func (s *seqUserMongo) GetUserReadSeqs(ctx context.Context, userID string, conversationID []string) (map[string]int64, error) {
+ if len(conversationID) == 0 {
+ return map[string]int64{}, nil
+ }
+ filter := bson.M{"user_id": userID, "conversation_id": bson.M{"$in": conversationID}}
+ opt := options.Find().SetProjection(bson.M{"_id": 0, "conversation_id": 1, "read_seq": 1})
+ seqs, err := mongoutil.Find[*model.SeqUser](ctx, s.coll, filter, opt)
+ if err != nil {
+ return nil, err
+ }
+ res := make(map[string]int64)
+ for _, seq := range seqs {
+ res[seq.ConversationID] = seq.ReadSeq
+ }
+ s.notFoundSet0(res, conversationID)
+ return res, nil
+}
+
+func (s *seqUserMongo) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
+ return s.setSeq(ctx, conversationID, userID, seq, "read_seq")
+}
diff --git a/pkg/common/storage/database/mgo/subscribe.go b/pkg/common/storage/database/mgo/subscribe.go
deleted file mode 100644
index 5b7d9786b..000000000
--- a/pkg/common/storage/database/mgo/subscribe.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mgo
-
-import (
- "context"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
-
- "github.com/openimsdk/tools/errs"
- "go.mongodb.org/mongo-driver/bson"
- "go.mongodb.org/mongo-driver/mongo"
- "go.mongodb.org/mongo-driver/mongo/options"
-)
-
-// prefixes and suffixes.
-const (
- SubscriptionPrefix = "subscription_prefix"
- SubscribedPrefix = "subscribed_prefix"
-)
-
-// MaximumSubscription Maximum number of subscriptions.
-const (
- MaximumSubscription = 3000
-)
-
-func NewUserMongoDriver(database *mongo.Database) database.SubscribeUser {
- return &UserMongoDriver{
- userCollection: database.Collection(model.SubscribeUserTableName),
- }
-}
-
-type UserMongoDriver struct {
- userCollection *mongo.Collection
-}
-
-// AddSubscriptionList Subscriber's handling of thresholds.
-func (u *UserMongoDriver) AddSubscriptionList(ctx context.Context, userID string, userIDList []string) error {
- // Check the number of lists in the key.
- pipeline := mongo.Pipeline{
- {{"$match", bson.D{{"user_id", SubscriptionPrefix + userID}}}},
- {{"$project", bson.D{{"count", bson.D{{"$size", "$user_id_list"}}}}}},
- }
- // perform aggregate operations
- cursor, err := u.userCollection.Aggregate(ctx, pipeline)
- if err != nil {
- return errs.Wrap(err)
- }
- defer cursor.Close(ctx)
- var cnt struct {
- Count int `bson:"count"`
- }
- // iterate over aggregated results
- for cursor.Next(ctx) {
- err = cursor.Decode(&cnt)
- if err != nil {
- return errs.Wrap(err)
- }
- }
- var newUserIDList []string
- // If the threshold is exceeded, pop out the previous MaximumSubscription - len(userIDList) and insert it.
- if cnt.Count+len(userIDList) > MaximumSubscription {
- newUserIDList, err = u.GetAllSubscribeList(ctx, userID)
- if err != nil {
- return err
- }
- newUserIDList = newUserIDList[MaximumSubscription-len(userIDList):]
- _, err = u.userCollection.UpdateOne(
- ctx,
- bson.M{"user_id": SubscriptionPrefix + userID},
- bson.M{"$set": bson.M{"user_id_list": newUserIDList}},
- )
- if err != nil {
- return err
- }
- // Another way to subscribe to N before pop,Delete after testing
- /*for i := 1; i <= MaximumSubscription-len(userIDList); i++ {
- _, err := u.userCollection.UpdateOne(
- ctx,
- bson.M{"user_id": SubscriptionPrefix + userID},
- bson.M{SubscriptionPrefix + userID: bson.M{"$pop": -1}},
- )
- if err != nil {
- return err
- }
- }*/
- }
- upsert := true
- opts := &options.UpdateOptions{
- Upsert: &upsert,
- }
- _, err = u.userCollection.UpdateOne(
- ctx,
- bson.M{"user_id": SubscriptionPrefix + userID},
- bson.M{"$addToSet": bson.M{"user_id_list": bson.M{"$each": userIDList}}},
- opts,
- )
- if err != nil {
- return errs.Wrap(err)
- }
- for _, user := range userIDList {
- _, err = u.userCollection.UpdateOne(
- ctx,
- bson.M{"user_id": SubscribedPrefix + user},
- bson.M{"$addToSet": bson.M{"user_id_list": userID}},
- opts,
- )
- if err != nil {
- return errs.WrapMsg(err, "transaction failed")
- }
- }
- return nil
-}
-
-// UnsubscriptionList Handling of unsubscribe.
-func (u *UserMongoDriver) UnsubscriptionList(ctx context.Context, userID string, userIDList []string) error {
- _, err := u.userCollection.UpdateOne(
- ctx,
- bson.M{"user_id": SubscriptionPrefix + userID},
- bson.M{"$pull": bson.M{"user_id_list": bson.M{"$in": userIDList}}},
- )
- if err != nil {
- return errs.Wrap(err)
- }
- err = u.RemoveSubscribedListFromUser(ctx, userID, userIDList)
- if err != nil {
- return errs.Wrap(err)
- }
- return nil
-}
-
-// RemoveSubscribedListFromUser Among the unsubscribed users, delete the user from the subscribed list.
-func (u *UserMongoDriver) RemoveSubscribedListFromUser(ctx context.Context, userID string, userIDList []string) error {
- var err error
- for _, userIDTemp := range userIDList {
- _, err = u.userCollection.UpdateOne(
- ctx,
- bson.M{"user_id": SubscribedPrefix + userIDTemp},
- bson.M{"$pull": bson.M{"user_id_list": userID}},
- )
- }
- return errs.Wrap(err)
-}
-
-// GetAllSubscribeList Get all users subscribed by this user.
-func (u *UserMongoDriver) GetAllSubscribeList(ctx context.Context, userID string) (userIDList []string, err error) {
- var user model.SubscribeUser
- cursor := u.userCollection.FindOne(
- ctx,
- bson.M{"user_id": SubscriptionPrefix + userID})
- err = cursor.Decode(&user)
- if err != nil {
- if err == mongo.ErrNoDocuments {
- return []string{}, nil
- } else {
- return nil, errs.Wrap(err)
- }
- }
- return user.UserIDList, nil
-}
-
-// GetSubscribedList Get the user subscribed by those users.
-func (u *UserMongoDriver) GetSubscribedList(ctx context.Context, userID string) (userIDList []string, err error) {
- var user model.SubscribeUser
- cursor := u.userCollection.FindOne(
- ctx,
- bson.M{"user_id": SubscribedPrefix + userID})
- err = cursor.Decode(&user)
- if err != nil {
- if err == mongo.ErrNoDocuments {
- return []string{}, nil
- } else {
- return nil, errs.Wrap(err)
- }
- }
- return user.UserIDList, nil
-}
diff --git a/pkg/common/storage/database/mgo/user.go b/pkg/common/storage/database/mgo/user.go
index 96cb18882..8978e64eb 100644
--- a/pkg/common/storage/database/mgo/user.go
+++ b/pkg/common/storage/database/mgo/user.go
@@ -31,7 +31,7 @@ import (
)
func NewUserMongo(db *mongo.Database) (database.User, error) {
- coll := db.Collection("user")
+ coll := db.Collection(database.UserName)
_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
Keys: bson.D{
{Key: "user_id", Value: 1},
@@ -319,3 +319,69 @@ func (u *UserMgo) CountRangeEverydayTotal(ctx context.Context, start time.Time,
}
return res, nil
}
+
+func (u *UserMgo) SortQuery(ctx context.Context, userIDName map[string]string, asc bool) ([]*model.User, error) {
+ if len(userIDName) == 0 {
+ return nil, nil
+ }
+ userIDs := make([]string, 0, len(userIDName))
+ attached := make(map[string]string)
+ for userID, name := range userIDName {
+ userIDs = append(userIDs, userID)
+ if name == "" {
+ continue
+ }
+ attached[userID] = name
+ }
+ var sortValue int
+ if asc {
+ sortValue = 1
+ } else {
+ sortValue = -1
+ }
+ if len(attached) == 0 {
+ filter := bson.M{"user_id": bson.M{"$in": userIDs}}
+ opt := options.Find().SetSort(bson.M{"nickname": sortValue})
+ return mongoutil.Find[*model.User](ctx, u.coll, filter, opt)
+ }
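+	// Attach the caller-supplied display name for each user (falling back to nickname) and sort on it.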
+ pipeline := []bson.M{
+ {
+ "$match": bson.M{
+ "user_id": bson.M{"$in": userIDs},
+ },
+ },
+ {
+ "$addFields": bson.M{
+ "_query_sort_name": bson.M{
+ "$arrayElemAt": []any{
+ bson.M{
+ "$filter": bson.M{
+ "input": bson.M{
+ "$objectToArray": attached,
+ },
+ "as": "item",
+ "cond": bson.M{
+ "$eq": []any{"$$item.k", "$user_id"},
+ },
+ },
+ },
+ 0,
+ },
+ },
+ },
+ },
+ {
+ "$addFields": bson.M{
+ "_query_sort_name": bson.M{
+ "$ifNull": []any{"$_query_sort_name.v", "$nickname"},
+ },
+ },
+ },
+ {
+ "$sort": bson.M{
+ "_query_sort_name": sortValue,
+ },
+ },
+ }
+ return mongoutil.Aggregate[*model.User](ctx, u.coll, pipeline)
+}
diff --git a/pkg/common/storage/database/mgo/version_log.go b/pkg/common/storage/database/mgo/version_log.go
new file mode 100644
index 000000000..2c4bdef4e
--- /dev/null
+++ b/pkg/common/storage/database/mgo/version_log.go
@@ -0,0 +1,304 @@
+package mgo
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx"
+ "github.com/openimsdk/tools/db/mongoutil"
+ "github.com/openimsdk/tools/errs"
+ "github.com/openimsdk/tools/log"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+func NewVersionLog(coll *mongo.Collection) (database.VersionLog, error) {
+ lm := &VersionLogMgo{coll: coll}
+ if err := lm.initIndex(context.Background()); err != nil {
+ return nil, errs.WrapMsg(err, "init version log index failed", "coll", coll.Name())
+ }
+ return lm, nil
+}
+
+type VersionLogMgo struct {
+ coll *mongo.Collection
+}
+
+func (l *VersionLogMgo) initIndex(ctx context.Context) error {
+ _, err := l.coll.Indexes().CreateOne(ctx, mongo.IndexModel{
+ Keys: bson.M{
+ "d_id": 1,
+ },
+ Options: options.Index().SetUnique(true),
+ })
+
+ return err
+}
+
+func (l *VersionLogMgo) IncrVersion(ctx context.Context, dId string, eIds []string, state int32) error {
+ _, err := l.IncrVersionResult(ctx, dId, eIds, state)
+ return err
+}
+
+func (l *VersionLogMgo) IncrVersionResult(ctx context.Context, dId string, eIds []string, state int32) (*model.VersionLog, error) {
+ vl, err := l.incrVersionResult(ctx, dId, eIds, state)
+ if err != nil {
+ return nil, err
+ }
+ versionctx.GetVersionLog(ctx).Append(versionctx.Collection{
+ Name: l.coll.Name(),
+ Doc: vl,
+ })
+ return vl, nil
+}
+
+func (l *VersionLogMgo) incrVersionResult(ctx context.Context, dId string, eIds []string, state int32) (*model.VersionLog, error) {
+ if len(eIds) == 0 {
+ return nil, errs.ErrArgs.WrapMsg("elem id is empty", "dId", dId)
+ }
+ now := time.Now()
+ if res, err := l.writeLogBatch2(ctx, dId, eIds, state, now); err == nil {
+ return res, nil
+ } else if !errors.Is(err, mongo.ErrNoDocuments) {
+ return nil, err
+ }
+ if res, err := l.initDoc(ctx, dId, eIds, state, now); err == nil {
+ return res, nil
+ } else if !mongo.IsDuplicateKeyError(err) {
+ return nil, err
+ }
+ return l.writeLogBatch2(ctx, dId, eIds, state, now)
+}
+
+func (l *VersionLogMgo) initDoc(ctx context.Context, dId string, eIds []string, state int32, now time.Time) (*model.VersionLog, error) {
+ wl := model.VersionLogTable{
+ ID: primitive.NewObjectID(),
+ DID: dId,
+ Logs: make([]model.VersionLogElem, 0, len(eIds)),
+ Version: database.FirstVersion,
+ Deleted: database.DefaultDeleteVersion,
+ LastUpdate: now,
+ }
+ for _, eId := range eIds {
+ wl.Logs = append(wl.Logs, model.VersionLogElem{
+ EID: eId,
+ State: state,
+ Version: database.FirstVersion,
+ LastUpdate: now,
+ })
+ }
+ if _, err := l.coll.InsertOne(ctx, &wl); err != nil {
+ return nil, err
+ }
+ return wl.VersionLog(), nil
+}
+
+func (l *VersionLogMgo) writeLogBatch2(ctx context.Context, dId string, eIds []string, state int32, now time.Time) (*model.VersionLog, error) {
+ if eIds == nil {
+ eIds = []string{}
+ }
+ filter := bson.M{
+ "d_id": dId,
+ }
+ elems := make([]bson.M, 0, len(eIds))
+ for _, eId := range eIds {
+ elems = append(elems, bson.M{
+ "e_id": eId,
+ "version": "$version",
+ "state": state,
+ "last_update": now,
+ })
+ }
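+	// Update pipeline: bump the doc version, drop any existing log entries for eIds, then append fresh entries carrying the new version.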
+ pipeline := []bson.M{
+ {
+ "$addFields": bson.M{
+ "delete_e_ids": eIds,
+ },
+ },
+ {
+ "$set": bson.M{
+ "version": bson.M{"$add": []any{"$version", 1}},
+ "last_update": now,
+ },
+ },
+ {
+ "$set": bson.M{
+ "logs": bson.M{
+ "$filter": bson.M{
+ "input": "$logs",
+ "as": "log",
+ "cond": bson.M{
+ "$not": bson.M{
+ "$in": []any{"$$log.e_id", "$delete_e_ids"},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "$set": bson.M{
+ "logs": bson.M{
+ "$concatArrays": []any{
+ "$logs",
+ elems,
+ },
+ },
+ },
+ },
+ {
+ "$unset": "delete_e_ids",
+ },
+ }
+ projection := bson.M{
+ "logs": 0,
+ }
+ opt := options.FindOneAndUpdate().SetUpsert(false).SetReturnDocument(options.After).SetProjection(projection)
+ res, err := mongoutil.FindOneAndUpdate[*model.VersionLog](ctx, l.coll, filter, pipeline, opt)
+ if err != nil {
+ return nil, err
+ }
+ res.Logs = make([]model.VersionLogElem, 0, len(eIds))
+ for _, id := range eIds {
+ res.Logs = append(res.Logs, model.VersionLogElem{
+ EID: id,
+ State: state,
+ Version: res.Version,
+ LastUpdate: res.LastUpdate,
+ })
+ }
+ return res, nil
+}
+
+func (l *VersionLogMgo) findDoc(ctx context.Context, dId string) (*model.VersionLog, error) {
+ vl, err := mongoutil.FindOne[*model.VersionLogTable](ctx, l.coll, bson.M{"d_id": dId}, options.FindOne().SetProjection(bson.M{"logs": 0}))
+ if err != nil {
+ return nil, err
+ }
+ return vl.VersionLog(), nil
+}
+
+func (l *VersionLogMgo) FindChangeLog(ctx context.Context, dId string, version uint, limit int) (*model.VersionLog, error) {
+ if wl, err := l.findChangeLog(ctx, dId, version, limit); err == nil {
+ return wl, nil
+ } else if !errors.Is(err, mongo.ErrNoDocuments) {
+ return nil, err
+ }
+ log.ZDebug(ctx, "init doc", "dId", dId)
+ if res, err := l.initDoc(ctx, dId, nil, 0, time.Now()); err == nil {
+ log.ZDebug(ctx, "init doc success", "dId", dId)
+ return res, nil
+ } else if mongo.IsDuplicateKeyError(err) {
+ return l.findChangeLog(ctx, dId, version, limit)
+ } else {
+ return nil, err
+ }
+}
+
+func (l *VersionLogMgo) BatchFindChangeLog(ctx context.Context, dIds []string, versions []uint, limits []int) (vLogs []*model.VersionLog, err error) {
+	for i := 0; i < len(dIds); i++ {
+		if vLog, err := l.findChangeLog(ctx, dIds[i], versions[i], limits[i]); err == nil {
+			vLogs = append(vLogs, vLog)
+			continue
+		} else if !errors.Is(err, mongo.ErrNoDocuments) {
+			log.ZError(ctx, "findChangeLog error", errs.Wrap(err), "dId", dIds[i])
+			continue
+		}
+		log.ZDebug(ctx, "init doc", "dId", dIds[i])
+		if res, err := l.initDoc(ctx, dIds[i], nil, 0, time.Now()); err == nil {
+			log.ZDebug(ctx, "init doc success", "dId", dIds[i])
+			vLogs = append(vLogs, res)
+		} else if mongo.IsDuplicateKeyError(err) {
+			if vLog, err := l.findChangeLog(ctx, dIds[i], versions[i], limits[i]); err == nil {
+				vLogs = append(vLogs, vLog)
+			} else {
+				log.ZError(ctx, "findChangeLog after init error", errs.Wrap(err), "dId", dIds[i])
+			}
+		} else {
+			log.ZError(ctx, "init doc error", errs.Wrap(err), "dId", dIds[i])
+		}
+	}
+	return vLogs, nil
+}
+
+func (l *VersionLogMgo) findChangeLog(ctx context.Context, dId string, version uint, limit int) (*model.VersionLog, error) {
+ if version == 0 && limit == 0 {
+ return l.findDoc(ctx, dId)
+ }
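+	// Return only log entries newer than version; if the requested version is out of range, predates a delete, or the change count exceeds limit, return empty logs so the caller can fall back to a full sync.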
+ pipeline := []bson.M{
+ {
+ "$match": bson.M{
+ "d_id": dId,
+ },
+ },
+ {
+ "$addFields": bson.M{
+ "logs": bson.M{
+ "$cond": bson.M{
+ "if": bson.M{
+ "$or": []bson.M{
+ {"$lt": []any{"$version", version}},
+ {"$gte": []any{"$deleted", version}},
+ },
+ },
+ "then": []any{},
+ "else": "$logs",
+ },
+ },
+ },
+ },
+ {
+ "$addFields": bson.M{
+ "logs": bson.M{
+ "$filter": bson.M{
+ "input": "$logs",
+ "as": "l",
+ "cond": bson.M{
+ "$gt": []any{"$$l.version", version},
+ },
+ },
+ },
+ },
+ },
+ {
+ "$addFields": bson.M{
+ "log_len": bson.M{"$size": "$logs"},
+ },
+ },
+ {
+ "$addFields": bson.M{
+ "logs": bson.M{
+ "$cond": bson.M{
+ "if": bson.M{
+ "$gt": []any{"$log_len", limit},
+ },
+ "then": []any{},
+ "else": "$logs",
+ },
+ },
+ },
+ },
+ }
+ if limit <= 0 {
+ pipeline = pipeline[:len(pipeline)-1]
+ }
+ vl, err := mongoutil.Aggregate[*model.VersionLog](ctx, l.coll, pipeline)
+ if err != nil {
+ return nil, err
+ }
+ if len(vl) == 0 {
+ return nil, mongo.ErrNoDocuments
+ }
+ return vl[0], nil
+}
+
+func (l *VersionLogMgo) DeleteAfterUnchangedLog(ctx context.Context, deadline time.Time) error {
+ return mongoutil.DeleteMany(ctx, l.coll, bson.M{
+ "last_update": bson.M{
+ "$lt": deadline,
+ },
+ })
+}
+
+func (l *VersionLogMgo) Delete(ctx context.Context, dId string) error {
+ return mongoutil.DeleteOne(ctx, l.coll, bson.M{"d_id": dId})
+}
diff --git a/pkg/common/storage/database/mgo/version_test.go b/pkg/common/storage/database/mgo/version_test.go
new file mode 100644
index 000000000..4576e45bc
--- /dev/null
+++ b/pkg/common/storage/database/mgo/version_test.go
@@ -0,0 +1,39 @@
+package mgo
+
+import (
+ "context"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "testing"
+ "time"
+)
+
+func Check(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+func TestName(t *testing.T) {
+ cli := Result(mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
+ coll := cli.Database("openim_v3").Collection("version_test")
+ tmp, err := NewVersionLog(coll)
+ if err != nil {
+ panic(err)
+ }
+ vl := tmp.(*VersionLogMgo)
+ res, err := vl.incrVersionResult(context.Background(), "100", []string{"1000", "1001", "1003"}, model.VersionStateInsert)
+ if err != nil {
+ t.Log(err)
+ return
+ }
+ t.Logf("%+v", res)
+}
diff --git a/pkg/common/storage/database/msg.go b/pkg/common/storage/database/msg.go
index b402f3ac7..84f3a9e3e 100644
--- a/pkg/common/storage/database/msg.go
+++ b/pkg/common/storage/database/msg.go
@@ -37,7 +37,7 @@ type Msg interface {
GetMsgDocModelByIndex(ctx context.Context, conversationID string, index, sort int64) (*model.MsgDocModel, error)
DeleteMsgsInOneDocByIndex(ctx context.Context, docID string, indexes []int) error
MarkSingleChatMsgsAsRead(ctx context.Context, userID string, docID string, indexes []int64) error
- SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (int32, []*model.MsgInfoModel, error)
+ SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (int64, []*model.MsgInfoModel, error)
RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error)
RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error)
ConvertMsgsDocLen(ctx context.Context, conversationIDs []string)
diff --git a/pkg/common/storage/database/name.go b/pkg/common/storage/database/name.go
new file mode 100644
index 000000000..748bd844d
--- /dev/null
+++ b/pkg/common/storage/database/name.go
@@ -0,0 +1,20 @@
+package database
+
+const (
+ BlackName = "black"
+ ConversationName = "conversation"
+ FriendName = "friend"
+ FriendVersionName = "friend_version"
+ FriendRequestName = "friend_request"
+ GroupName = "group"
+ GroupMemberName = "group_member"
+ GroupMemberVersionName = "group_member_version"
+ GroupJoinVersionName = "group_join_version"
+ ConversationVersionName = "conversation_version"
+ GroupRequestName = "group_request"
+ LogName = "log"
+ ObjectName = "s3"
+ UserName = "user"
+ SeqConversationName = "seq"
+ SeqUserName = "seq_user"
+)
diff --git a/pkg/common/storage/database/object.go b/pkg/common/storage/database/object.go
index 554f71f35..8292006a0 100644
--- a/pkg/common/storage/database/object.go
+++ b/pkg/common/storage/database/object.go
@@ -16,11 +16,16 @@ package database
import (
"context"
+ "time"
+
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "github.com/openimsdk/tools/db/pagination"
)
type ObjectInfo interface {
SetObject(ctx context.Context, obj *model.Object) error
Take(ctx context.Context, engine string, name string) (*model.Object, error)
Delete(ctx context.Context, engine string, name string) error
+ FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error)
+ FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error)
}
diff --git a/pkg/common/storage/database/seq.go b/pkg/common/storage/database/seq.go
new file mode 100644
index 000000000..cf93b795f
--- /dev/null
+++ b/pkg/common/storage/database/seq.go
@@ -0,0 +1,11 @@
+package database
+
+import "context"
+
+type SeqConversation interface {
+ Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
+ GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
+ SetMaxSeq(ctx context.Context, conversationID string, seq int64) error
+ GetMinSeq(ctx context.Context, conversationID string) (int64, error)
+ SetMinSeq(ctx context.Context, conversationID string, seq int64) error
+}
diff --git a/pkg/common/storage/database/seq_user.go b/pkg/common/storage/database/seq_user.go
new file mode 100644
index 000000000..9f75c710b
--- /dev/null
+++ b/pkg/common/storage/database/seq_user.go
@@ -0,0 +1,13 @@
+package database
+
+import "context"
+
+type SeqUser interface {
+ GetUserMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
+ SetUserMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
+ GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
+ SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
+ GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
+ SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
+ GetUserReadSeqs(ctx context.Context, userID string, conversationID []string) (map[string]int64, error)
+}
diff --git a/pkg/common/storage/database/subscribe.go b/pkg/common/storage/database/subscribe.go
deleted file mode 100644
index 5905ecd07..000000000
--- a/pkg/common/storage/database/subscribe.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package database
-
-import "context"
-
-// SubscribeUser Operation interface of user mongodb.
-type SubscribeUser interface {
- // AddSubscriptionList Subscriber's handling of thresholds.
- AddSubscriptionList(ctx context.Context, userID string, userIDList []string) error
- // UnsubscriptionList Handling of unsubscribe.
- UnsubscriptionList(ctx context.Context, userID string, userIDList []string) error
- // RemoveSubscribedListFromUser Among the unsubscribed users, delete the user from the subscribed list.
- RemoveSubscribedListFromUser(ctx context.Context, userID string, userIDList []string) error
- // GetAllSubscribeList Get all users subscribed by this user
- GetAllSubscribeList(ctx context.Context, id string) (userIDList []string, err error)
- // GetSubscribedList Get the user subscribed by those users
- GetSubscribedList(ctx context.Context, id string) (userIDList []string, err error)
-}
diff --git a/pkg/common/storage/database/user.go b/pkg/common/storage/database/user.go
index 2e4088620..4ddc8285f 100644
--- a/pkg/common/storage/database/user.go
+++ b/pkg/common/storage/database/user.go
@@ -39,6 +39,9 @@ type User interface {
CountTotal(ctx context.Context, before *time.Time) (count int64, err error)
// Get user total quantity every day
CountRangeEverydayTotal(ctx context.Context, start time.Time, end time.Time) (map[string]int64, error)
+
+ SortQuery(ctx context.Context, userIDName map[string]string, asc bool) ([]*model.User, error)
+
// CRUD user command
AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error
DeleteUserCommand(ctx context.Context, userID string, Type int32, UUID string) error
diff --git a/pkg/common/storage/database/version_log.go b/pkg/common/storage/database/version_log.go
new file mode 100644
index 000000000..28224a7c7
--- /dev/null
+++ b/pkg/common/storage/database/version_log.go
@@ -0,0 +1,21 @@
+package database
+
+import (
+ "context"
+ "time"
+
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+)
+
+const (
+ FirstVersion = 1
+ DefaultDeleteVersion = 0
+)
+
+type VersionLog interface {
+ IncrVersion(ctx context.Context, dId string, eIds []string, state int32) error
+ FindChangeLog(ctx context.Context, dId string, version uint, limit int) (*model.VersionLog, error)
+ BatchFindChangeLog(ctx context.Context, dIds []string, versions []uint, limits []int) ([]*model.VersionLog, error)
+ DeleteAfterUnchangedLog(ctx context.Context, deadline time.Time) error
+ Delete(ctx context.Context, dId string) error
+}
diff --git a/pkg/common/storage/model/friend.go b/pkg/common/storage/model/friend.go
index 60a40d9c2..abcca2f2b 100644
--- a/pkg/common/storage/model/friend.go
+++ b/pkg/common/storage/model/friend.go
@@ -15,17 +15,19 @@
package model
import (
+ "go.mongodb.org/mongo-driver/bson/primitive"
"time"
)
// Friend represents the data structure for a friend relationship in MongoDB.
type Friend struct {
- OwnerUserID string `bson:"owner_user_id"`
- FriendUserID string `bson:"friend_user_id"`
- Remark string `bson:"remark"`
- CreateTime time.Time `bson:"create_time"`
- AddSource int32 `bson:"add_source"`
- OperatorUserID string `bson:"operator_user_id"`
- Ex string `bson:"ex"`
- IsPinned bool `bson:"is_pinned"`
+ ID primitive.ObjectID `bson:"_id"`
+ OwnerUserID string `bson:"owner_user_id"`
+ FriendUserID string `bson:"friend_user_id"`
+ Remark string `bson:"remark"`
+ CreateTime time.Time `bson:"create_time"`
+ AddSource int32 `bson:"add_source"`
+ OperatorUserID string `bson:"operator_user_id"`
+ Ex string `bson:"ex"`
+ IsPinned bool `bson:"is_pinned"`
}
diff --git a/pkg/common/storage/model/seq.go b/pkg/common/storage/model/seq.go
new file mode 100644
index 000000000..1dc75eff1
--- /dev/null
+++ b/pkg/common/storage/model/seq.go
@@ -0,0 +1,7 @@
+package model
+
+type SeqConversation struct {
+ ConversationID string `bson:"conversation_id"`
+ MaxSeq int64 `bson:"max_seq"`
+ MinSeq int64 `bson:"min_seq"`
+}
diff --git a/pkg/common/storage/model/seq_user.go b/pkg/common/storage/model/seq_user.go
new file mode 100644
index 000000000..845996bb8
--- /dev/null
+++ b/pkg/common/storage/model/seq_user.go
@@ -0,0 +1,9 @@
+package model
+
+type SeqUser struct {
+ UserID string `bson:"user_id"`
+ ConversationID string `bson:"conversation_id"`
+ MinSeq int64 `bson:"min_seq"`
+ MaxSeq int64 `bson:"max_seq"`
+ ReadSeq int64 `bson:"read_seq"`
+}
diff --git a/pkg/common/storage/model/user.go b/pkg/common/storage/model/user.go
index c6a4f952c..f64d09e79 100644
--- a/pkg/common/storage/model/user.go
+++ b/pkg/common/storage/model/user.go
@@ -36,10 +36,10 @@ func (u *User) GetFaceURL() string {
return u.FaceURL
}
-func (u User) GetUserID() string {
+func (u *User) GetUserID() string {
return u.UserID
}
-func (u User) GetEx() string {
+func (u *User) GetEx() string {
return u.Ex
}
diff --git a/pkg/common/storage/model/version_log.go b/pkg/common/storage/model/version_log.go
new file mode 100644
index 000000000..6ed8d30f2
--- /dev/null
+++ b/pkg/common/storage/model/version_log.go
@@ -0,0 +1,74 @@
+package model
+
+import (
+ "context"
+ "errors"
+ "github.com/openimsdk/tools/log"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "time"
+)
+
+const (
+ VersionStateInsert = iota + 1
+ VersionStateDelete
+ VersionStateUpdate
+)
+
+const (
+ VersionGroupChangeID = ""
+ VersionSortChangeID = "____S_O_R_T_I_D____"
+)
+
+type VersionLogElem struct {
+ EID string `bson:"e_id"`
+ State int32 `bson:"state"`
+ Version uint `bson:"version"`
+ LastUpdate time.Time `bson:"last_update"`
+}
+
+type VersionLogTable struct {
+ ID primitive.ObjectID `bson:"_id"`
+ DID string `bson:"d_id"`
+ Logs []VersionLogElem `bson:"logs"`
+ Version uint `bson:"version"`
+ Deleted uint `bson:"deleted"`
+ LastUpdate time.Time `bson:"last_update"`
+}
+
+func (v *VersionLogTable) VersionLog() *VersionLog {
+ return &VersionLog{
+ ID: v.ID,
+ DID: v.DID,
+ Logs: v.Logs,
+ Version: v.Version,
+ Deleted: v.Deleted,
+ LastUpdate: v.LastUpdate,
+ LogLen: len(v.Logs),
+ }
+}
+
+type VersionLog struct {
+ ID primitive.ObjectID `bson:"_id"`
+ DID string `bson:"d_id"`
+ Logs []VersionLogElem `bson:"logs"`
+ Version uint `bson:"version"`
+ Deleted uint `bson:"deleted"`
+ LastUpdate time.Time `bson:"last_update"`
+ LogLen int `bson:"log_len"`
+}
+
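+// DeleteAndChangeIDs partitions the log entries by state into inserted, deleted and updated
+// element IDs; entries with an unknown state are logged as dirty data and skipped.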
+func (v *VersionLog) DeleteAndChangeIDs() (insertIds, deleteIds, updateIds []string) {
+ for _, l := range v.Logs {
+ switch l.State {
+ case VersionStateInsert:
+ insertIds = append(insertIds, l.EID)
+ case VersionStateDelete:
+ deleteIds = append(deleteIds, l.EID)
+ case VersionStateUpdate:
+ updateIds = append(updateIds, l.EID)
+ default:
+ log.ZError(context.Background(), "invalid version status found", errors.New("dirty database data"), "objID", v.ID.Hex(), "did", v.DID, "elem", l)
+ }
+ }
+ return
+}
diff --git a/pkg/common/storage/versionctx/rpc.go b/pkg/common/storage/versionctx/rpc.go
new file mode 100644
index 000000000..67b95aebd
--- /dev/null
+++ b/pkg/common/storage/versionctx/rpc.go
@@ -0,0 +1,14 @@
+package versionctx
+
+import (
+ "context"
+ "google.golang.org/grpc"
+)
+
+func EnableVersionCtx() grpc.ServerOption {
+ return grpc.ChainUnaryInterceptor(enableVersionCtxInterceptor)
+}
+
+func enableVersionCtxInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+ return handler(WithVersionLog(ctx), req)
+}
diff --git a/pkg/common/storage/versionctx/version.go b/pkg/common/storage/versionctx/version.go
new file mode 100644
index 000000000..5db885640
--- /dev/null
+++ b/pkg/common/storage/versionctx/version.go
@@ -0,0 +1,48 @@
+package versionctx
+
+import (
+ "context"
+ tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+ "sync"
+)
+
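+// Collection is a single versioned change captured while handling an RPC: the name of the
+// written collection and the VersionLog document produced by that write.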
+type Collection struct {
+ Name string
+ Doc *tablerelation.VersionLog
+}
+
+type versionKey struct{}
+
+func WithVersionLog(ctx context.Context) context.Context {
+ return context.WithValue(ctx, versionKey{}, &VersionLog{})
+}
+
+func GetVersionLog(ctx context.Context) *VersionLog {
+ if v, ok := ctx.Value(versionKey{}).(*VersionLog); ok {
+ return v
+ }
+ return nil
+}
+
+type VersionLog struct {
+ lock sync.Mutex
+ data []Collection
+}
+
+func (v *VersionLog) Append(data ...Collection) {
+ if v == nil || len(data) == 0 {
+ return
+ }
+ v.lock.Lock()
+ defer v.lock.Unlock()
+ v.data = append(v.data, data...)
+}
+
+func (v *VersionLog) Get() []Collection {
+ if v == nil {
+ return nil
+ }
+ v.lock.Lock()
+ defer v.lock.Unlock()
+ return v.data
+}
diff --git a/pkg/localcache/cache.go b/pkg/localcache/cache.go
index 0e040ad38..ba849f892 100644
--- a/pkg/localcache/cache.go
+++ b/pkg/localcache/cache.go
@@ -31,6 +31,12 @@ type Cache[V any] interface {
Stop()
}
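+// LRUStringHash hashes a string key with 64-bit FNV-1a; the slotted LRU uses it to pick a slot.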
+func LRUStringHash(key string) uint64 {
+ h := fnv.New64a()
+ h.Write(*(*[]byte)(unsafe.Pointer(&key)))
+ return h.Sum64()
+}
+
func New[V any](opts ...Option) Cache[V] {
opt := defaultOption()
for _, o := range opts {
@@ -49,11 +55,7 @@ func New[V any](opts ...Option) Cache[V] {
if opt.localSlotNum == 1 {
c.local = createSimpleLRU()
} else {
- c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, func(key string) uint64 {
- h := fnv.New64a()
- h.Write(*(*[]byte)(unsafe.Pointer(&key)))
- return h.Sum64()
- }, createSimpleLRU)
+ c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, LRUStringHash, createSimpleLRU)
}
if opt.linkSlotNum > 0 {
c.link = link.New(opt.linkSlotNum)
diff --git a/pkg/localcache/lru/lru.go b/pkg/localcache/lru/lru.go
index 64280f238..2fedffc48 100644
--- a/pkg/localcache/lru/lru.go
+++ b/pkg/localcache/lru/lru.go
@@ -20,6 +20,7 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V]
type LRU[K comparable, V any] interface {
Get(key K, fetch func() (V, error)) (V, error)
+ SetHas(key K, value V) bool
Del(key K) bool
Stop()
}
diff --git a/pkg/localcache/lru/lru_expiration.go b/pkg/localcache/lru/lru_expiration.go
index 970ac083e..d27e67057 100644
--- a/pkg/localcache/lru/lru_expiration.go
+++ b/pkg/localcache/lru/lru_expiration.go
@@ -89,5 +89,15 @@ func (x *ExpirationLRU[K, V]) Del(key K) bool {
return ok
}
+func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool {
+ x.lock.Lock()
+ defer x.lock.Unlock()
+ if x.core.Contains(key) {
+ x.core.Add(key, &expirationLruItem[V]{value: value})
+ return true
+ }
+ return false
+}
+
func (x *ExpirationLRU[K, V]) Stop() {
}
diff --git a/pkg/localcache/lru/lru_lazy.go b/pkg/localcache/lru/lru_lazy.go
index d6e64aae4..e935c687c 100644
--- a/pkg/localcache/lru/lru_lazy.go
+++ b/pkg/localcache/lru/lru_lazy.go
@@ -88,6 +88,28 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return v.value, v.err
}
+//func (x *LayLRU[K, V]) Set(key K, value V) {
+// x.lock.Lock()
+// x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
+// x.lock.Unlock()
+//}
+//
+//func (x *LayLRU[K, V]) Has(key K) bool {
+// x.lock.Lock()
+// defer x.lock.Unlock()
+// return x.core.Contains(key)
+//}
+
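+// SetHas stores value under key only if key is already present in the cache and reports whether it was.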
+func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
+ x.lock.Lock()
+ defer x.lock.Unlock()
+ if x.core.Contains(key) {
+ x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
+ return true
+ }
+ return false
+}
+
func (x *LayLRU[K, V]) Del(key K) bool {
x.lock.Lock()
ok := x.core.Remove(key)
diff --git a/pkg/localcache/lru/lru_slot.go b/pkg/localcache/lru/lru_slot.go
index d034e94d3..4538ca20e 100644
--- a/pkg/localcache/lru/lru_slot.go
+++ b/pkg/localcache/lru/lru_slot.go
@@ -40,6 +40,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return x.slots[x.getIndex(key)].Get(key, fetch)
}
+func (x *slotLRU[K, V]) SetHas(key K, value V) bool {
+ return x.slots[x.getIndex(key)].SetHas(key, value)
+}
+
func (x *slotLRU[K, V]) Del(key K) bool {
return x.slots[x.getIndex(key)].Del(key)
}
diff --git a/pkg/localcache/option.go b/pkg/localcache/option.go
index 00bb9d044..7d91aba6c 100644
--- a/pkg/localcache/option.go
+++ b/pkg/localcache/option.go
@@ -30,7 +30,7 @@ func defaultOption() *option {
localSuccessTTL: time.Minute,
localFailedTTL: time.Second * 5,
delFn: make([]func(ctx context.Context, key ...string), 0, 2),
- target: emptyTarget{},
+ target: EmptyTarget{},
}
}
@@ -123,14 +123,14 @@ func WithDeleteKeyBefore(fn func(ctx context.Context, key ...string)) Option {
}
}
-type emptyTarget struct{}
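+// EmptyTarget is a no-op statistics collector used when cache metrics are not wanted.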
+type EmptyTarget struct{}
-func (e emptyTarget) IncrGetHit() {}
+func (e EmptyTarget) IncrGetHit() {}
-func (e emptyTarget) IncrGetSuccess() {}
+func (e EmptyTarget) IncrGetSuccess() {}
-func (e emptyTarget) IncrGetFailed() {}
+func (e EmptyTarget) IncrGetFailed() {}
-func (e emptyTarget) IncrDelHit() {}
+func (e EmptyTarget) IncrDelHit() {}
-func (e emptyTarget) IncrDelNotFound() {}
+func (e EmptyTarget) IncrDelNotFound() {}
diff --git a/pkg/msgprocessor/conversation.go b/pkg/msgprocessor/conversation.go
index b369269cc..f8140cc7d 100644
--- a/pkg/msgprocessor/conversation.go
+++ b/pkg/msgprocessor/conversation.go
@@ -24,6 +24,10 @@ import (
"google.golang.org/protobuf/proto"
)
+func IsGroupConversationID(conversationID string) bool {
+ return strings.HasPrefix(conversationID, "g_") || strings.HasPrefix(conversationID, "sg_")
+}
+
func GetNotificationConversationIDByMsg(msg *sdkws.MsgData) string {
switch msg.SessionType {
case constant.SingleChatType:
diff --git a/pkg/rpccache/conversation.go b/pkg/rpccache/conversation.go
index 4c00dd1f7..0109f1b1d 100644
--- a/pkg/rpccache/conversation.go
+++ b/pkg/rpccache/conversation.go
@@ -16,15 +16,19 @@ package rpccache
import (
"context"
- "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
-
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ conversationWorkerCount = 20
)
func NewConversationLocalCache(client rpcclient.ConversationRpcClient, localCache *config.LocalCache, cli redis.UniversalClient) *ConversationLocalCache {
@@ -90,15 +94,33 @@ func (c *ConversationLocalCache) GetSingleConversationRecvMsgOpt(ctx context.Con
}
func (c *ConversationLocalCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*pbconversation.Conversation, error) {
- conversations := make([]*pbconversation.Conversation, 0, len(conversationIDs))
+ var (
+ conversations = make([]*pbconversation.Conversation, 0, len(conversationIDs))
+ conversationsChan = make(chan *pbconversation.Conversation, len(conversationIDs))
+ )
+
+ g, ctx := errgroup.WithContext(ctx)
+ g.SetLimit(conversationWorkerCount)
+
for _, conversationID := range conversationIDs {
- conversation, err := c.GetConversation(ctx, ownerUserID, conversationID)
- if err != nil {
- if errs.ErrRecordNotFound.Is(err) {
- continue
+ conversationID := conversationID
+ g.Go(func() error {
+ conversation, err := c.GetConversation(ctx, ownerUserID, conversationID)
+ if err != nil {
+ if errs.ErrRecordNotFound.Is(err) {
+ return nil
+ }
+ return err
}
- return nil, err
- }
+ conversationsChan <- conversation
+ return nil
+ })
+ }
+ if err := g.Wait(); err != nil {
+ return nil, err
+ }
+ close(conversationsChan)
+ for conversation := range conversationsChan {
conversations = append(conversations, conversation)
}
return conversations, nil
diff --git a/pkg/rpccache/online.go b/pkg/rpccache/online.go
new file mode 100644
index 000000000..5db68d198
--- /dev/null
+++ b/pkg/rpccache/online.go
@@ -0,0 +1,100 @@
+package rpccache
+
+import (
+ "context"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+ "github.com/openimsdk/open-im-server/v3/pkg/localcache"
+ "github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
+ "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
+ "github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
+ "github.com/openimsdk/tools/log"
+ "github.com/openimsdk/tools/mcontext"
+ "github.com/redis/go-redis/v9"
+ "math/rand"
+ "strconv"
+ "time"
+)
+
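+// NewOnlineCache keeps a local LRU of each user's online platform IDs and refreshes it by
+// subscribing to online-status messages published on the Redis online channel; fn, when not
+// nil, is invoked for every received status change.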
+func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache {
+ x := &OnlineCache{
+ user: user,
+ group: group,
+ local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
+ return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
+ }),
+ }
+ go func() {
+ ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10))
+ for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() {
+ userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
+ if err != nil {
+ log.ZError(ctx, "OnlineCache redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
+ continue
+ }
+ storageCache := x.setUserOnline(userID, platformIDs)
+ log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
+ if fn != nil {
+ fn(ctx, userID, platformIDs)
+ }
+ }
+ }()
+ return x
+}
+
+type OnlineCache struct {
+ user rpcclient.UserRpcClient
+ group *GroupLocalCache
+ local lru.LRU[string, []int32]
+}
+
+func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
+ return o.local.Get(userID, func() ([]int32, error) {
+ return o.user.GetUserOnlinePlatform(ctx, userID)
+ })
+}
+
+func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
+ platformIDs, err := o.GetUserOnlinePlatform(ctx, userID)
+ if err != nil {
+ return false, err
+ }
+ return len(platformIDs) > 0, nil
+}
+
+func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
+ onlineUserIDs := make([]string, 0, len(userIDs))
+ for _, userID := range userIDs {
+ online, err := o.GetUserOnline(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ if online {
+ onlineUserIDs = append(onlineUserIDs, userID)
+ }
+ }
+ log.ZDebug(ctx, "OnlineCache GetUsersOnline", "userIDs", userIDs, "onlineUserIDs", onlineUserIDs)
+ return onlineUserIDs, nil
+}
+
+func (o *OnlineCache) GetGroupOnline(ctx context.Context, groupID string) ([]string, error) {
+ userIDs, err := o.group.GetGroupMemberIDs(ctx, groupID)
+ if err != nil {
+ return nil, err
+ }
+ var onlineUserIDs []string
+ for _, userID := range userIDs {
+ online, err := o.GetUserOnline(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ if online {
+ onlineUserIDs = append(onlineUserIDs, userID)
+ }
+ }
+ log.ZDebug(ctx, "OnlineCache GetGroupOnline", "groupID", groupID, "onlineUserIDs", onlineUserIDs, "allUserID", userIDs)
+ return onlineUserIDs, nil
+}
+
+func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool {
+ return o.local.SetHas(userID, platformIDs)
+}
diff --git a/pkg/rpccache/user.go b/pkg/rpccache/user.go
index 25a8eb20d..6126f5891 100644
--- a/pkg/rpccache/user.go
+++ b/pkg/rpccache/user.go
@@ -110,3 +110,18 @@ func (u *UserLocalCache) GetUsersInfoMap(ctx context.Context, userIDs []string)
}
return users, nil
}
+
+//func (u *UserLocalCache) GetUserOnlinePlatform(ctx context.Context, userID string) (val []int32, err error) {
+// log.ZDebug(ctx, "UserLocalCache GetUserOnlinePlatform req", "userID", userID)
+// defer func() {
+// if err == nil {
+// log.ZDebug(ctx, "UserLocalCache GetUserOnlinePlatform return", "value", val)
+// } else {
+// log.ZError(ctx, "UserLocalCache GetUserOnlinePlatform return", err)
+// }
+// }()
+// return localcache.AnyValue[[]int32](u.local.Get(ctx, cachekey.GetOnlineKey(userID), func(ctx context.Context) (any, error) {
+// log.ZDebug(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt rpc", "userID", userID)
+// return u.client.GetUserGlobalMsgRecvOpt(ctx, userID)
+// }))
+//}
diff --git a/pkg/rpcclient/friend.go b/pkg/rpcclient/friend.go
index 5543afe4f..fd00be329 100644
--- a/pkg/rpcclient/friend.go
+++ b/pkg/rpcclient/friend.go
@@ -17,7 +17,7 @@ package rpcclient
import (
"context"
- "github.com/openimsdk/protocol/friend"
+ "github.com/openimsdk/protocol/relation"
sdkws "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/system/program"
@@ -26,7 +26,7 @@ import (
type Friend struct {
conn grpc.ClientConnInterface
- Client friend.FriendClient
+ Client relation.FriendClient
discov discovery.SvcDiscoveryRegistry
}
@@ -35,7 +35,7 @@ func NewFriend(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) *F
if err != nil {
program.ExitWithError(err)
}
- client := friend.NewFriendClient(conn)
+ client := relation.NewFriendClient(conn)
return &Friend{discov: discov, conn: conn, Client: client}
}
@@ -47,11 +47,11 @@ func NewFriendRpcClient(discov discovery.SvcDiscoveryRegistry, rpcRegisterName s
func (f *FriendRpcClient) GetFriendsInfo(
ctx context.Context,
- ownerUserID, friendUserID string,
+ ownerUserID, relationUserID string,
) (resp *sdkws.FriendInfo, err error) {
r, err := f.Client.GetDesignatedFriends(
ctx,
- &friend.GetDesignatedFriendsReq{OwnerUserID: ownerUserID, FriendUserIDs: []string{friendUserID}},
+ &relation.GetDesignatedFriendsReq{OwnerUserID: ownerUserID, FriendUserIDs: []string{relationUserID}},
)
if err != nil {
return nil, err
@@ -60,17 +60,17 @@ func (f *FriendRpcClient) GetFriendsInfo(
return
}
-// possibleFriendUserID Is PossibleFriendUserId's friends.
+// IsFriend reports whether possibleFriendUserID is in userID's friend list.
func (f *FriendRpcClient) IsFriend(ctx context.Context, possibleFriendUserID, userID string) (bool, error) {
- resp, err := f.Client.IsFriend(ctx, &friend.IsFriendReq{UserID1: userID, UserID2: possibleFriendUserID})
+ resp, err := f.Client.IsFriend(ctx, &relation.IsFriendReq{UserID1: userID, UserID2: possibleFriendUserID})
if err != nil {
return false, err
}
return resp.InUser1Friends, nil
}
-func (f *FriendRpcClient) GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error) {
- req := friend.GetFriendIDsReq{UserID: ownerUserID}
+func (f *FriendRpcClient) GetFriendIDs(ctx context.Context, ownerUserID string) (relationIDs []string, err error) {
+ req := relation.GetFriendIDsReq{UserID: ownerUserID}
resp, err := f.Client.GetFriendIDs(ctx, &req)
if err != nil {
return nil, err
@@ -79,7 +79,7 @@ func (f *FriendRpcClient) GetFriendIDs(ctx context.Context, ownerUserID string)
}
func (b *FriendRpcClient) IsBlack(ctx context.Context, possibleBlackUserID, userID string) (bool, error) {
- r, err := b.Client.IsBlack(ctx, &friend.IsBlackReq{UserID1: possibleBlackUserID, UserID2: userID})
+ r, err := b.Client.IsBlack(ctx, &relation.IsBlackReq{UserID1: possibleBlackUserID, UserID2: userID})
if err != nil {
return false, err
}
diff --git a/pkg/rpcclient/third.go b/pkg/rpcclient/third.go
index 4c71dff6a..7cdc60d52 100644
--- a/pkg/rpcclient/third.go
+++ b/pkg/rpcclient/third.go
@@ -41,3 +41,7 @@ func NewThird(discov discovery.SvcDiscoveryRegistry, rpcRegisterName, grafanaUrl
}
return &Third{discov: discov, Client: client, conn: conn, GrafanaUrl: grafanaUrl}
}
+func (t *Third) DeleteOutdatedData(ctx context.Context, expires int64) error {
+ _, err := t.Client.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: expires})
+ return err
+}
diff --git a/pkg/rpcclient/user.go b/pkg/rpcclient/user.go
index aab96603e..eabe77b94 100644
--- a/pkg/rpcclient/user.go
+++ b/pkg/rpcclient/user.go
@@ -193,3 +193,25 @@ func (u *UserRpcClient) GetNotificationByID(ctx context.Context, userID string)
})
return err
}
+
+func (u *UserRpcClient) GetUsersOnlinePlatform(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
+ if len(userIDs) == 0 {
+ return nil, nil
+ }
+ resp, err := u.Client.GetUserStatus(ctx, &user.GetUserStatusReq{UserIDs: userIDs, UserID: u.imAdminUserID[0]})
+ if err != nil {
+ return nil, err
+ }
+ return resp.StatusList, nil
+}
+
+func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
+ resp, err := u.GetUsersOnlinePlatform(ctx, []string{userID})
+ if err != nil {
+ return nil, err
+ }
+ if len(resp) == 0 {
+ return nil, nil
+ }
+ return resp[0].PlatformIDs, nil
+}
diff --git a/pkg/util/hashutil/id.go b/pkg/util/hashutil/id.go
new file mode 100644
index 000000000..52e7f4c6f
--- /dev/null
+++ b/pkg/util/hashutil/id.go
@@ -0,0 +1,16 @@
+package hashutil
+
+import (
+ "crypto/md5"
+ "encoding/binary"
+ "encoding/json"
+)
+
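+// IdHash returns a stable 64-bit hash of ids, taken from the first 8 bytes of the MD5 of
+// their JSON encoding; an empty slice hashes to 0.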
+func IdHash(ids []string) uint64 {
+ if len(ids) == 0 {
+ return 0
+ }
+ data, _ := json.Marshal(ids)
+ sum := md5.Sum(data)
+ return binary.BigEndian.Uint64(sum[:])
+}
diff --git a/pkg/util/useronline/split.go b/pkg/util/useronline/split.go
new file mode 100644
index 000000000..c39d31d15
--- /dev/null
+++ b/pkg/util/useronline/split.go
@@ -0,0 +1,27 @@
+package useronline
+
+import (
+ "errors"
+ "strconv"
+ "strings"
+)
+
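+// ParseUserOnlineStatus parses a payload of the form "<platformID>:<platformID>:...:<userID>"
+// into the user ID and the list of online platform IDs.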
+func ParseUserOnlineStatus(payload string) (string, []int32, error) {
+ arr := strings.Split(payload, ":")
+ if len(arr) == 0 {
+ return "", nil, errors.New("invalid data")
+ }
+ userID := arr[len(arr)-1]
+ if userID == "" {
+ return "", nil, errors.New("userID is empty")
+ }
+ platformIDs := make([]int32, len(arr)-1)
+ for i := range platformIDs {
+ platformID, err := strconv.Atoi(arr[i])
+ if err != nil {
+ return "", nil, err
+ }
+ platformIDs[i] = int32(platformID)
+ }
+ return userID, platformIDs, nil
+}
diff --git a/scripts/mongo-init.sh b/scripts/mongo-init.sh
index 01199c480..25bb2d654 100755
--- a/scripts/mongo-init.sh
+++ b/scripts/mongo-init.sh
@@ -11,38 +11,56 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
mongosh <<EOF
+var maxRetries = 300;
+var connected = false;
+
+var rootUsername = '$MONGO_INITDB_ROOT_USERNAME';
+var rootPassword = '$MONGO_INITDB_ROOT_PASSWORD';
+var openimUsername = '$MONGO_OPENIM_USERNAME';
+var openimPassword = '$MONGO_OPENIM_PASSWORD';
+var dbName = '$MONGO_INITDB_DATABASE';
+
+while (!connected && maxRetries > 0) {
+ try {
+ db = connect('mongodb://127.0.0.1:27017/admin');
+ var authResult = db.auth(rootUsername, rootPassword);
+ if (authResult) {
+ print('Authentication successful for root user: ' + rootUsername);
+ connected = true;
+ } else {
+ print('Authentication failed for root user: ' + rootUsername);
+ quit(1);
+ }
+ } catch (e) {
+ maxRetries--;
+ print('Connection failed, retrying... Remaining attempts: ' + maxRetries);
+ sleep(1000); // Sleep for 1 second
+ }
+}
+
+if (connected) {
+ db = db.getSiblingDB(dbName);
+ var createUserResult = db.createUser({
+ user: openimUsername,
+ pwd: openimPassword,
+ roles: [{
+ role: 'readWrite',
+ db: dbName
+ }]
+ });
+
+ if (createUserResult.ok == 1) {
+ print('User creation successful. User: ' + openimUsername + ', Database: ' + dbName);
+ } else {
+ print('User creation failed for user: ' + openimUsername + ' in database: ' + dbName);
+ quit(1);
+ }
} else {
- print('User creation failed for user: ' + openimUsername + ' in database: ' + dbName);
- quit(1);
+ print('Failed to connect to MongoDB after 300 retries.');
+ quit(1);
}
EOF
+
diff --git a/start-config.yml b/start-config.yml
index a9c412b33..21436d7a9 100644
--- a/start-config.yml
+++ b/start-config.yml
@@ -14,4 +14,5 @@ serviceBinaries:
toolBinaries:
- check-free-memory
- check-component
+ - seq
maxFileDescriptors: 10000
diff --git a/tools/seq/internal/main.go b/tools/seq/internal/main.go
new file mode 100644
index 000000000..2bec5a8f1
--- /dev/null
+++ b/tools/seq/internal/main.go
@@ -0,0 +1,331 @@
+package internal
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/config"
+ "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
+ "github.com/openimsdk/tools/db/mongoutil"
+ "github.com/openimsdk/tools/db/redisutil"
+ "github.com/redis/go-redis/v9"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "gopkg.in/yaml.v3"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+)
+
+const (
+ MaxSeq = "MAX_SEQ:"
+ MinSeq = "MIN_SEQ:"
+ ConversationUserMinSeq = "CON_USER_MIN_SEQ:"
+ HasReadSeq = "HAS_READ_SEQ:"
+)
+
+const (
+ batchSize = 100
+ dataVersionCollection = "data_version"
+ seqKey = "seq"
+ seqVersion = 38
+)
+
+func readConfig[T any](dir string, name string) (*T, error) {
+ data, err := os.ReadFile(filepath.Join(dir, name))
+ if err != nil {
+ return nil, err
+ }
+ var conf T
+ if err := yaml.Unmarshal(data, &conf); err != nil {
+ return nil, err
+ }
+ return &conf, nil
+}
+
+func Main(conf string, del time.Duration) error {
+ redisConfig, err := readConfig[config.Redis](conf, cmd.RedisConfigFileName)
+ if err != nil {
+ return err
+ }
+ mongodbConfig, err := readConfig[config.Mongo](conf, cmd.MongodbConfigFileName)
+ if err != nil {
+ return err
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ defer cancel()
+ rdb, err := redisutil.NewRedisClient(ctx, redisConfig.Build())
+ if err != nil {
+ return err
+ }
+ mgocli, err := mongoutil.NewMongoDB(ctx, mongodbConfig.Build())
+ if err != nil {
+ return err
+ }
+ versionColl := mgocli.GetDB().Collection(dataVersionCollection)
+ converted, err := CheckVersion(versionColl, seqKey, seqVersion)
+ if err != nil {
+ return err
+ }
+ if converted {
+ fmt.Println("[seq] seq data has been converted")
+ return nil
+ }
+ if _, err := mgo.NewSeqConversationMongo(mgocli.GetDB()); err != nil {
+ return err
+ }
+ cSeq, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
+ if err != nil {
+ return err
+ }
+ uSeq, err := mgo.NewSeqUserMongo(mgocli.GetDB())
+ if err != nil {
+ return err
+ }
+ uSplitHasReadSeq := func(id string) (conversationID string, userID string, err error) {
+ // HasReadSeq + userID + ":" + conversationID
+ arr := strings.Split(id, ":")
+ if len(arr) != 2 || arr[0] == "" || arr[1] == "" {
+ return "", "", fmt.Errorf("invalid has read seq id %s", id)
+ }
+ userID = arr[0]
+ conversationID = arr[1]
+ return
+ }
+ uSplitConversationUserMinSeq := func(id string) (conversationID string, userID string, err error) {
+ // ConversationUserMinSeq + conversationID + "u:" + userID
+ arr := strings.Split(id, "u:")
+ if len(arr) != 2 || arr[0] == "" || arr[1] == "" {
+ return "", "", fmt.Errorf("invalid has read seq id %s", id)
+ }
+ conversationID = arr[0]
+ userID = arr[1]
+ return
+ }
+
+ ts := []*taskSeq{
+ {
+ Prefix: MaxSeq,
+ GetSeq: cSeq.GetMaxSeq,
+ SetSeq: cSeq.SetMaxSeq,
+ },
+ {
+ Prefix: MinSeq,
+ GetSeq: cSeq.GetMinSeq,
+ SetSeq: cSeq.SetMinSeq,
+ },
+ {
+ Prefix: HasReadSeq,
+ GetSeq: func(ctx context.Context, id string) (int64, error) {
+ conversationID, userID, err := uSplitHasReadSeq(id)
+ if err != nil {
+ return 0, err
+ }
+ return uSeq.GetUserReadSeq(ctx, conversationID, userID)
+ },
+ SetSeq: func(ctx context.Context, id string, seq int64) error {
+ conversationID, userID, err := uSplitHasReadSeq(id)
+ if err != nil {
+ return err
+ }
+ return uSeq.SetUserReadSeq(ctx, conversationID, userID, seq)
+ },
+ },
+ {
+ Prefix: ConversationUserMinSeq,
+ GetSeq: func(ctx context.Context, id string) (int64, error) {
+ conversationID, userID, err := uSplitConversationUserMinSeq(id)
+ if err != nil {
+ return 0, err
+ }
+ return uSeq.GetUserMinSeq(ctx, conversationID, userID)
+ },
+ SetSeq: func(ctx context.Context, id string, seq int64) error {
+ conversationID, userID, err := uSplitConversationUserMinSeq(id)
+ if err != nil {
+ return err
+ }
+ return uSeq.SetUserMinSeq(ctx, conversationID, userID, seq)
+ },
+ },
+ }
+
+ cancel()
+ ctx = context.Background()
+
+ var wg sync.WaitGroup
+ wg.Add(len(ts))
+
+ for i := range ts {
+ go func(task *taskSeq) {
+ defer wg.Done()
+ err := seqRedisToMongo(ctx, rdb, task.GetSeq, task.SetSeq, task.Prefix, del, &task.Count)
+ task.End = time.Now()
+ task.Error = err
+ }(ts[i])
+ }
+ start := time.Now()
+ done := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+
+ sigs := make(chan os.Signal, 1)
+ signal.Notify(sigs, syscall.SIGTERM)
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+ var buf bytes.Buffer
+
+ printTaskInfo := func(now time.Time) {
+ buf.Reset()
+ buf.WriteString(now.Format(time.DateTime))
+ buf.WriteString(" \n")
+ for i := range ts {
+ task := ts[i]
+ if task.Error == nil {
+ if task.End.IsZero() {
+ buf.WriteString(fmt.Sprintf("[%s] converting %s* count %d", now.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count)))
+ } else {
+ buf.WriteString(fmt.Sprintf("[%s] success %s* count %d", task.End.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count)))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("[%s] failed %s* count %d error %s", task.End.Sub(start), task.Prefix, atomic.LoadInt64(&task.Count), task.Error))
+ }
+ buf.WriteString("\n")
+ }
+ fmt.Println(buf.String())
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case s := <-sigs:
+ return fmt.Errorf("exit by signal %s", s)
+ case <-done:
+ errs := make([]error, 0, len(ts))
+ for i := range ts {
+ task := ts[i]
+ if task.Error != nil {
+ errs = append(errs, fmt.Errorf("seq %s failed %w", task.Prefix, task.Error))
+ }
+ }
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+ printTaskInfo(time.Now())
+ if err := SetVersion(versionColl, seqKey, seqVersion); err != nil {
+ return fmt.Errorf("set mongodb seq version %w", err)
+ }
+ return nil
+ case now := <-ticker.C:
+ printTaskInfo(now)
+ }
+ }
+}
+
+type taskSeq struct {
+ Prefix string
+ Count int64
+ Error error
+ End time.Time
+ GetSeq func(ctx context.Context, id string) (int64, error)
+ SetSeq func(ctx context.Context, id string, seq int64) error
+}
+
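+// seqRedisToMongo scans Redis keys with the given prefix, writes each seq to MongoDB when the
+// Redis value is greater than the stored one, then expires (or deletes) the Redis key and
+// increments count.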
+func seqRedisToMongo(ctx context.Context, rdb redis.UniversalClient, getSeq func(ctx context.Context, id string) (int64, error), setSeq func(ctx context.Context, id string, seq int64) error, prefix string, delAfter time.Duration, count *int64) error {
+ var (
+ cursor uint64
+ keys []string
+ err error
+ )
+ for {
+ keys, cursor, err = rdb.Scan(ctx, cursor, prefix+"*", batchSize).Result()
+ if err != nil {
+ return err
+ }
+ if len(keys) > 0 {
+ for _, key := range keys {
+ seqStr, err := rdb.Get(ctx, key).Result()
+ if err != nil {
+ return fmt.Errorf("redis get %s failed %w", key, err)
+ }
+ seq, err := strconv.Atoi(seqStr)
+ if err != nil {
+ return fmt.Errorf("invalid %s seq %s", key, seqStr)
+ }
+ if seq < 0 {
+ return fmt.Errorf("invalid %s seq %s", key, seqStr)
+ }
+ id := strings.TrimPrefix(key, prefix)
+ redisSeq := int64(seq)
+ mongoSeq, err := getSeq(ctx, id)
+ if err != nil {
+ return fmt.Errorf("get mongo seq %s failed %w", key, err)
+ }
+ if mongoSeq < redisSeq {
+ if err := setSeq(ctx, id, redisSeq); err != nil {
+ return fmt.Errorf("set mongo seq %s failed %w", key, err)
+ }
+ }
+ if delAfter > 0 {
+ if err := rdb.Expire(ctx, key, delAfter).Err(); err != nil {
+ return fmt.Errorf("redis expire key %s failed %w", key, err)
+ }
+ } else {
+ if err := rdb.Del(ctx, key).Err(); err != nil {
+ return fmt.Errorf("redis del key %s failed %w", key, err)
+ }
+ }
+ atomic.AddInt64(count, 1)
+ }
+ }
+ if cursor == 0 {
+ return nil
+ }
+ }
+}
+
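+// CheckVersion reports whether the data_version collection already records a version of at
+// least currentVersion for key, i.e. whether the seq data has already been converted.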
+func CheckVersion(coll *mongo.Collection, key string, currentVersion int) (converted bool, err error) {
+ type VersionTable struct {
+ Key string `bson:"key"`
+ Value string `bson:"value"`
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+ res, err := mongoutil.FindOne[VersionTable](ctx, coll, bson.M{"key": key})
+ if err == nil {
+ ver, err := strconv.Atoi(res.Value)
+ if err != nil {
+ return false, fmt.Errorf("version %s parse error %w", res.Value, err)
+ }
+ if ver >= currentVersion {
+ return true, nil
+ }
+ return false, nil
+ } else if errors.Is(err, mongo.ErrNoDocuments) {
+ return false, nil
+ } else {
+ return false, err
+ }
+}
+
+func SetVersion(coll *mongo.Collection, key string, version int) error {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+ option := options.Update().SetUpsert(true)
+ filter := bson.M{"key": key} // match on key only so the upsert updates the existing version document
+ update := bson.M{"$set": bson.M{"key": key, "value": strconv.Itoa(version)}}
+ return mongoutil.UpdateOne(ctx, coll, filter, update, false, option)
+}
diff --git a/tools/seq/main.go b/tools/seq/main.go
new file mode 100644
index 000000000..16da9f156
--- /dev/null
+++ b/tools/seq/main.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "github.com/openimsdk/open-im-server/v3/tools/seq/internal"
+ "time"
+)
+
+func main() {
+ var (
+ config string
+ second int
+ )
+ flag.StringVar(&config, "c", "", "config directory")
+ flag.IntVar(&second, "sec", 3600*24, "seconds to keep the original Redis seq keys before they expire after conversion")
+ flag.Parse()
+ if err := internal.Main(config, time.Duration(second)*time.Second); err != nil {
+ fmt.Println("seq task failed:", err)
+ return
+ }
+ fmt.Println("seq task success!")
+}