ci: improve develop experience

pull/48/head
Yaxian 4 years ago
parent 7f2270844e
commit 4fc5a2f8d4

1
.gitignore vendored

@ -8,3 +8,4 @@ src/utils/config
# gosum
go.sum
tmp

@ -103,6 +103,43 @@ All images are available at https://hub.docker.com/r/lyt1123/open_im_server
![OpenIMServersondockerpng](https://github.com/OpenIMSDK/Open-IM-Server/blob/main/docs/Open-IM-Servers-on-docker.png)
#### How to develop
1. Install the [Go environment](https://golang.org/doc/install). Make sure the Go version is at least 1.16 (the development Dockerfile is based on `golang:1.16`).
2. Install `Nodejs` and `pm2`
```
curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash -
npm install pm2 -g
```
3. Clone the Open-IM project to your server.
```
git clone https://github.com/OpenIMSDK/Open-IM-Server.git --recursive
```
4. Start Service.
```
# run etcd/mongodb/mysql/redis/kafka
docker-compose -f docker-compose.local.yaml up -d
# run open-im services
pm2 start app.yaml --watch
pm2 ls
pm2 logs
```
or try with `docker-compose`
```
docker-compose -f docker-compose.dev.yaml up -d
docker-compose -f docker-compose.dev.yaml ps
docker-compose -f docker-compose.dev.yaml logs -f
```
### CONFIGURATION INSTRUCTIONS
> Open-IM configuration is divided into basic component configuration and business internal service configuration. Developers need to fill in the address of each component as the address of their server component when using the product, and ensure that the internal service port of the business is not occupied

@ -0,0 +1,41 @@
apps:
- name: open_im_api
script: "go run open_im_api.go"
cwd: "./src/api"
- name: open_im_auth
script: "go run open_im_auth.go"
cwd: "src/rpc/auth"
- name: open_im_offline_msg
script: "go run open_im_msg.go"
cwd: "src/rpc/chat"
- name: open_im_friend
script: "go run open_im_friend.go"
cwd: "src/rpc/friend"
- name: open_im_group
script: "go run open_im_group.go"
cwd: "src/rpc/group"
- name: open_im_user
script: "go run open_im_user.go"
cwd: "src/rpc/user"
- name: open_im_push
script: "go run open_im_push.go"
cwd: "src/push"
- name: open_im_timed_task
script: "go run open_im_timed_task.go"
cwd: "src/timed_task"
- name: open_im_msg_transfer
script: "go run open_im_msg_transfer.go"
cwd: "src/msg_transfer"
- name: open_im_msg_gateway
script: "go run open_im_msg_gateway.go"
cwd: "src/msg_gateway"

@ -0,0 +1,147 @@
# The class cannot be named by Pascal or camel case.
# If it is not used, the corresponding structure will not be set,
# and it will not be read naturally.
serverversion: 1.0.0
#---------------Infrastructure configuration---------------------#
etcd:
etcdSchema: openIM
etcdAddr: [ etcd:2379 ]
mysql:
dbMysqlAddress: [ mysql:3306 ]
dbMysqlUserName: root
dbMysqlPassword: openIM
dbMysqlDatabaseName: openIM
dbTableName: eMsg
dbMsgTableNum: 1
dbMaxOpenConns: 20
dbMaxIdleConns: 10
dbMaxLifeTime: 120
mongo:
dbAddress: [ mongodb:27017 ]
dbDirect: false
dbTimeout: 10
dbDatabase: openIM
dbSource: admin
dbUserName:
dbPassword:
dbMaxPoolSize: 20
dbRetainChatRecords: 7
redis:
dbAddress: redis:6379
dbMaxIdle: 128
dbMaxActive: 0
dbIdleTimeout: 120
dbPassWord: openIM
kafka:
ws2mschat:
addr: [ kafka:9092 ]
topic: "ws2ms_chat"
ms2pschat:
addr: [ kafka:9092 ]
topic: "ms2ps_chat"
consumergroupid:
msgToMongo: mongo
msgToMySql: mysql
msgToPush: push
#---------------Internal service configuration---------------------#
# The service ip default is empty,
# automatically obtain the machine's valid network card ip as the service ip,
# otherwise the configuration ip is preferred
serverip:
api:
openImApiPort: [ 10000 ]
sdk:
openImSdkWsPort: [ 30000 ]
credential:
tencent:
appID: 1302656840
region: ap-chengdu
bucket: echat-1302656840
secretID: AKIDGNYVChzIQinu7QEgtNp0hnNgqcV8vZTC
secretKey: kz15vW83qM6dBUWIq681eBZA0c0vlIbe
rpcport:
openImUserPort: [ 10100 ]
openImFriendPort: [ 10200 ]
openImOfflineMessagePort: [ 10300 ]
openImOnlineRelayPort: [ 10400 ]
openImGroupPort: [ 10500 ]
openImAuthPort: [ 10600 ]
openImPushPort: [ 10700 ]
rpcregistername:
openImUserName: User
openImFriendName: Friend
openImOfflineMessageName: OfflineMessage
openImPushName: Push
openImOnlineMessageRelayName: OnlineMessageRelay
openImGroupName: Group
openImAuthName: Auth
log:
storageLocation: ../logs/
rotationTime: 24
remainRotationCount: 5
remainLogLevel: 6
elasticSearchSwitch: false
elasticSearchAddr: [ 127.0.0.1:9201 ]
elasticSearchUser: ""
elasticSearchPassword: ""
modulename:
longConnSvrName: msg_gateway
msgTransferName: msg_transfer
pushName: push
longconnsvr:
openImWsPort: [ 17778 ]
websocketMaxConnNum: 10000
websocketMaxMsgLen: 4096
websocketTimeOut: 10
push:
tpns:
ios:
accessID: 1600018281
secretKey: 3cd68a77a95b89e5089a1aca523f318f
android:
accessID: 111
secretKey: 111
jpns:
appKey: 2783339cee4de379cc798fe1
masterSecret: 66e5f309e032c68cc668c28a
pushUrl: "https://api.jpush.cn/v3/push"
manager:
appManagerUid: ["openIM123456","openIM654321"]
secrets: ["openIM1","openIM2"]
secret: tuoyun
multiloginpolicy:
onlyOneTerminalAccess: false
mobileAndPCTerminalAccessButOtherTerminalKickEachOther: true
allTerminalAccess: false
#token config
tokenpolicy:
accessSecret: "open_im_server"
# Token effective time seconds as a unit
#Seven days 7*24*60*60
accessExpire: 604800
messagecallback:
callbackSwitch: false
callbackUrl: "http://www.xxx.com/msg/judge"

@ -0,0 +1,11 @@
FROM golang:1.16 as base
FROM base as dev
ENV GO111MODULE=on
ENV GOPROXY=https://goproxy.cn,direct
RUN curl -sSfL https://raw.githubusercontent.com/cosmtrek/air/master/install.sh | sh -s -- -b $(go env GOPATH)/bin
CMD ["air"]

@ -0,0 +1,351 @@
version: "3"
services:
mysql:
image: mysql:5.7
ports:
- 3306:3306
container_name: mysql
volumes:
- ./components/mysql/data:/var/lib/mysql
- /etc/localtime:/etc/localtime
environment:
MYSQL_ROOT_PASSWORD: openIM
restart: always
mongodb:
image: mongo:4.0
ports:
- 27017:27017
container_name: mongo
volumes:
- ./components/mongodb/data:/data/db
environment:
TZ: Asia/Shanghai
restart: always
redis:
image: redis
ports:
- 6379:6379
container_name: redis
volumes:
- ./components/redis/data:/data
#redis config file
#- ./components/redis/config/redis.conf:/usr/local/redis/config/redis.conf
environment:
TZ: Asia/Shanghai
restart: always
sysctls:
net.core.somaxconn: 1024
command: redis-server --requirepass openIM --appendonly yes
zookeeper:
image: wurstmeister/zookeeper
ports:
- 2181:2181
container_name: zookeeper
volumes:
- /etc/localtime:/etc/localtime
environment:
TZ: Asia/Shanghai
restart: always
kafka:
image: wurstmeister/kafka
container_name: kafka
restart: always
environment:
TZ: Asia/Shanghai
KAFKA_BROKER_ID: 0
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
KAFKA_LISTENERS: PLAINTEXT://kafka:9092
depends_on:
- zookeeper
links:
- zookeeper
ports:
- 9092:9092
etcd:
image: quay.io/coreos/etcd
ports:
- 2379:2379
- 2380:2380
container_name: etcd
volumes:
- /etc/timezone:/etc/timezone
- /etc/localtime:/etc/localtime
environment:
ETCDCTL_API: 3
restart: always
command: /usr/local/bin/etcd --name etcd0 --data-dir /etcd-data --listen-client-urls http://0.0.0.0:2379 --advertise-client-urls http://0.0.0.0:2379 --listen-peer-urls http://0.0.0.0:2380 --initial-advertise-peer-urls http://0.0.0.0:2380 --initial-cluster etcd0=http://0.0.0.0:2380 --initial-cluster-token tkn --initial-cluster-state new
open_im_api:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_api
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/api
ports:
- 10000:10000
links:
- kafka
- mysql
- mongodb
- redis
- etcd
open_im_auth:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_auth
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/rpc/auth
ports:
- 10600:10600
open_im_user:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_user
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/rpc/user
ports:
- 10100:10100
open_im_friend:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_friend
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/rpc/friend
ports:
- 10200:10200
open_im_group:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_group
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/rpc/group
ports:
- 10500:10500
open_im_push:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_push
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/push
ports:
- 10700:10700
open_im_timed_task:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_timed_task
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/timed_task
open_im_offline_msg:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_offline_msg
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/rpc/chat
ports:
- 10300:10300
open_im_msg_transfer:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_msg_transfer
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/msg_transfer
open_im_msg_gateway:
build:
dockerfile: dev.Dockerfile
context: .
target: dev
network: host
depends_on:
- kafka
- mysql
- mongodb
- redis
- etcd
links:
- kafka
- mysql
- mongodb
- redis
- etcd
container_name: open_im_msg_gateway
volumes:
- ./src:/Open-IM-Server/src
- ./config/config.dev.yaml:/Open-IM-Server/config/config.yaml
- ./go.mod:/Open-IM-Server/go.mod
working_dir: /Open-IM-Server/src/msg_gateway
ports:
- 10400:10400
- 10800:10800

@ -0,0 +1,85 @@
version: "3"
services:
mysql:
image: mysql:5.7
ports:
- 3306:3306
container_name: mysql
volumes:
- ./components/mysql/data:/var/lib/mysql
- /etc/localtime:/etc/localtime
environment:
MYSQL_ROOT_PASSWORD: openIM
restart: always
mongodb:
image: mongo:4.0
ports:
- 27017:27017
container_name: mongo
volumes:
- ./components/mongodb/data:/data/db
environment:
TZ: Asia/Shanghai
restart: always
redis:
image: redis
ports:
- 6379:6379
container_name: redis
volumes:
- ./components/redis/data:/data
#redis config file
#- ./components/redis/config/redis.conf:/usr/local/redis/config/redis.conf
environment:
TZ: Asia/Shanghai
restart: always
sysctls:
net.core.somaxconn: 1024
command: redis-server --requirepass openIM --appendonly yes
zookeeper:
image: wurstmeister/zookeeper
ports:
- 2181:2181
container_name: zookeeper
volumes:
- /etc/localtime:/etc/localtime
environment:
TZ: Asia/Shanghai
restart: always
kafka:
image: wurstmeister/kafka
container_name: kafka
restart: always
environment:
TZ: Asia/Shanghai
KAFKA_BROKER_ID: 0
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092
KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
# network_mode: "host"
depends_on:
- zookeeper
links:
- zookeeper
ports:
- 9092:9092
etcd:
image: quay.io/coreos/etcd
ports:
- 2379:2379
- 2380:2380
container_name: etcd
volumes:
- /etc/timezone:/etc/timezone
- /etc/localtime:/etc/localtime
environment:
ETCDCTL_API: 3
restart: always
command: /usr/local/bin/etcd --name etcd0 --data-dir /etcd-data --listen-client-urls http://0.0.0.0:2379 --advertise-client-urls http://0.0.0.0:2379 --listen-peer-urls http://0.0.0.0:2380 --initial-advertise-peer-urls http://0.0.0.0:2380 --initial-cluster etcd0=http://0.0.0.0:2380 --initial-cluster-token tkn --initial-cluster-state new

@ -0,0 +1,49 @@
## 在 Docker 容器中开发
### 运行服务容器
```
# 注意:启动这些容器时可能占用大量 CPU,导致部分容器启动失败
docker-compose -f docker-compose.dev.yaml up -d
```
### 检查容器运行状态
```
docker-compose -f docker-compose.dev.yaml ps
```
应该能看到以下结果
```
NAME COMMAND SERVICE STATUS PORTS
etcd "/usr/local/bin/etcd…" etcd running 0.0.0.0:2379-2380->2379-2380/tcp
kafka "start-kafka.sh" kafka running 0.0.0.0:9092->9092/tcp
mongo "docker-entrypoint.s…" mongodb running 0.0.0.0:27017->27017/tcp
mysql "docker-entrypoint.s…" mysql running 0.0.0.0:3306->3306/tcp
open_im_api "air" open_im_api running 0.0.0.0:10000->10000/tcp
open_im_auth "air" open_im_auth running 0.0.0.0:10600->10600/tcp
open_im_friend "air" open_im_friend running 0.0.0.0:10200->10200/tcp
open_im_group "air" open_im_group running 0.0.0.0:10500->10500/tcp
open_im_msg_gateway "air" open_im_msg_gateway running
open_im_msg_transfer "air" open_im_msg_transfer running
open_im_push "air" open_im_push running 0.0.0.0:10700->10700/tcp
open_im_timed_task "air" open_im_timed_task running
open_im_user "air" open_im_user running 0.0.0.0:10100->10100/tcp
redis "docker-entrypoint.s…" redis running 0.0.0.0:6379->6379/tcp
zookeeper "/bin/sh -c '/usr/sb…" zookeeper running 0.0.0.0:2181->2181/tcp
```
### 检查容器日志
```
docker-compose -f docker-compose.dev.yaml logs -f
```
若要检查指定容器的日志,比如 `open_im_api`,则
```
docker-compose -f docker-compose.dev.yaml logs -f open_im_api
```

@ -0,0 +1,53 @@
## 使用 PM2 管理开发多个服务
### 安装 Node.js 和 PM2
```
curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash -
npm install pm2 -g
```
### PM2 开发
```
pm2 start pm2.yaml
```
### PM2 检查进程
```
pm2 ls
┌─────┬─────────────────────────┬─────────────┬─────────┬─────────┬──────────┬────────┬──────┬───────────┬──────────┬──────────┬──────────┐
│ id │ name │ namespace │ version │ mode │ pid │ uptime │ ↺ │ status │ cpu │ mem │ watching │
├─────┼─────────────────────────┼─────────────┼─────────┼─────────┼──────────┼────────┼──────┼───────────┼──────────┼──────────┼──────────┤
│ 0 │ open_im_api │ default │ N/A │ fork │ 38641 │ 74s │ 0 │ online │ 0% │ 32.5mb │ disabled │
│ 1 │ open_im_auth │ default │ N/A │ fork │ 38642 │ 74s │ 0 │ online │ 0% │ 30.4mb │ disabled │
│ 3 │ open_im_friend │ default │ N/A │ fork │ 38644 │ 74s │ 0 │ online │ 0% │ 37.9mb │ disabled │
│ 4 │ open_im_group │ default │ N/A │ fork │ 40594 │ 0s │ 46 │ online │ 0% │ 17.1mb │ disabled │
│ 2 │ open_im_msg │ default │ N/A │ fork │ 38643 │ 74s │ 0 │ online │ 0% │ 35.8mb │ disabled │
│ 9 │ open_im_msg_gateway │ default │ N/A │ fork │ 38666 │ 74s │ 0 │ online │ 0% │ 33.4mb │ disabled │
│ 8 │ open_im_msg_transfer │ default │ N/A │ fork │ 38660 │ 74s │ 0 │ online │ 0% │ 34.7mb │ disabled │
│ 6 │ open_im_push │ default │ N/A │ fork │ 38647 │ 74s │ 0 │ online │ 0% │ 33.7mb │ disabled │
│ 7 │ open_im_timed_task │ default │ N/A │ fork │ 38657 │ 74s │ 0 │ online │ 0% │ 27.3mb │ disabled │
│ 5 │ open_im_user │ default │ N/A │ fork │ 38646 │ 74s │ 0 │ online │ 0% │ 34.2mb │ disabled │
└─────┴─────────────────────────┴─────────────┴─────────┴─────────┴──────────┴────────┴──────┴───────────┴──────────┴──────────┴──────────┘
```
### PM2 检查日志
```
pm2 logs
```
### PM2 删除进程
```
pm2 delete
pm2 flush
```

@ -0,0 +1,25 @@
package apiAuth
import (
"bytes"
"net/http"
"net/http/httptest"
"testing"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
)
func init() {
gin.SetMode(gin.TestMode)
}
func Test_UserRegister(t *testing.T) {
res := httptest.NewRecorder()
c, _ := gin.CreateTestContext(res)
c.Request, _ = http.NewRequest("POST", "/", bytes.NewBufferString(`{"secret": "111", "platform": 1, "uid": "1", "name": "1"}`))
UserRegister(c)
assert.Equal(t, res.Code, 200)
}

@ -3,17 +3,20 @@ package config
import ( import (
"io/ioutil" "io/ioutil"
"os"
"path/filepath" "path/filepath"
"runtime" "runtime"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
) )
var Config config var Config config
var (
_, b, _, _ = runtime.Caller(0)
// Root folder of this project
Root = filepath.Join(filepath.Dir(b), "../../..")
)
type config struct { type config struct {
ServerIP string `yaml:"serverip"` ServerIP string `yaml:"serverip"`
@ -154,8 +157,9 @@ type config struct {
} }
func init() { func init() {
path, _ := os.Getwd() // if we cd Open-IM-Server/src/utils and run go test
bytes, err := ioutil.ReadFile(path + "/config/config.yaml") // it will panic cannot find config/config.yaml
bytes, err := ioutil.ReadFile(Root + "/config/config.yaml")
if err != nil { if err != nil {
panic(err) panic(err)
} }

@ -7,7 +7,7 @@ import (
) )
func main() { func main() {
rpcPort := flag.Int("rpc_port", 10500, "rpc listening port") rpcPort := flag.Int("rpc_port", 10400, "rpc listening port")
wsPort := flag.Int("ws_port", 10800, "ws listening port") wsPort := flag.Int("ws_port", 10800, "ws listening port")
flag.Parse() flag.Parse()
var wg sync.WaitGroup var wg sync.WaitGroup

@ -7,7 +7,7 @@ import (
) )
func main() { func main() {
rpcPort := flag.Int("port", -1, "rpc listening port") rpcPort := flag.Int("port", 10700, "rpc listening port")
flag.Parse() flag.Parse()
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(1) wg.Add(1)

@ -6,7 +6,7 @@ import (
) )
func main() { func main() {
rpcPort := flag.Int("port", 10600, "RpcToken default listen port 10800") rpcPort := flag.Int("port", 10600, "RpcToken default listen port 10600")
flag.Parse() flag.Parse()
rpcServer := rpcAuth.NewRpcAuthServer(*rpcPort) rpcServer := rpcAuth.NewRpcAuthServer(*rpcPort)
rpcServer.Run() rpcServer.Run()

@ -2,13 +2,12 @@ package main
import ( import (
rpcChat "Open_IM/src/rpc/chat/chat" rpcChat "Open_IM/src/rpc/chat/chat"
"Open_IM/src/utils"
"flag" "flag"
) )
func main() { func main() {
rpcPort := flag.String("port", "", "rpc listening port") rpcPort := flag.Int("port", 10300, "rpc listening port")
flag.Parse() flag.Parse()
rpcServer := rpcChat.NewRpcChatServer(utils.StringToInt(*rpcPort)) rpcServer := rpcChat.NewRpcChatServer(*rpcPort)
rpcServer.Run() rpcServer.Run()
} }

Loading…
Cancel
Save