pull/241/head
wangchuxiao 3 years ago
parent e5ccc1dd36
commit 4e22970784

@@ -1,38 +1,80 @@
#### openIM k8s Deployment Guide
### 1. Modify the configuration file
Modify the config/config.yaml file in the Open-IM-SERVER directory, updating the MySQL, Kafka, MongoDB and other settings.
If you use the demo, also modify the demo/imAPIURL address so that the demo can reach the im API.
Modify the config/config.yaml file in the Open-IM-SERVER root directory. Every address configured below must be reachable from the k8s pods (a field-level sketch follows the list).
1. Change the ETCD configuration to your own ETCD IP address; ideally keep it separate from the ETCD used by k8s itself.
2. Modify the MySQL configuration.
3. Modify the Mongo configuration.
4. Modify the Redis configuration.
5. Modify the Kafka configuration.
6. Set rpcRegisterIP to empty. This is the address each rpc registers with ETCD; when it is empty, each rpc registers its pod address, which is required for rpc requests to work correctly (important).
7. If minio is used as object storage, also modify the minio address.
8. If you use offline push, modify the push (offline push) configuration.
9. Set the imAPIURL field in demo to the ingress or service address of the openIM api, so that the demo pod can reach it (important).
10. Other optional configuration changes, such as SMS, push, etc.
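For orientation, here is a minimal sketch of the kind of fields this list refers to. The key names and addresses are illustrative assumptions; use the actual keys present in your version of config/config.yaml.
```
etcd:
  etcdAddr: [ "10.1.2.3:2379" ]        # your own ETCD, ideally separate from the cluster's
mysql:
  dbMysqlAddress: [ "10.1.2.4:3306" ]  # same pattern for mongo, redis, kafka
rpcRegisterIP: ""                      # leave empty so each rpc registers its pod IP
credential:
  minio:
    endpoint: "http://10.1.2.5:9000"   # only if minio is used as object storage
demo:
  imAPIURL: "http://openim-api.openim.svc.cluster.local:10002"  # illustrative ingress/service address reachable from the demo pod
```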
### 2. Create the im configMap in the k8s openim namespace from the project root directory
kubectl create namespace openim
kubectl -n openim create configmap config --from-file=config/config.yaml
openim is the namespace for the im project (optional).
View the configmap:
kubectl -n openim get configmap
1. Create a dedicated namespace for the open-IM project
```
kubectl create namespace openim
```
2. From the project root directory, create the configmap from config/config.yaml
```
kubectl -n openim create configmap config --from-file=config/config.yaml
```
View the configmap (an optional content check follows below):
```
kubectl -n openim get configmap
```
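Optionally, to confirm that the file content actually landed in the ConfigMap, you can also dump it (standard kubectl, not specific to this project):
```
kubectl -n openim get configmap config -o yaml
```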
### 3 (optional). Modify each deployment.yml
kubectl get nodes
kubectl label node k8s-node1 role=kube-Node
Label the nodes that the workloads should be scheduled onto.
nodeSelector:
  node: kube-Node
When creating the resource manifests, add the matching nodeSelector attribute.
Adjust the replica count for each service; at least 2 replicas per rpc service is recommended.
If you change some settings in config/config.yaml, such as ports, you must also change the corresponding deployment and ingress ports.
Each rpc's deployment is under deploy_k8s in the Open-IM-SERVER root directory.
Label the nodes that the workloads should be scheduled onto:
```
kubectl get nodes
kubectl label node k8s-node1 role=openIMworker
```
Add the following under spec.template.spec in the deployment:
```
nodeSelector:
  role: openIMworker
```
When creating the resource manifests, add the matching nodeSelector attribute.
Adjust the replica count for each service; at least 2 replicas per rpc service is recommended (see the sketch below).
If you change some settings in config/config.yaml, such as ports, you must also change the corresponding deployment and ingress ports.
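As a rough sketch, the replica count and nodeSelector in one of the rpc deployments under deploy_k8s might end up looking like this (everything except the label itself is illustrative):
```
spec:
  replicas: 2              # at least 2 replicas per rpc service is recommended
  template:
    spec:
      nodeSelector:
        role: openIMworker # matches the label added with kubectl label above
```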
### 4. Modify the ingress.yaml configuration file
An ingress controller must be installed; ingress-nginx is used here. Other ingress controllers require changes to the configuration file.
Adjust the domain names and related settings.
1. An ingress controller must be installed; ingress-nginx is used here. If you use another type of ingress controller, change ingress.class accordingly, and set host to the host where your services are deployed (a sketch follows below).
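For reference, a minimal sketch of the shape of one such ingress rule, assuming ingress-nginx. The resource name, backend service name, and port are illustrative assumptions; use the ones defined in the project's ingress.yaml and services.
```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: openim-api                         # illustrative name
  annotations:
    kubernetes.io/ingress.class: "nginx"   # change this for another ingress controller
spec:
  rules:
    - host: api.openim.xxx.com             # replace with your own host
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: openim-api           # illustrative backend service name
                port:
                  number: 10002            # illustrative port
```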
### 5. Run the ./kubectl_start.sh script
chmod +x ./kubectl_start.sh ./kubectl_stop.sh
./kubectl_start.sh
kubectl -n openim apply -f ingress.yaml
kubectl starts all deployments, services, and ingress.
1. Give the scripts executable permission
```
chmod +x ./kubectl_start.sh ./kubectl_stop.sh
```
2. Start the k8s services and deployments
```
./kubectl_start.sh
```
3. Start the k8s ingress
```
kubectl -n openim apply -f ingress.yaml
```
kubectl starts all deployments, services, and ingress.
### 6. Check the status of the k8s deployments, services, and ingress
kubectl -n openim get services
kubectl -n openim get deployment
kubectl -n openim get ingress
kubectl -n openim get pods
```
kubectl -n openim get services
kubectl -n openim get deployment
kubectl -n openim get ingress
kubectl -n openim get pods
```
Check that the services are reachable:
```
telnet msg-gateway.openim.xxx.com {{your_ingress_port}}
telnet sdk-server.openim.xxx.com {{your_ingress_port}}
telnet api.openim.xxx.com {{your_ingress_port}}
telnet cms-api.openim.xxx.com {{your_ingress_port}}
telnet demo.openim.xxx.com {{your_ingress_port}}
```

@@ -29,8 +29,8 @@ services:
- TZ=Asia/Shanghai
# cache
- wiredTigerCacheSizeGB=1
- MONGO_INITDB_ROOT_USERNAME=openIM
- MONGO_INITDB_ROOT_PASSWORD=openIM
#- MONGO_INITDB_ROOT_USERNAME=openIM
#- MONGO_INITDB_ROOT_PASSWORD=openIM
#TZ: Asia/Shanghai
restart: always

@@ -5,9 +5,9 @@ import (
"Open_IM/pkg/common/log"
"Open_IM/pkg/utils"
"context"
"fmt"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/policy"
url2 "net/url"
)
@@ -51,52 +51,45 @@ func MinioInit() {
}
err = MinioClient.MakeBucket(context.Background(), config.Config.Credential.Minio.Bucket, opt)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "MakeBucket failed ", err.Error())
log.NewInfo(operationID, utils.GetSelfFuncName(), "MakeBucket failed ", err.Error())
exists, err := MinioClient.BucketExists(context.Background(), config.Config.Credential.Minio.Bucket)
if err == nil && exists {
log.NewWarn(operationID, utils.GetSelfFuncName(), "We already own ", config.Config.Credential.Minio.Bucket)
log.NewInfo(operationID, utils.GetSelfFuncName(), "We already own ", config.Config.Credential.Minio.Bucket)
} else {
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
log.NewInfo(operationID, utils.GetSelfFuncName(), err.Error())
}
log.NewError(operationID, utils.GetSelfFuncName(), "create bucket failed and bucket not exists")
log.NewInfo(operationID, utils.GetSelfFuncName(), "create bucket failed and bucket not exists")
return
}
}
// make app bucket
err = MinioClient.MakeBucket(context.Background(), config.Config.Credential.Minio.AppBucket, opt)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "MakeBucket failed ", err.Error())
log.NewInfo(operationID, utils.GetSelfFuncName(), "MakeBucket failed ", err.Error())
exists, err := MinioClient.BucketExists(context.Background(), config.Config.Credential.Minio.Bucket)
if err == nil && exists {
log.NewWarn(operationID, utils.GetSelfFuncName(), "We already own ", config.Config.Credential.Minio.Bucket)
log.NewInfo(operationID, utils.GetSelfFuncName(), "We already own ", config.Config.Credential.Minio.Bucket)
} else {
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
log.NewInfo(operationID, utils.GetSelfFuncName(), err.Error())
}
log.NewError(operationID, utils.GetSelfFuncName(), "create bucket failed and bucket not exists")
log.NewInfo(operationID, utils.GetSelfFuncName(), "create bucket failed and bucket not exists")
return
}
}
// automatically set the bucket policy to public
err = MinioClient.SetBucketPolicy(context.Background(), config.Config.Credential.Minio.Bucket, "public")
policyJsonString := fmt.Sprintf(`{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject","s3:PutObject"],
"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::%s/*"],"Sid": ""}]}`, config.Config.Credential.Minio.Bucket)
err = MinioClient.SetBucketPolicy(context.Background(), config.Config.Credential.Minio.Bucket, policyJsonString)
if err != nil {
log.NewInfo("", utils.GetSelfFuncName(), "SetBucketPolicy failed please set in web", err.Error())
}
err = MinioClient.SetBucketPolicy(context.Background(), config.Config.Credential.Minio.AppBucket, "public")
policyJsonString = fmt.Sprintf(`{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject","s3:PutObject"],
"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::%s/*"],"Sid": ""}]}`, config.Config.Credential.Minio.AppBucket)
err = MinioClient.SetBucketPolicy(context.Background(), config.Config.Credential.Minio.AppBucket, policyJsonString)
if err != nil {
log.NewInfo("", utils.GetSelfFuncName(), "SetBucketPolicy failed please set in web", err.Error())
}
policyType, err := MinioClient.GetBucketPolicy(context.Background(), config.Config.Credential.Minio.Bucket)
if err != nil {
log.NewInfo("", utils.GetSelfFuncName(), err.Error())
}
log.NewInfo("", utils.GetSelfFuncName(), "policy: ", policyType)
policyType, err = MinioClient.GetBucketPolicy(context.Background(), config.Config.Credential.Minio.AppBucket)
if err != nil {
log.NewInfo("", utils.GetSelfFuncName(), err.Error())
}
log.NewInfo("", utils.GetSelfFuncName(), "policy: ", policyType)
log.NewInfo(operationID, utils.GetSelfFuncName(), "minio create and set policy success")
}
