Merge branch 'code-conventions' of github.com:FGadvancer/Open-IM-Server into code-conventions

pull/2001/head
luhaoling 2 years ago
commit 88415111fc

@@ -23,18 +23,13 @@ import (
     "strconv"
     "time"
-    "github.com/google/uuid"
-    "github.com/openimsdk/open-im-server/v3/pkg/authverify"
-    "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
     "github.com/OpenIMSDK/protocol/third"
     "github.com/OpenIMSDK/tools/errs"
     "github.com/OpenIMSDK/tools/log"
     "github.com/OpenIMSDK/tools/mcontext"
     "github.com/OpenIMSDK/tools/utils"
+    "github.com/google/uuid"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3/cont"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
 )
@@ -57,7 +52,8 @@ func (t *thirdServer) PartSize(ctx context.Context, req *third.PartSizeReq) (*th
 }
 func (t *thirdServer) InitiateMultipartUpload(ctx context.Context, req *third.InitiateMultipartUploadReq) (*third.InitiateMultipartUploadResp, error) {
-    if err := checkUploadName(ctx, req.Name, t.config); err != nil {
+    defer log.ZDebug(ctx, "return")
+    if err := t.checkUploadName(ctx, req.Name); err != nil {
         return nil, err
     }
     expireTime := time.Now().Add(t.defaultExpire)
@@ -136,7 +132,7 @@ func (t *thirdServer) AuthSign(ctx context.Context, req *third.AuthSignReq) (*th
 func (t *thirdServer) CompleteMultipartUpload(ctx context.Context, req *third.CompleteMultipartUploadReq) (*third.CompleteMultipartUploadResp, error) {
     defer log.ZDebug(ctx, "return")
-    if err := checkUploadName(ctx, req.Name, t.config); err != nil {
+    if err := t.checkUploadName(ctx, req.Name); err != nil {
         return nil, err
     }
     result, err := t.s3dataBase.CompleteMultipartUpload(ctx, req.UploadID, req.Parts)
@@ -193,13 +189,13 @@ func (t *thirdServer) InitiateFormData(ctx context.Context, req *third.InitiateF
     if req.Size <= 0 {
         return nil, errs.ErrArgs.Wrap("size must be greater than 0")
     }
-    if err := checkUploadName(ctx, req.Name, t.config); err != nil {
+    if err := t.checkUploadName(ctx, req.Name); err != nil {
         return nil, err
     }
     var duration time.Duration
     opUserID := mcontext.GetOpUserID(ctx)
     var key string
-    if authverify.IsManagerUserID(opUserID, t.config) {
+    if t.IsManagerUserID(opUserID) {
         if req.Millisecond <= 0 {
             duration = time.Minute * 10
         } else {
@@ -259,7 +255,7 @@ func (t *thirdServer) CompleteFormData(ctx context.Context, req *third.CompleteF
     if err := json.Unmarshal(data, &mate); err != nil {
         return nil, errs.ErrArgs.Wrap("invalid id " + err.Error())
     }
-    if err := checkUploadName(ctx, mate.Name, t.config); err != nil {
+    if err := t.checkUploadName(ctx, mate.Name); err != nil {
         return nil, err
     }
     info, err := t.s3dataBase.StatObject(ctx, mate.Key)
@@ -279,7 +275,7 @@ func (t *thirdServer) CompleteFormData(ctx context.Context, req *third.CompleteF
         Group:      mate.Group,
         CreateTime: time.Now(),
     }
-    if err = t.s3dataBase.SetObject(ctx, obj); err != nil {
+    if err := t.s3dataBase.SetObject(ctx, obj); err != nil {
         return nil, err
     }
     return &third.CompleteFormDataResp{Url: t.apiAddress(mate.Name)}, nil

@@ -17,6 +17,7 @@ package third
 import (
     "context"
     "fmt"
+    "github.com/OpenIMSDK/tools/log"
     "net/url"
     "time"
@@ -40,6 +41,7 @@
 )
 func Start(config *config.GlobalConfig, client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error {
+    log.ZDebug(context.Background(), "config19999999999999999999999999999999999", config, "javadfdas")
     mongo, err := unrelation.NewMongo(config)
     if err != nil {
@@ -57,7 +59,7 @@ func Start(config *config.GlobalConfig, client discoveryregistry.SvcDiscoveryReg
     if apiURL == "" {
         return fmt.Errorf("api url is empty")
     }
-    if _, err := url.Parse(config.Object.ApiURL); err != nil {
+    if _, parseErr := url.Parse(config.Object.ApiURL); parseErr != nil {
         return err
     }
     if apiURL[len(apiURL)-1] != '/' {
@@ -68,16 +70,16 @@ func Start(config *config.GlobalConfig, client discoveryregistry.SvcDiscoveryReg
     if err != nil {
         return err
     }
-    // Select based on the configuration file strategy
+    // Select the oss implementation according to the configuration file strategy
     enable := config.Object.Enable
     var o s3.Interface
     switch config.Object.Enable {
     case "minio":
-        o, err = minio.NewMinio(cache.NewMinioCache(rdb), config)
+        o, err = minio.NewMinio(cache.NewMinioCache(rdb), minio.Config(config.Object.Minio))
     case "cos":
-        o, err = cos.NewCos(config)
+        o, err = cos.NewCos(cos.Config(config.Object.Cos))
     case "oss":
-        o, err = oss.NewOSS(config)
+        o, err = oss.NewOSS(oss.Config(config.Object.Oss))
     default:
         err = fmt.Errorf("invalid object enable: %s", enable)
     }
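Note: the constructors now take a narrow, driver-local Config instead of the whole *config.GlobalConfig, and expressions such as cos.Config(config.Object.Cos) are plain Go struct conversions, which only compile while both struct types keep identical fields (same names, types, and order, ignoring tags). A minimal sketch of that conversion pattern, using hypothetical stand-in types rather than the real OpenIM config structs:

    package main

    import "fmt"

    // fileMinioConf stands in for the nested config.Object.Minio section (hypothetical).
    type fileMinioConf struct {
        Bucket   string
        Endpoint string
    }

    // driverMinioConf stands in for the driver-local minio.Config (hypothetical).
    // The conversion below compiles only because the field sets match exactly.
    type driverMinioConf struct {
        Bucket   string
        Endpoint string
    }

    func main() {
        fromFile := fileMinioConf{Bucket: "openim", Endpoint: "http://127.0.0.1:10005"}
        conf := driverMinioConf(fromFile) // same shape as minio.Config(config.Object.Minio)
        fmt.Println(conf.Bucket, conf.Endpoint)
    }

If a field is later added to only one of the two structs, the conversion in Start stops compiling, which keeps the coupling between the file config and the driver config explicit.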

@@ -18,14 +18,14 @@ import (
     "context"
     "errors"
     "fmt"
-    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "strings"
     "unicode/utf8"
-    "github.com/openimsdk/open-im-server/v3/pkg/authverify"
     "github.com/OpenIMSDK/protocol/third"
     "github.com/OpenIMSDK/tools/errs"
     "github.com/OpenIMSDK/tools/mcontext"
+    "github.com/openimsdk/open-im-server/v3/pkg/authverify"
 )
 func toPbMapArray(m map[string][]string) []*third.KeyValues {
@@ -42,7 +42,7 @@ func toPbMapArray(m map[string][]string) []*third.KeyValues {
     return res
 }
-func checkUploadName(ctx context.Context, name string, config *config.GlobalConfig) error {
+func (t *thirdServer) checkUploadName(ctx context.Context, name string) error {
     if name == "" {
         return errs.ErrArgs.Wrap("name is empty")
     }
@@ -56,7 +56,7 @@ func checkUploadName(ctx context.Context, name string, config *config.GlobalConf
     if opUserID == "" {
         return errs.ErrNoPermission.Wrap("opUserID is empty")
     }
-    if !authverify.IsManagerUserID(opUserID, config) {
+    if !authverify.IsManagerUserID(opUserID, t.config) {
         if !strings.HasPrefix(name, opUserID+"/") {
             return errs.ErrNoPermission.Wrap(fmt.Sprintf("name must start with `%s/`", opUserID))
         }
@@ -80,3 +80,7 @@ func checkValidObjectName(objectName string) error {
     }
     return checkValidObjectNamePrefix(objectName)
 }
+func (t *thirdServer) IsManagerUserID(opUserID string) bool {
+    return authverify.IsManagerUserID(opUserID, t.config)
+}
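Note: checkUploadName becomes a method so it can read t.config directly, and the rule it enforces is unchanged: a non-admin operator may only upload under names that start with their own user ID. A small, self-contained sketch of just that rule (the helper name is hypothetical, not part of the codebase):

    package main

    import (
        "fmt"
        "strings"
    )

    // nameAllowed mirrors the non-admin branch of checkUploadName:
    // the object name must start with "<opUserID>/".
    func nameAllowed(opUserID, name string, isManager bool) bool {
        if isManager {
            return true // managers bypass the prefix requirement
        }
        return strings.HasPrefix(name, opUserID+"/")
    }

    func main() {
        fmt.Println(nameAllowed("u123", "u123/avatar.png", false))  // true
        fmt.Println(nameAllowed("u123", "u456/avatar.png", false))  // false
        fmt.Println(nameAllowed("admin1", "u456/avatar.png", true)) // true
    }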

@@ -1,282 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// docURL: https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html
package aws
import (
"context"
"errors"
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
sdk "github.com/aws/aws-sdk-go/service/s3"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
)
const (
minPartSize int64 = 1024 * 1024 * 1 // 1MB
maxPartSize int64 = 1024 * 1024 * 1024 * 5 // 5GB
maxNumSize int64 = 10000
)
// const (
// imagePng = "png"
// imageJpg = "jpg"
// imageJpeg = "jpeg"
// imageGif = "gif"
// imageWebp = "webp"
// )
// const successCode = http.StatusOK
// const (
// videoSnapshotImagePng = "png"
// videoSnapshotImageJpg = "jpg"
// )
func NewAWS() (s3.Interface, error) {
configGlobal := config.NewGlobalConfig()
err := config.InitConfig(configGlobal, "../../config")
if err != nil {
return nil, err
}
conf := configGlobal.Object.Aws
credential := credentials.NewStaticCredentials(
conf.AccessKeyID, // accessKey
conf.AccessKeySecret, // secretKey
"") // stoken
sess, err := session.NewSession(&aws.Config{
Region: aws.String(conf.Region), // The area where the bucket is located
Credentials: credential,
})
if err != nil {
return nil, err
}
return &Aws{
bucket: conf.Bucket,
client: sdk.New(sess),
credential: credential,
}, nil
}
type Aws struct {
bucket string
client *sdk.S3
credential *credentials.Credentials
}
func (a *Aws) Engine() string {
return "aws"
}
func (a *Aws) InitiateMultipartUpload(ctx context.Context, name string) (*s3.InitiateMultipartUploadResult, error) {
input := &sdk.CreateMultipartUploadInput{
Bucket: aws.String(a.bucket), // TODO: To be verified whether it is required
Key: aws.String(name),
}
result, err := a.client.CreateMultipartUploadWithContext(ctx, input)
if err != nil {
return nil, err
}
return &s3.InitiateMultipartUploadResult{
Bucket: *result.Bucket,
Key: *result.Key,
UploadID: *result.UploadId,
}, nil
}
func (a *Aws) CompleteMultipartUpload(ctx context.Context, uploadID string, name string, parts []s3.Part) (*s3.CompleteMultipartUploadResult, error) {
sdkParts := make([]*sdk.CompletedPart, len(parts))
for i, part := range parts {
sdkParts[i] = &sdk.CompletedPart{
ETag: aws.String(part.ETag),
PartNumber: aws.Int64(int64(part.PartNumber)),
}
}
input := &sdk.CompleteMultipartUploadInput{
Bucket: aws.String(a.bucket), // TODO: To be verified whether it is required
Key: aws.String(name),
UploadId: aws.String(uploadID),
MultipartUpload: &sdk.CompletedMultipartUpload{
Parts: sdkParts,
},
}
result, err := a.client.CompleteMultipartUploadWithContext(ctx, input)
if err != nil {
return nil, err
}
return &s3.CompleteMultipartUploadResult{
Location: *result.Location,
Bucket: *result.Bucket,
Key: *result.Key,
ETag: *result.ETag,
}, nil
}
func (a *Aws) PartSize(ctx context.Context, size int64) (int64, error) {
if size <= 0 {
return 0, errors.New("size must be greater than 0")
}
if size > maxPartSize*maxNumSize {
return 0, fmt.Errorf("AWS size must be less than the maximum allowed limit")
}
if size <= minPartSize*maxNumSize {
return minPartSize, nil
}
partSize := size / maxNumSize
if size%maxNumSize != 0 {
partSize++
}
return partSize, nil
}
func (a *Aws) DeleteObject(ctx context.Context, name string) error {
_, err := a.client.DeleteObjectWithContext(ctx, &sdk.DeleteObjectInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
})
return err
}
func (a *Aws) CopyObject(ctx context.Context, src string, dst string) (*s3.CopyObjectInfo, error) {
result, err := a.client.CopyObjectWithContext(ctx, &sdk.CopyObjectInput{
Bucket: aws.String(a.bucket),
Key: aws.String(dst),
CopySource: aws.String(src),
})
if err != nil {
return nil, err
}
return &s3.CopyObjectInfo{
ETag: *result.CopyObjectResult.ETag,
Key: dst,
}, nil
}
func (a *Aws) IsNotFound(err error) bool {
if err == nil {
return false
}
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case sdk.ErrCodeNoSuchKey:
return true
default:
return false
}
}
return false
}
func (a *Aws) AbortMultipartUpload(ctx context.Context, uploadID string, name string) error {
_, err := a.client.AbortMultipartUploadWithContext(ctx, &sdk.AbortMultipartUploadInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
UploadId: aws.String(uploadID),
})
return err
}
func (a *Aws) ListUploadedParts(ctx context.Context, uploadID string, name string, partNumberMarker int, maxParts int) (*s3.ListUploadedPartsResult, error) {
result, err := a.client.ListPartsWithContext(ctx, &sdk.ListPartsInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
UploadId: aws.String(uploadID),
MaxParts: aws.Int64(int64(maxParts)),
PartNumberMarker: aws.Int64(int64(partNumberMarker)),
})
if err != nil {
return nil, err
}
parts := make([]s3.UploadedPart, len(result.Parts))
for i, part := range result.Parts {
parts[i] = s3.UploadedPart{
PartNumber: int(*part.PartNumber),
LastModified: *part.LastModified,
Size: *part.Size,
ETag: *part.ETag,
}
}
return &s3.ListUploadedPartsResult{
Key: *result.Key,
UploadID: *result.UploadId,
NextPartNumberMarker: int(*result.NextPartNumberMarker),
MaxParts: int(*result.MaxParts),
UploadedParts: parts,
}, nil
}
func (a *Aws) PartLimit() *s3.PartLimit {
return &s3.PartLimit{
MinPartSize: minPartSize,
MaxPartSize: maxPartSize,
MaxNumSize: maxNumSize,
}
}
func (a *Aws) PresignedPutObject(ctx context.Context, name string, expire time.Duration) (string, error) {
req, _ := a.client.PutObjectRequest(&sdk.PutObjectInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
})
url, err := req.Presign(expire)
if err != nil {
return "", err
}
return url, nil
}
func (a *Aws) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
result, err := a.client.GetObjectWithContext(ctx, &sdk.GetObjectInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
})
if err != nil {
return nil, err
}
res := &s3.ObjectInfo{
Key: name,
ETag: *result.ETag,
Size: *result.ContentLength,
LastModified: *result.LastModified,
}
return res, nil
}
// AccessURL todo.
func (a *Aws) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
// todo
return "", nil
}
func (a *Aws) FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error) {
// todo
return nil, nil
}
func (a *Aws) AuthSign(ctx context.Context, uploadID string, name string, expire time.Duration, partNumbers []int) (*s3.AuthSignResult, error) {
// todo
return nil, nil
}

@@ -86,7 +86,6 @@ func (c *Controller) GetHashObject(ctx context.Context, hash string) (*s3.Object
 func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*InitiateUploadResult, error) {
     defer log.ZDebug(ctx, "return")
-    log.ZInfo(ctx, "InitiateUpload", "hash", hash, "size", expire)
     if size < 0 {
         return nil, errors.New("invalid size")
     }
@@ -95,7 +94,6 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
     } else if len(hashBytes) != md5.Size {
         return nil, errors.New("invalid md5")
     }
-    log.ZDebug(ctx, "InitiateUpload 33333333333333333333333")
     partSize, err := c.impl.PartSize(ctx, size)
     if err != nil {
         return nil, err
@@ -107,15 +105,11 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
     if maxParts > 0 && partNumber > 0 && partNumber < maxParts {
         return nil, fmt.Errorf("too many parts: %d", partNumber)
     }
-    log.ZDebug(ctx, "InitiateUpload 4444444444444444444444444444444")
     if info, err := c.StatObject(ctx, c.HashPath(hash)); err == nil {
-        log.ZDebug(ctx, "InitiateUpload 55555555555555555555555555555555")
         return nil, &HashAlreadyExistsError{Object: info}
     } else if !c.impl.IsNotFound(err) {
-        log.ZDebug(ctx, "InitiateUpload 66666666666666666666666666666666", err)
         return nil, err
     }
     if size <= partSize {
         // Pre-signed upload
         key := path.Join(tempPath, c.NowPath(), fmt.Sprintf("%s_%d_%s.presigned", hash, size, c.UUID()))
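Note: InitiateUpload signals deduplication by returning *HashAlreadyExistsError when an object with the same MD5 is already stored. A hedged sketch of how a caller might branch on that error; the wrapper function and package name are illustrative, only the cont and s3 types come from this diff:

    package example

    import (
        "context"
        "errors"
        "time"

        "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
        "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3/cont"
    )

    // initiateDeduplicated starts an upload, but short-circuits when the content already exists.
    func initiateDeduplicated(ctx context.Context, c *cont.Controller, hash string, size int64) (*cont.InitiateUploadResult, *s3.ObjectInfo, error) {
        result, err := c.InitiateUpload(ctx, hash, size, time.Hour, 0)
        if err != nil {
            var exists *cont.HashAlreadyExistsError
            if errors.As(err, &exists) {
                // Content with this MD5 is already stored; reuse it instead of uploading again.
                return nil, exists.Object, nil
            }
            return nil, nil, err
        }
        // Otherwise continue with the pre-signed or multipart upload described by result.
        return result, nil, nil
    }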

@@ -29,7 +29,6 @@ import (
     "strings"
     "time"
-    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
     "github.com/tencentyun/cos-go-sdk-v5"
 )
@@ -50,13 +49,15 @@ const (
 const successCode = http.StatusOK
-const (
-    // videoSnapshotImagePng = "png"
-    // videoSnapshotImageJpg = "jpg"
-)
+type Config struct {
+    BucketURL    string
+    SecretID     string
+    SecretKey    string
+    SessionToken string
+    PublicRead   bool
+}
-func NewCos(config *config.GlobalConfig) (s3.Interface, error) {
-    conf := config.Object.Cos
+func NewCos(conf Config) (s3.Interface, error) {
     u, err := url.Parse(conf.BucketURL)
     if err != nil {
         panic(err)
@@ -69,18 +70,18 @@ func NewCos(config *config.GlobalConfig) (s3.Interface, error) {
         },
     })
     return &Cos{
+        publicRead: conf.PublicRead,
         copyURL:    u.Host + "/",
         client:     client,
         credential: client.GetCredential(),
-        config:     config,
     }, nil
 }
 type Cos struct {
+    publicRead bool
     copyURL    string
     client     *cos.Client
     credential *cos.Credential
-    config     *config.GlobalConfig
 }
 func (c *Cos) Engine() string {
@@ -329,7 +330,7 @@ func (c *Cos) AccessURL(ctx context.Context, name string, expire time.Duration,
 }
 func (c *Cos) getPresignedURL(ctx context.Context, name string, expire time.Duration, opt *cos.PresignedURLOptions) (*url.URL, error) {
-    if !c.config.Object.Cos.PublicRead {
+    if !c.publicRead {
         return c.client.Object.GetPresignedURL(ctx, http.MethodGet, name, c.credential.SecretID, c.credential.SecretKey, expire, opt)
     }
     return c.client.Object.GetObjectURL(name), nil
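Note: the pattern in this file (and in the minio and oss hunks below) is to capture the one value the driver needs, conf.PublicRead, as a struct field at construction time instead of dereferencing c.config.Object.Cos.PublicRead on every call, which removes the drivers' dependency on the global config package. An illustrative before/after sketch with stand-in types, not the real OpenIM types:

    package example

    // globalConfig mimics the nested shape the old code reached into on every call (stand-in).
    type globalConfig struct {
        Object struct {
            Cos struct{ PublicRead bool }
        }
    }

    // cosBefore kept a pointer to the entire global config.
    type cosBefore struct{ config *globalConfig }

    func (c *cosBefore) public() bool { return c.config.Object.Cos.PublicRead }

    // cosAfter copies only the flag it needs, like the new publicRead field.
    type cosAfter struct{ publicRead bool }

    func (c *cosAfter) public() bool { return c.publicRead }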

@@ -42,51 +42,79 @@ func ImageWidthHeight(img image.Image) (int, int) {
     return bounds.X, bounds.Y
 }
-// resizeImage resizes an image to a specified maximum width and height, maintaining the aspect ratio.
-// If both maxWidth and maxHeight are set to 0, the original image is returned.
-// If both are non-zero, the image is scaled to fit within the constraints while maintaining aspect ratio.
-// If only one of maxWidth or maxHeight is non-zero, the image is scaled accordingly.
 func resizeImage(img image.Image, maxWidth, maxHeight int) image.Image {
     bounds := img.Bounds()
-    imgWidth, imgHeight := bounds.Dx(), bounds.Dy()
-    // Return original image if no resizing is needed.
+    imgWidth := bounds.Max.X
+    imgHeight := bounds.Max.Y
+    // Calculate the scaling ratios
+    scaleWidth := float64(maxWidth) / float64(imgWidth)
+    scaleHeight := float64(maxHeight) / float64(imgHeight)
+    // If both are 0, do not scale; return the original image
     if maxWidth == 0 && maxHeight == 0 {
         return img
     }
-    var scale float64 = 1
+    // If both width and height are greater than 0, choose the smaller scaling ratio to keep the aspect ratio
     if maxWidth > 0 && maxHeight > 0 {
-        scaleWidth := float64(maxWidth) / float64(imgWidth)
-        scaleHeight := float64(maxHeight) / float64(imgHeight)
-        // Choose the smaller scale to fit both constraints.
-        scale = min(scaleWidth, scaleHeight)
-    } else if maxWidth > 0 {
-        scale = float64(maxWidth) / float64(imgWidth)
-    } else if maxHeight > 0 {
-        scale = float64(maxHeight) / float64(imgHeight)
+        scale := scaleWidth
+        if scaleHeight < scaleWidth {
+            scale = scaleHeight
+        }
+        // Calculate the thumbnail dimensions
+        thumbnailWidth := int(float64(imgWidth) * scale)
+        thumbnailHeight := int(float64(imgHeight) * scale)
+        // Use the "image" library's Resample method to generate the thumbnail
+        thumbnail := image.NewRGBA(image.Rect(0, 0, thumbnailWidth, thumbnailHeight))
+        for y := 0; y < thumbnailHeight; y++ {
+            for x := 0; x < thumbnailWidth; x++ {
+                srcX := int(float64(x) / scale)
+                srcY := int(float64(y) / scale)
+                thumbnail.Set(x, y, img.At(srcX, srcY))
+            }
+        }
+        return thumbnail
     }
-    newWidth := int(float64(imgWidth) * scale)
-    newHeight := int(float64(imgHeight) * scale)
-    // Resize the image by creating a new image and manually copying pixels.
-    thumbnail := image.NewRGBA(image.Rect(0, 0, newWidth, newHeight))
-    for y := 0; y < newHeight; y++ {
-        for x := 0; x < newWidth; x++ {
-            srcX := int(float64(x) / scale)
-            srcY := int(float64(y) / scale)
-            thumbnail.Set(x, y, img.At(srcX, srcY))
-        }
-    }
-    return thumbnail
-}
-// min returns the smaller of x or y.
-func min(x, y float64) float64 {
-    if x < y {
-        return x
-    }
-    return y
+    // If only a width or a height is specified, generate the thumbnail so it does not exceed the given maximum
+    if maxWidth > 0 {
+        thumbnailWidth := maxWidth
+        thumbnailHeight := int(float64(imgHeight) * scaleWidth)
+        // Use the "image" library's Resample method to generate the thumbnail
+        thumbnail := image.NewRGBA(image.Rect(0, 0, thumbnailWidth, thumbnailHeight))
+        for y := 0; y < thumbnailHeight; y++ {
+            for x := 0; x < thumbnailWidth; x++ {
+                srcX := int(float64(x) / scaleWidth)
+                srcY := int(float64(y) / scaleWidth)
+                thumbnail.Set(x, y, img.At(srcX, srcY))
+            }
+        }
+        return thumbnail
+    }
+    if maxHeight > 0 {
+        thumbnailWidth := int(float64(imgWidth) * scaleHeight)
+        thumbnailHeight := maxHeight
+        // Use the "image" library's Resample method to generate the thumbnail
+        thumbnail := image.NewRGBA(image.Rect(0, 0, thumbnailWidth, thumbnailHeight))
+        for y := 0; y < thumbnailHeight; y++ {
+            for x := 0; x < thumbnailWidth; x++ {
+                srcX := int(float64(x) / scaleHeight)
+                srcY := int(float64(y) / scaleHeight)
+                thumbnail.Set(x, y, img.At(srcX, srcY))
+            }
+        }
+        return thumbnail
+    }
+    // By default, return the original image
+    return img
 }
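Note: a quick worked example of the dimension arithmetic in the incoming resizeImage, for the branch where both maxima are positive: a 4000x3000 source with maxWidth = maxHeight = 1024 gives scaleWidth = 1024/4000 = 0.256 and scaleHeight = 1024/3000 ≈ 0.341; the smaller ratio wins, so the thumbnail comes out 1024x768. A self-contained sketch of just that arithmetic (it deliberately skips the pixel loop and the single-maximum branches):

    package main

    import "fmt"

    // thumbnailSize reproduces only the both-positive branch of resizeImage:
    // pick the smaller scaling ratio so neither side exceeds its maximum.
    func thumbnailSize(imgWidth, imgHeight, maxWidth, maxHeight int) (int, int) {
        scaleWidth := float64(maxWidth) / float64(imgWidth)
        scaleHeight := float64(maxHeight) / float64(imgHeight)
        scale := scaleWidth
        if scaleHeight < scaleWidth {
            scale = scaleHeight
        }
        return int(float64(imgWidth) * scale), int(float64(imgHeight) * scale)
    }

    func main() {
        w, h := thumbnailSize(4000, 3000, 1024, 1024)
        fmt.Println(w, h) // 1024 768
    }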

@@ -33,7 +33,6 @@ import (
     "github.com/minio/minio-go/v7"
     "github.com/minio/minio-go/v7/pkg/credentials"
     "github.com/minio/minio-go/v7/pkg/signer"
-    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
 )
@@ -43,7 +42,7 @@ const (
 )
 const (
-    minPartSize int64 = 1024 * 1024 * 5        // 1MB
+    minPartSize int64 = 1024 * 1024 * 5        // 5MB
     maxPartSize int64 = 1024 * 1024 * 1024 * 5 // 5GB
     maxNumSize  int64 = 10000
 )
@@ -57,13 +56,23 @@ const (
 const successCode = http.StatusOK
-func NewMinio(cache cache.MinioCache, config *config.GlobalConfig) (s3.Interface, error) {
-    u, err := url.Parse(config.Object.Minio.Endpoint)
+type Config struct {
+    Bucket          string
+    Endpoint        string
+    AccessKeyID     string
+    SecretAccessKey string
+    SessionToken    string
+    SignEndpoint    string
+    PublicRead      bool
+}
+func NewMinio(cache cache.MinioCache, conf Config) (s3.Interface, error) {
+    u, err := url.Parse(conf.Endpoint)
     if err != nil {
         return nil, err
     }
     opts := &minio.Options{
-        Creds:  credentials.NewStaticV4(config.Object.Minio.AccessKeyID, config.Object.Minio.SecretAccessKey, config.Object.Minio.SessionToken),
+        Creds:  credentials.NewStaticV4(conf.AccessKeyID, conf.SecretAccessKey, conf.SessionToken),
         Secure: u.Scheme == "https",
     }
     client, err := minio.New(u.Host, opts)
@@ -71,27 +80,26 @@ func NewMinio(cache cache.MinioCache, config *config.GlobalConfig) (s3.Interface
         return nil, err
     }
     m := &Minio{
-        bucket: config.Object.Minio.Bucket,
+        bucket: conf.Bucket,
         core:   &minio.Core{Client: client},
         lock:   &sync.Mutex{},
         init:   false,
         cache:  cache,
-        config: config,
     }
-    if config.Object.Minio.SignEndpoint == "" || config.Object.Minio.SignEndpoint == config.Object.Minio.Endpoint {
+    if conf.SignEndpoint == "" || conf.SignEndpoint == conf.Endpoint {
         m.opts = opts
         m.sign = m.core.Client
         m.prefix = u.Path
         u.Path = ""
-        config.Object.Minio.Endpoint = u.String()
-        m.signEndpoint = config.Object.Minio.Endpoint
+        conf.Endpoint = u.String()
+        m.signEndpoint = conf.Endpoint
     } else {
-        su, err := url.Parse(config.Object.Minio.SignEndpoint)
+        su, err := url.Parse(conf.SignEndpoint)
         if err != nil {
             return nil, err
         }
         m.opts = &minio.Options{
-            Creds:  credentials.NewStaticV4(config.Object.Minio.AccessKeyID, config.Object.Minio.SecretAccessKey, config.Object.Minio.SessionToken),
+            Creds:  credentials.NewStaticV4(conf.AccessKeyID, conf.SecretAccessKey, conf.SessionToken),
             Secure: su.Scheme == "https",
         }
         m.sign, err = minio.New(su.Host, m.opts)
@@ -100,8 +108,8 @@ func NewMinio(cache cache.MinioCache, config *config.GlobalConfig) (s3.Interface
         }
         m.prefix = su.Path
         su.Path = ""
-        config.Object.Minio.SignEndpoint = su.String()
-        m.signEndpoint = config.Object.Minio.SignEndpoint
+        conf.SignEndpoint = su.String()
+        m.signEndpoint = conf.SignEndpoint
     }
     ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
     defer cancel()
@@ -112,6 +120,7 @@ func NewMinio(cache cache.MinioCache, config *config.GlobalConfig) (s3.Interface
 }
 type Minio struct {
+    conf         Config
     bucket       string
     signEndpoint string
     location     string
@@ -122,7 +131,6 @@ type Minio struct {
     init         bool
     prefix       string
     cache        cache.MinioCache
-    config       *config.GlobalConfig
 }
 func (m *Minio) initMinio(ctx context.Context) error {
@@ -134,35 +142,30 @@ func (m *Minio) initMinio(ctx context.Context) error {
     if m.init {
         return nil
     }
-    conf := m.config.Object.Minio
-    log.ZDebug(ctx, "conf!11111111111111111111111111111111111111111111111111111111111111111111111111111", "conf", conf.Bucket, "openopen")
-    if conf.Bucket != "openim" {
-        log.ZError(ctx, "ppppppppppppppppppppppppppppppp", fmt.Errorf("aaa"))
-    }
-    exists, err := m.core.Client.BucketExists(ctx, conf.Bucket)
+    exists, err := m.core.Client.BucketExists(ctx, m.conf.Bucket)
     if err != nil {
         return fmt.Errorf("check bucket exists error: %w", err)
     }
     if !exists {
-        if err = m.core.Client.MakeBucket(ctx, conf.Bucket, minio.MakeBucketOptions{}); err != nil {
+        if err = m.core.Client.MakeBucket(ctx, m.conf.Bucket, minio.MakeBucketOptions{}); err != nil {
             return fmt.Errorf("make bucket error: %w", err)
         }
     }
-    if conf.PublicRead {
+    if m.conf.PublicRead {
         policy := fmt.Sprintf(
             `{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject","s3:PutObject"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::%s/*"],"Sid": ""}]}`,
-            conf.Bucket,
+            m.conf.Bucket,
         )
-        if err = m.core.Client.SetBucketPolicy(ctx, conf.Bucket, policy); err != nil {
+        if err = m.core.Client.SetBucketPolicy(ctx, m.conf.Bucket, policy); err != nil {
             return err
         }
     }
-    m.location, err = m.core.Client.GetBucketLocation(ctx, conf.Bucket)
+    m.location, err = m.core.Client.GetBucketLocation(ctx, m.conf.Bucket)
     if err != nil {
         return err
     }
     func() {
-        if conf.SignEndpoint == "" || conf.SignEndpoint == conf.Endpoint {
+        if m.conf.SignEndpoint == "" || m.conf.SignEndpoint == m.conf.Endpoint {
             return
         }
         defer func() {
@@ -182,7 +185,7 @@ func (m *Minio) initMinio(ctx context.Context) error {
         blc := reflect.ValueOf(m.sign).Elem().FieldByName("bucketLocCache")
         vblc := reflect.New(reflect.PtrTo(blc.Type()))
         *(*unsafe.Pointer)(vblc.UnsafePointer()) = unsafe.Pointer(blc.UnsafeAddr())
-        vblc.Elem().Elem().Interface().(interface{ Set(string, string) }).Set(conf.Bucket, m.location)
+        vblc.Elem().Elem().Interface().(interface{ Set(string, string) }).Set(m.conf.Bucket, m.location)
     }()
     m.init = true
     return nil
@@ -314,14 +317,10 @@ func (m *Minio) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, er
     if err := m.initMinio(ctx); err != nil {
         return nil, err
     }
-    log.ZDebug(ctx, "StatObject !!!!!!1111111111111111111111111111111111")
-    log.ZInfo(ctx, "StatObject", "bucket", m.bucket, "name", name)
     info, err := m.core.Client.StatObject(ctx, m.bucket, name, minio.StatObjectOptions{})
     if err != nil {
-        log.ZDebug(ctx, "StatObject !!!!!!555555555555", err)
         return nil, err
     }
-    log.ZDebug(ctx, "StatObject !!!!!!22222222222222222222222222222222222")
     return &s3.ObjectInfo{
         ETag: strings.ToLower(info.ETag),
         Key:  info.Key,
@@ -407,7 +406,7 @@ func (m *Minio) PresignedGetObject(ctx context.Context, name string, expire time
         rawURL *url.URL
         err    error
     )
-    if m.config.Object.Minio.PublicRead {
+    if m.conf.PublicRead {
         rawURL, err = makeTargetURL(m.sign, m.bucket, name, m.location, false, query)
     } else {
         rawURL, err = m.sign.PresignedGetObject(ctx, m.bucket, name, expire, query)
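Note: in the MinIO driver, Endpoint is the address the server itself uses to reach MinIO, while SignEndpoint is the host that should appear in the presigned URLs handed to clients; when the two differ, NewMinio builds a second client purely for signing. An illustrative Config with the two split (the values are made up, and rdb is assumed to be the existing Redis client from start.go):

    conf := minio.Config{
        Bucket:          "openim",
        Endpoint:        "http://minio:9000",         // reachable from inside the deployment
        AccessKeyID:     "minioadmin",
        SecretAccessKey: "minioadmin",
        SignEndpoint:    "https://files.example.com", // host clients should see in presigned URLs
        PublicRead:      false,
    }
    m, err := minio.NewMinio(cache.NewMinioCache(rdb), conf)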

@@ -32,7 +32,6 @@ import (
     "github.com/OpenIMSDK/tools/errs"
     "github.com/aliyun/aliyun-oss-go-sdk/oss"
-    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
 )
@@ -52,13 +51,17 @@ const (
 const successCode = http.StatusOK
-/* const (
-    videoSnapshotImagePng = "png"
-    videoSnapshotImageJpg = "jpg"
-) */
+type Config struct {
+    Endpoint        string
+    Bucket          string
+    BucketURL       string
+    AccessKeyID     string
+    AccessKeySecret string
+    SessionToken    string
+    PublicRead      bool
+}
-func NewOSS(config *config.GlobalConfig) (s3.Interface, error) {
-    conf := config.Object.Oss
+func NewOSS(conf Config) (s3.Interface, error) {
     if conf.BucketURL == "" {
         return nil, errs.Wrap(errors.New("bucket url is empty"))
     }
@@ -78,7 +81,7 @@ func NewOSS(config *config.GlobalConfig) (s3.Interface, error) {
         bucket:      bucket,
         credentials: client.Config.GetCredentials(),
         um:          *(*urlMaker)(reflect.ValueOf(bucket.Client.Conn).Elem().FieldByName("url").UnsafePointer()),
-        PublicRead:  conf.PublicRead,
+        publicRead:  conf.PublicRead,
     }, nil
 }
@@ -87,7 +90,7 @@ type OSS struct {
     bucket      *oss.Bucket
     credentials oss.Credentials
     um          urlMaker
-    PublicRead  bool
+    publicRead  bool
 }
 func (o *OSS) Engine() string {
@@ -284,7 +287,6 @@ func (o *OSS) ListUploadedParts(ctx context.Context, uploadID string, name strin
 }
 func (o *OSS) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
-    publicRead := o.PublicRead
     var opts []oss.Option
     if opt != nil {
         if opt.Image != nil {
@@ -312,7 +314,7 @@ func (o *OSS) AccessURL(ctx context.Context, name string, expire time.Duration,
             process += ",format," + format
             opts = append(opts, oss.Process(process))
         }
-        if !publicRead {
+        if !o.publicRead {
             if opt.ContentType != "" {
                 opts = append(opts, oss.ResponseContentType(opt.ContentType))
             }
@@ -326,7 +328,7 @@ func (o *OSS) AccessURL(ctx context.Context, name string, expire time.Duration,
     } else if expire < time.Second {
         expire = time.Second
     }
-    if !publicRead {
+    if !o.publicRead {
         return o.bucket.SignURL(name, http.MethodGet, int64(expire/time.Second), opts...)
     }
     rawParams, err := oss.GetRawParams(opts)
