feat: S3 server cache (#1329)

* optimize scheduled deletion

* minio cache

* fix: conflicts

* feat: minio cache

* feat: cache optimize
chao 1 year ago committed by GitHub
parent 62e9980f3c
commit cb0bf64435

@@ -67,7 +67,7 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
var o s3.Interface
switch config.Config.Object.Enable {
case "minio":
o, err = minio.NewMinio()
o, err = minio.NewMinio(cache.NewMinioCache(rdb))
case "cos":
o, err = cos.NewCos()
case "oss":
@@ -78,11 +78,17 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
if err != nil {
return err
}
//specialerror.AddErrHandler(func(err error) errs.CodeError {
// if o.IsNotFound(err) {
// return errs.ErrRecordNotFound
// }
// return nil
//})
third.RegisterThirdServer(server, &thirdServer{
apiURL: apiURL,
thirdDatabase: controller.NewThirdDatabase(cache.NewMsgCacheModel(rdb), db),
userRpcClient: rpcclient.NewUserRpcClient(client),
s3dataBase: controller.NewS3Database(o, relation.NewObjectInfo(db)),
s3dataBase: controller.NewS3Database(rdb, o, relation.NewObjectInfo(db)),
defaultExpire: time.Hour * 24 * 7,
})
return nil

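For orientation, this is the construction chain the commit introduces, assembled from the hunks below into one sketch; rdb and db follow the names used in this diff, while the wrapper function and its *gorm.DB parameter are assumptions.

// Sketch only: how the Redis client is threaded through the new caches.
func buildS3Layer(rdb redis.UniversalClient, db *gorm.DB) (controller.S3Database, error) {
	// The storage implementation now receives a Redis-backed cache for image
	// metadata and thumbnail keys.
	o, err := minio.NewMinio(cache.NewMinioCache(rdb))
	if err != nil {
		return nil, err
	}
	// NewS3Database also takes rdb; internally it builds
	//   cont.New(cache.NewS3Cache(rdb, o), o)        // "S3:<engine>:<key>" stat cache
	//   cache.NewObjectCacheRedis(rdb, objectInfo)   // "OBJECT:<name>" row cache
	return controller.NewS3Database(rdb, o, relation.NewObjectInfo(db)), nil
}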
@@ -18,6 +18,7 @@ import (
"context"
"encoding/json"
"errors"
"github.com/OpenIMSDK/tools/mw/specialerror"
"time"
"github.com/dtm-labs/rockscache"
@@ -209,6 +210,9 @@ func batchGetCache2[T any, K comparable](
return fns(ctx, key)
})
if err != nil {
if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
continue
}
return nil, err
}
res = append(res, val)

@@ -0,0 +1,190 @@
package cache
import (
"context"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/redis/go-redis/v9"
"strconv"
"time"
)
type ObjectCache interface {
metaCache
GetName(ctx context.Context, name string) (*relationtb.ObjectModel, error)
DelObjectName(names ...string) ObjectCache
}
func NewObjectCacheRedis(rdb redis.UniversalClient, objDB relationtb.ObjectInfoModelInterface) ObjectCache {
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
return &objectCacheRedis{
rcClient: rcClient,
expireTime: time.Hour * 12,
objDB: objDB,
metaCache: NewMetaCacheRedis(rcClient),
}
}
type objectCacheRedis struct {
metaCache
objDB relationtb.ObjectInfoModelInterface
rcClient *rockscache.Client
expireTime time.Duration
}
func (g *objectCacheRedis) NewCache() ObjectCache {
return &objectCacheRedis{
rcClient: g.rcClient,
expireTime: g.expireTime,
objDB: g.objDB,
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
}
}
func (g *objectCacheRedis) DelObjectName(names ...string) ObjectCache {
objectCache := g.NewCache()
keys := make([]string, 0, len(names))
for _, name := range names {
keys = append(keys, g.getObjectKey(name))
}
objectCache.AddKeys(keys...)
return objectCache
}
func (g *objectCacheRedis) getObjectKey(name string) string {
return "OBJECT:" + name
}
func (g *objectCacheRedis) GetName(ctx context.Context, name string) (*relationtb.ObjectModel, error) {
return getCache(ctx, g.rcClient, g.getObjectKey(name), g.expireTime, func(ctx context.Context) (*relationtb.ObjectModel, error) {
return g.objDB.Take(ctx, name)
})
}
type S3Cache interface {
metaCache
GetKey(ctx context.Context, engine string, key string) (*s3.ObjectInfo, error)
DelS3Key(engine string, keys ...string) S3Cache
}
func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) S3Cache {
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
return &s3CacheRedis{
rcClient: rcClient,
expireTime: time.Hour * 12,
s3: s3,
metaCache: NewMetaCacheRedis(rcClient),
}
}
type s3CacheRedis struct {
metaCache
s3 s3.Interface
rcClient *rockscache.Client
expireTime time.Duration
}
func (g *s3CacheRedis) NewCache() S3Cache {
return &s3CacheRedis{
rcClient: g.rcClient,
expireTime: g.expireTime,
s3: g.s3,
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
}
}
func (g *s3CacheRedis) DelS3Key(engine string, keys ...string) S3Cache {
s3cache := g.NewCache()
ks := make([]string, 0, len(keys))
for _, key := range keys {
ks = append(ks, g.getS3Key(engine, key))
}
s3cache.AddKeys(ks...)
return s3cache
}
func (g *s3CacheRedis) getS3Key(engine string, name string) string {
return "S3:" + engine + ":" + name
}
func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (*s3.ObjectInfo, error) {
return getCache(ctx, g.rcClient, g.getS3Key(engine, name), g.expireTime, func(ctx context.Context) (*s3.ObjectInfo, error) {
return g.s3.StatObject(ctx, name)
})
}
type MinioCache interface {
metaCache
GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*MinioImageInfo, error)) (*MinioImageInfo, error)
GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error)
DelObjectImageInfoKey(keys ...string) MinioCache
DelImageThumbnailKey(key string, format string, width int, height int) MinioCache
}
func NewMinioCache(rdb redis.UniversalClient) MinioCache {
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
return &minioCacheRedis{
rcClient: rcClient,
expireTime: time.Hour * 24 * 7,
metaCache: NewMetaCacheRedis(rcClient),
}
}
type minioCacheRedis struct {
metaCache
rcClient *rockscache.Client
expireTime time.Duration
}
func (g *minioCacheRedis) NewCache() MinioCache {
return &minioCacheRedis{
rcClient: g.rcClient,
expireTime: g.expireTime,
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
}
}
func (g *minioCacheRedis) DelObjectImageInfoKey(keys ...string) MinioCache {
s3cache := g.NewCache()
ks := make([]string, 0, len(keys))
for _, key := range keys {
ks = append(ks, g.getObjectImageInfoKey(key))
}
s3cache.AddKeys(ks...)
return s3cache
}
func (g *minioCacheRedis) DelImageThumbnailKey(key string, format string, width int, height int) MinioCache {
s3cache := g.NewCache()
s3cache.AddKeys(g.getMinioImageThumbnailKey(key, format, width, height))
return s3cache
}
func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
return "MINIO:IMAGE:" + key
}
func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
return "MINIO:THUMBNAIL:" + format + ":w" + strconv.Itoa(width) + ":h" + strconv.Itoa(height) + ":" + key
}
func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*MinioImageInfo, error)) (*MinioImageInfo, error) {
info, err := getCache(ctx, g.rcClient, g.getObjectImageInfoKey(key), g.expireTime, fn)
if err != nil {
return nil, err
}
return info, nil
}
func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
}
type MinioImageInfo struct {
IsImg bool `json:"isImg"`
Width int `json:"width"`
Height int `json:"height"`
Format string `json:"format"`
Etag string `json:"etag"`
}

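All three cache types above follow the same rockscache pattern: reads go through getCache with a loader that fills Redis on a miss, and invalidation is built up fluently and only executed by ExecDel. A minimal usage sketch, assuming an s3Cache built with NewS3Cache and an illustrative object key:

// Read-through: on a miss the loader (here s3.StatObject) populates Redis
// under "S3:minio:example/object" with the 12h expiry set above.
info, err := s3Cache.GetKey(ctx, "minio", "example/object")
if err != nil {
	return err
}
_ = info
// DelS3Key only records the key on a fresh cache value; nothing is deleted
// until ExecDel runs the rockscache delayed double delete.
if err := s3Cache.DelS3Key("minio", "example/object").ExecDel(ctx); err != nil {
	return err
}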
@@ -16,12 +16,13 @@ package controller
import (
"context"
"path/filepath"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3/cont"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/redis/go-redis/v9"
"path/filepath"
"time"
)
type S3Database interface {
@@ -34,16 +35,18 @@ type S3Database interface {
SetObject(ctx context.Context, info *relation.ObjectModel) error
}
func NewS3Database(s3 s3.Interface, obj relation.ObjectInfoModelInterface) S3Database {
func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj relation.ObjectInfoModelInterface) S3Database {
return &s3Database{
s3: cont.New(s3),
obj: obj,
s3: cont.New(cache.NewS3Cache(rdb, s3), s3),
cache: cache.NewObjectCacheRedis(rdb, obj),
db: obj,
}
}
type s3Database struct {
s3 *cont.Controller
obj relation.ObjectInfoModelInterface
s3 *cont.Controller
cache cache.ObjectCache
db relation.ObjectInfoModelInterface
}
func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
@@ -67,11 +70,14 @@ func (s *s3Database) CompleteMultipartUpload(ctx context.Context, uploadID strin
}
func (s *s3Database) SetObject(ctx context.Context, info *relation.ObjectModel) error {
return s.obj.SetObject(ctx, info)
if err := s.db.SetObject(ctx, info); err != nil {
return err
}
return s.cache.DelObjectName(info.Name).ExecDel(ctx)
}
func (s *s3Database) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (time.Time, string, error) {
obj, err := s.obj.Take(ctx, name)
obj, err := s.cache.GetName(ctx, name)
if err != nil {
return time.Time{}, "", err
}

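The write path is now cache-aside: SetObject persists the row first and then invalidates OBJECT:<name>, while AccessURL resolves the row through GetName instead of hitting the database on every link request. A hedged caller-side sketch; the object name is illustrative:

// Sketch only: typical use of the updated S3Database.
if err := s3db.SetObject(ctx, &relation.ObjectModel{Name: "avatar/123.png" /* other fields omitted */}); err != nil {
	return err
}
// Served from the Redis object cache; the DB is only consulted on a miss.
expireAt, rawURL, err := s3db.AccessURL(ctx, "avatar/123.png", time.Hour, &s3.AccessURLOption{})
if err != nil {
	return err
}
_, _ = expireAt, rawURL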
@@ -20,6 +20,7 @@ import (
"encoding/hex"
"errors"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"path"
"strings"
"time"
@@ -32,12 +33,16 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
)
func New(impl s3.Interface) *Controller {
return &Controller{impl: impl}
func New(cache cache.S3Cache, impl s3.Interface) *Controller {
return &Controller{
cache: cache,
impl: impl,
}
}
type Controller struct {
impl s3.Interface
cache cache.S3Cache
impl s3.Interface
}
func (c *Controller) HashPath(md5 string) string {
@@ -69,8 +74,12 @@ func (c *Controller) PartLimit() *s3.PartLimit {
return c.impl.PartLimit()
}
func (c *Controller) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
return c.cache.GetKey(ctx, c.impl.Engine(), name)
}
func (c *Controller) GetHashObject(ctx context.Context, hash string) (*s3.ObjectInfo, error) {
return c.impl.StatObject(ctx, c.HashPath(hash))
return c.StatObject(ctx, c.HashPath(hash))
}
func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*InitiateUploadResult, error) {
@@ -94,7 +103,7 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
if maxParts > 0 && partNumber > 0 && partNumber < maxParts {
return nil, errors.New(fmt.Sprintf("too many parts: %d", partNumber))
}
if info, err := c.impl.StatObject(ctx, c.HashPath(hash)); err == nil {
if info, err := c.StatObject(ctx, c.HashPath(hash)); err == nil {
return nil, &HashAlreadyExistsError{Object: info}
} else if !c.impl.IsNotFound(err) {
return nil, err
@@ -168,13 +177,13 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
fmt.Println("CompleteUpload sum:", hex.EncodeToString(md5Sum[:]), "upload hash:", upload.Hash)
return nil, errors.New("md5 mismatching")
}
if info, err := c.impl.StatObject(ctx, c.HashPath(upload.Hash)); err == nil {
if info, err := c.StatObject(ctx, c.HashPath(upload.Hash)); err == nil {
return &UploadResult{
Key: info.Key,
Size: info.Size,
Hash: info.ETag,
}, nil
} else if !c.impl.IsNotFound(err) {
} else if !c.IsNotFound(err) {
return nil, err
}
cleanObject := make(map[string]struct{})
@@ -200,7 +209,7 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
}
targetKey = result.Key
case UploadTypePresigned:
uploadInfo, err := c.impl.StatObject(ctx, upload.Key)
uploadInfo, err := c.StatObject(ctx, upload.Key)
if err != nil {
return nil, err
}
@@ -230,6 +239,9 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
default:
return nil, errors.New("invalid upload id type")
}
if err := c.cache.DelS3Key(c.impl.Engine(), targetKey).ExecDel(ctx); err != nil {
return nil, err
}
return &UploadResult{
Key: targetKey,
Size: upload.Size,
@@ -253,7 +265,7 @@ func (c *Controller) AuthSign(ctx context.Context, uploadID string, partNumbers
}
func (c *Controller) IsNotFound(err error) bool {
return c.impl.IsNotFound(err)
return c.impl.IsNotFound(err) || errs.ErrRecordNotFound.Is(err)
}
func (c *Controller) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {

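Because every StatObject in the upload path is now served from the engine-scoped Redis key, the explicit DelS3Key before returning the UploadResult is what prevents a just-completed object from still reading as not-found. A hedged sketch of that read/invalidate pair; the wrapper function and its parameters are illustrative, not part of the diff:

// Sketch only: the stat-then-invalidate pattern used by CompleteUpload.
func refreshStat(ctx context.Context, c *cont.Controller, s3Cache cache.S3Cache, engine, key string) error {
	// Hits "S3:<engine>:<key>" first; falls back to impl.StatObject on a miss.
	if _, err := c.StatObject(ctx, key); err != nil && !c.IsNotFound(err) {
		return err
	}
	// After the object is (re)written, drop the cached stat so the next read reloads it.
	return s3Cache.DelS3Key(engine, key).ExecDel(ctx)
}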
@@ -15,20 +15,14 @@
package minio
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"image"
"image/gif"
"image/jpeg"
"image/png"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"io"
"net/http"
"net/url"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
@@ -56,13 +50,13 @@ const (
)
const (
maxImageWidth = 1024
maxImageHeight = 1024
maxImageSize = 1024 * 1024 * 50
pathInfo = "openim/thumbnail"
maxImageWidth = 1024
maxImageHeight = 1024
maxImageSize = 1024 * 1024 * 50
imageThumbnailPath = "openim/thumbnail"
)
func NewMinio() (s3.Interface, error) {
func NewMinio(cache cache.MinioCache) (s3.Interface, error) {
u, err := url.Parse(config.Config.Object.Minio.Endpoint)
if err != nil {
return nil, err
@@ -80,6 +74,7 @@ func NewMinio() (s3.Interface, error) {
core: &minio.Core{Client: client},
lock: &sync.Mutex{},
init: false,
cache: cache,
}
if config.Config.Object.Minio.SignEndpoint == "" || config.Config.Object.Minio.SignEndpoint == config.Config.Object.Minio.Endpoint {
m.opts = opts
@@ -124,6 +119,7 @@ type Minio struct {
lock sync.Locker
init bool
prefix string
cache cache.MinioCache
}
func (m *Minio) initMinio(ctx context.Context) error {
@@ -227,6 +223,7 @@ func (m *Minio) CompleteMultipartUpload(ctx context.Context, uploadID string, na
if err != nil {
return nil, err
}
m.delObjectImageInfoKey(ctx, name, upload.Size)
return &s3.CompleteMultipartUploadResult{
Location: upload.Location,
Bucket: upload.Bucket,
@@ -389,7 +386,7 @@ func (m *Minio) ListUploadedParts(ctx context.Context, uploadID string, name str
return res, nil
}
func (m *Minio) presignedGetObject(ctx context.Context, name string, expire time.Duration, query url.Values) (string, error) {
func (m *Minio) PresignedGetObject(ctx context.Context, name string, expire time.Duration, query url.Values) (string, error) {
if expire <= 0 {
expire = time.Hour * 24 * 365 * 99 // 99 years
} else if expire < time.Second {
@@ -427,109 +424,9 @@ func (m *Minio) AccessURL(ctx context.Context, name string, expire time.Duration
}
}
if opt.Image == nil || (opt.Image.Width < 0 && opt.Image.Height < 0 && opt.Image.Format == "") || (opt.Image.Width > maxImageWidth || opt.Image.Height > maxImageHeight) {
return m.presignedGetObject(ctx, name, expire, reqParams)
return m.PresignedGetObject(ctx, name, expire, reqParams)
}
fileInfo, err := m.StatObject(ctx, name)
if err != nil {
return "", err
}
if fileInfo.Size > maxImageSize {
return "", errors.New("file size too large")
}
objectInfoPath := path.Join(pathInfo, fileInfo.ETag, "image.json")
var (
img image.Image
info minioImageInfo
)
data, err := m.getObjectData(ctx, objectInfoPath, 1024)
if err == nil {
if err := json.Unmarshal(data, &info); err != nil {
return "", fmt.Errorf("unmarshal minio image info.json error: %w", err)
}
if info.NotImage {
return "", errors.New("not image")
}
} else if m.IsNotFound(err) {
reader, err := m.core.Client.GetObject(ctx, m.bucket, name, minio.GetObjectOptions{})
if err != nil {
return "", err
}
defer reader.Close()
imageInfo, format, err := ImageStat(reader)
if err == nil {
info.NotImage = false
info.Format = format
info.Width, info.Height = ImageWidthHeight(imageInfo)
img = imageInfo
} else {
info.NotImage = true
}
data, err := json.Marshal(&info)
if err != nil {
return "", err
}
if _, err := m.core.Client.PutObject(ctx, m.bucket, objectInfoPath, bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{}); err != nil {
return "", err
}
} else {
return "", err
}
if opt.Image.Width > info.Width || opt.Image.Width <= 0 {
opt.Image.Width = info.Width
}
if opt.Image.Height > info.Height || opt.Image.Height <= 0 {
opt.Image.Height = info.Height
}
opt.Image.Format = strings.ToLower(opt.Image.Format)
if opt.Image.Format == formatJpg {
opt.Image.Format = formatJpeg
}
switch opt.Image.Format {
case formatPng:
case formatJpeg:
case formatGif:
default:
if info.Format == formatGif {
opt.Image.Format = formatGif
} else {
opt.Image.Format = formatJpeg
}
}
reqParams.Set("response-content-type", "image/"+opt.Image.Format)
if opt.Image.Width == info.Width && opt.Image.Height == info.Height && opt.Image.Format == info.Format {
return m.presignedGetObject(ctx, name, expire, reqParams)
}
cacheKey := filepath.Join(pathInfo, fileInfo.ETag, fmt.Sprintf("image_w%d_h%d.%s", opt.Image.Width, opt.Image.Height, opt.Image.Format))
if _, err := m.core.Client.StatObject(ctx, m.bucket, cacheKey, minio.StatObjectOptions{}); err == nil {
return m.presignedGetObject(ctx, cacheKey, expire, reqParams)
} else if !m.IsNotFound(err) {
return "", err
}
if img == nil {
reader, err := m.core.Client.GetObject(ctx, m.bucket, name, minio.GetObjectOptions{})
if err != nil {
return "", err
}
defer reader.Close()
img, _, err = ImageStat(reader)
if err != nil {
return "", err
}
}
thumbnail := resizeImage(img, opt.Image.Width, opt.Image.Height)
buf := bytes.NewBuffer(nil)
switch opt.Image.Format {
case formatPng:
err = png.Encode(buf, thumbnail)
case formatJpeg:
err = jpeg.Encode(buf, thumbnail, nil)
case formatGif:
err = gif.Encode(buf, thumbnail, nil)
}
if _, err := m.core.Client.PutObject(ctx, m.bucket, cacheKey, buf, int64(buf.Len()), minio.PutObjectOptions{}); err != nil {
return "", err
}
return m.presignedGetObject(ctx, cacheKey, expire, reqParams)
return m.getImageThumbnailURL(ctx, name, expire, opt.Image)
}
func (m *Minio) getObjectData(ctx context.Context, name string, limit int64) ([]byte, error) {
@@ -541,5 +438,5 @@ func (m *Minio) getObjectData(ctx context.Context, name string, limit int64) ([]
if limit < 0 {
return io.ReadAll(object)
}
return io.ReadAll(io.LimitReader(object, 1024))
return io.ReadAll(io.LimitReader(object, limit))
}

@@ -1,22 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package minio
type minioImageInfo struct {
NotImage bool `json:"notImage,omitempty"`
Width int `json:"width,omitempty"`
Height int `json:"height,omitempty"`
Format string `json:"format,omitempty"`
}

@@ -0,0 +1,134 @@
package minio
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/OpenIMSDK/tools/errs"
"github.com/OpenIMSDK/tools/log"
"github.com/minio/minio-go/v7"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
"image"
"image/gif"
"image/jpeg"
"image/png"
"net/url"
"path/filepath"
"strings"
"time"
)
func (m *Minio) getImageThumbnailURL(ctx context.Context, name string, expire time.Duration, opt *s3.Image) (string, error) {
var img image.Image
info, err := m.cache.GetImageObjectKeyInfo(ctx, name, func(ctx context.Context) (info *cache.MinioImageInfo, err error) {
info, img, err = m.getObjectImageInfo(ctx, name)
return
})
if err != nil {
return "", err
}
if !info.IsImg {
return "", errs.ErrData.Wrap("object not image")
}
if opt.Width > info.Width || opt.Width <= 0 {
opt.Width = info.Width
}
if opt.Height > info.Height || opt.Height <= 0 {
opt.Height = info.Height
}
opt.Format = strings.ToLower(opt.Format)
if opt.Format == formatJpg {
opt.Format = formatJpeg
}
switch opt.Format {
case formatPng, formatJpeg, formatGif:
default:
opt.Format = ""
}
reqParams := make(url.Values)
if opt.Width == info.Width && opt.Height == info.Height && (opt.Format == info.Format || opt.Format == "") {
reqParams.Set("response-content-type", "image/"+info.Format)
return m.PresignedGetObject(ctx, name, expire, reqParams)
}
if opt.Format == "" {
switch opt.Format {
case formatGif:
opt.Format = formatGif
case formatJpeg:
opt.Format = formatJpeg
case formatPng:
opt.Format = formatPng
default:
opt.Format = formatPng
}
}
key, err := m.cache.GetThumbnailKey(ctx, name, opt.Format, opt.Width, opt.Height, func(ctx context.Context) (string, error) {
if img == nil {
reader, err := m.core.Client.GetObject(ctx, m.bucket, name, minio.GetObjectOptions{})
if err != nil {
return "", err
}
defer reader.Close()
img, _, err = ImageStat(reader)
if err != nil {
return "", err
}
}
thumbnail := resizeImage(img, opt.Width, opt.Height)
buf := bytes.NewBuffer(nil)
switch opt.Format {
case formatPng:
err = png.Encode(buf, thumbnail)
case formatJpeg:
err = jpeg.Encode(buf, thumbnail, nil)
case formatGif:
err = gif.Encode(buf, thumbnail, nil)
}
cacheKey := filepath.Join(imageThumbnailPath, info.Etag, fmt.Sprintf("image_w%d_h%d.%s", opt.Width, opt.Height, opt.Format))
if _, err := m.core.Client.PutObject(ctx, m.bucket, cacheKey, buf, int64(buf.Len()), minio.PutObjectOptions{}); err != nil {
return "", err
}
return cacheKey, nil
})
if err != nil {
return "", err
}
reqParams.Set("response-content-type", "image/"+opt.Format)
return m.PresignedGetObject(ctx, key, expire, reqParams)
}
func (m *Minio) getObjectImageInfo(ctx context.Context, name string) (*cache.MinioImageInfo, image.Image, error) {
fileInfo, err := m.StatObject(ctx, name)
if err != nil {
return nil, nil, err
}
if fileInfo.Size > maxImageSize {
return nil, nil, errors.New("file size too large")
}
imageData, err := m.getObjectData(ctx, name, fileInfo.Size)
if err != nil {
return nil, nil, err
}
var info cache.MinioImageInfo
imageInfo, format, err := ImageStat(bytes.NewReader(imageData))
if err == nil {
info.IsImg = true
info.Format = format
info.Width, info.Height = ImageWidthHeight(imageInfo)
} else {
info.IsImg = false
}
info.Etag = fileInfo.ETag
return &info, imageInfo, nil
}
func (m *Minio) delObjectImageInfoKey(ctx context.Context, key string, size int64) {
if size > 0 && size > maxImageSize {
return
}
if err := m.cache.DelObjectImageInfoKey(key).ExecDel(ctx); err != nil {
log.ZError(ctx, "DelObjectImageInfoKey failed", err, "key", key)
}
}
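Worth noting about the design: the Redis entries hold only derived metadata and the generated thumbnail's object key (stored under imageThumbnailPath/<etag>/image_w<W>_h<H>.<format>), never the presigned URL itself, so links are re-signed on every call and the caller's expire still applies. A hedged sketch of the lookup order inside getImageThumbnailURL; generateThumbnailObject is a hypothetical stand-in for the resize-and-upload closure above:

// Sketch only: cache layering for image access URLs.
// 1. MINIO:IMAGE:<key>                      -> width/height/format/etag (GetImageObjectKeyInfo)
// 2. MINIO:THUMBNAIL:<fmt>:w<W>:h<H>:<key>  -> thumbnail object key (GetThumbnailKey)
// 3. PresignedGetObject(thumbnailKey)       -> signed per request, honoring expire
key, err := m.cache.GetThumbnailKey(ctx, name, "jpeg", 256, 256, func(ctx context.Context) (string, error) {
	// Runs only on a miss: resize, upload under imageThumbnailPath, return the new object key.
	return generateThumbnailObject(ctx, m, name, "jpeg", 256, 256)
})
if err != nil {
	return "", err
}
reqParams.Set("response-content-type", "image/jpeg")
return m.PresignedGetObject(ctx, key, expire, reqParams)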