Merge branch 'master' into feature-p2p-quic-server

pull/2507/head
yixinin 1 month ago
commit 1b42f2ce9d

@ -3,7 +3,7 @@ FROM alpine:latest
WORKDIR /cloudreve
RUN apk update \
&& apk add --no-cache tzdata vips-tools ffmpeg libreoffice aria2 supervisor font-noto font-noto-cjk libheif\
&& apk add --no-cache tzdata vips-tools ffmpeg libreoffice aria2 supervisor font-noto font-noto-cjk libheif libraw-tools\
&& cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone \
&& mkdir -p ./data/temp/aria2 \
@ -13,7 +13,8 @@ ENV CR_ENABLE_ARIA2=1 \
CR_SETTING_DEFAULT_thumb_ffmpeg_enabled=1 \
CR_SETTING_DEFAULT_thumb_vips_enabled=1 \
CR_SETTING_DEFAULT_thumb_libreoffice_enabled=1 \
CR_SETTING_DEFAULT_media_meta_ffprobe=1
CR_SETTING_DEFAULT_media_meta_ffprobe=1 \
CR_SETTING_DEFAULT_thumb_libraw_enabled=1
COPY .build/aria2.supervisor.conf .build/entrypoint.sh ./
COPY cloudreve ./cloudreve

@ -7,7 +7,7 @@
Cloudreve
<br>
</h1>
<h4 align="center">Self-hosted file management system with muilt-cloud support.</h4>
<h4 align="center">Self-hosted file management system with multi-cloud support.</h4>
<p align="center">
<a href="https://dev.azure.com/abslantliu/Cloudreve/_build?definitionId=6">
@ -38,18 +38,18 @@
## :sparkles: Features
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu, Aliyun OSS, Tencent COS, Upyun.
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu Kodo, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Kingsoft Cloud KS3, Upyun.
- :outbox_tray: Upload/Download with direct transmission from client to storage providers.
- 💾 Integrate with Aria2/qBittorrent to download files in background, use multiple download nodes to share the load.
- 📚 Compress/Extract files, download files in batch.
- 📚 Compress/Extract/Preview archived files, download files in batch.
- 💻 WebDAV support covering all storage providers.
- :zap:Drag&Drop to upload files or folders, with resumeable upload support.
- :zap:Drag&Drop to upload files or folders, with parallel resumable upload support.
- :card_file_box: Extract media metadata from files, search files by metadata or tags.
- :family_woman_girl_boy: Multi-users with multi-groups.
- :link: Create share links for files and folders with expiration date.
- :eye_speech_bubble: Preview videos, images, audios, ePub files online; edit texts, diagrams, Markdown, images, Office documents online.
- :art: Customize theme colors, dark mode, PWA application, SPA, i18n.
- :rocket: All-In-One packing, with all features out-of-the-box.
- :rocket: All-in-one packaging, with all features out of the box.
- 🌈 ... ...
## :hammer_and_wrench: Deploy

@ -39,12 +39,12 @@
## :sparkles: 特性
- :cloud: 支持本机、从机、七牛、阿里云 OSS、腾讯云 COS、华为云 OBS、又拍云、OneDrive (包括世纪互联版) 、S3 兼容协议 作为存储端
- :cloud: 支持本机、从机、七牛 Kodo、阿里云 OSS、腾讯云 COS、华为云 OBS、金山云 KS3、又拍云、OneDrive (包括世纪互联版) 、S3 兼容协议 作为存储端
- :outbox_tray: 上传/下载 支持客户端直传,支持下载限速
- 💾 可对接 Aria2 离线下载,可使用多个从机节点分担下载任务
- 📚 在线 压缩/解压缩、多文件打包下载
- 💾 可对接 Aria2/qBittorrent 离线下载,可使用多个从机节点分担下载任务
- 📚 在线 压缩/解压缩/压缩包预览、多文件打包下载
- 💻 覆盖全部存储策略的 WebDAV 协议支持
- :zap: 拖拽上传、目录上传、分片上传
- :zap: 拖拽上传、目录上传、并行分片上传
- :card_file_box: 提取媒体元数据,通过元数据或标签搜索文件
- :family_woman_girl_boy: 多用户、用户组、多存储策略
- :link: 创建文件、目录的分享链接,可设定自动过期

@ -3,7 +3,7 @@ package constants
// These values will be injected at build time, DO NOT EDIT.
// BackendVersion 当前后端版本号
var BackendVersion = "4.1.0"
var BackendVersion = "4.7.0"
// IsPro 是否为Pro版本
var IsPro = "false"

@ -17,6 +17,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
"github.com/cloudreve/Cloudreve/v4/pkg/email"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@ -129,55 +130,59 @@ type Dep interface {
WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error)
// UAParser Get a singleton uaparser.Parser instance for user agent parsing.
UAParser() *uaparser.Parser
// MasterEncryptKeyVault Get a singleton encrypt.MasterEncryptKeyVault instance for master encrypt key vault.
MasterEncryptKeyVault(ctx context.Context) encrypt.MasterEncryptKeyVault
// EncryptorFactory Get a new encrypt.CryptorFactory instance.
EncryptorFactory(ctx context.Context) encrypt.CryptorFactory
}
type dependency struct {
configProvider conf.ConfigProvider
logger logging.Logger
statics iofs.FS
serverStaticFS static.ServeFileSystem
dbClient *ent.Client
rawEntClient *ent.Client
kv cache.Driver
navigatorStateKv cache.Driver
settingClient inventory.SettingClient
fileClient inventory.FileClient
shareClient inventory.ShareClient
settingProvider setting.Provider
userClient inventory.UserClient
groupClient inventory.GroupClient
storagePolicyClient inventory.StoragePolicyClient
taskClient inventory.TaskClient
nodeClient inventory.NodeClient
davAccountClient inventory.DavAccountClient
directLinkClient inventory.DirectLinkClient
emailClient email.Driver
generalAuth auth.Auth
hashidEncoder hashid.Encoder
tokenAuth auth.TokenAuth
lockSystem lock.LockSystem
requestClient request.Client
ioIntenseQueue queue.Queue
thumbQueue queue.Queue
mediaMetaQueue queue.Queue
entityRecycleQueue queue.Queue
slaveQueue queue.Queue
remoteDownloadQueue queue.Queue
ioIntenseQueueTask queue.Task
mediaMeta mediameta.Extractor
thumbPipeline thumb.Generator
mimeDetector mime.MimeDetector
credManager credmanager.CredManager
nodePool cluster.NodePool
taskRegistry queue.TaskRegistry
webauthn *webauthn.WebAuthn
parser *uaparser.Parser
cron *cron.Cron
configProvider conf.ConfigProvider
logger logging.Logger
statics iofs.FS
serverStaticFS static.ServeFileSystem
dbClient *ent.Client
rawEntClient *ent.Client
kv cache.Driver
navigatorStateKv cache.Driver
settingClient inventory.SettingClient
fileClient inventory.FileClient
shareClient inventory.ShareClient
settingProvider setting.Provider
userClient inventory.UserClient
groupClient inventory.GroupClient
storagePolicyClient inventory.StoragePolicyClient
taskClient inventory.TaskClient
nodeClient inventory.NodeClient
davAccountClient inventory.DavAccountClient
directLinkClient inventory.DirectLinkClient
emailClient email.Driver
generalAuth auth.Auth
hashidEncoder hashid.Encoder
tokenAuth auth.TokenAuth
lockSystem lock.LockSystem
requestClient request.Client
ioIntenseQueue queue.Queue
thumbQueue queue.Queue
mediaMetaQueue queue.Queue
entityRecycleQueue queue.Queue
slaveQueue queue.Queue
remoteDownloadQueue queue.Queue
ioIntenseQueueTask queue.Task
mediaMeta mediameta.Extractor
thumbPipeline thumb.Generator
mimeDetector mime.MimeDetector
credManager credmanager.CredManager
nodePool cluster.NodePool
taskRegistry queue.TaskRegistry
webauthn *webauthn.WebAuthn
parser *uaparser.Parser
cron *cron.Cron
masterEncryptKeyVault encrypt.MasterEncryptKeyVault
configPath string
isPro bool
requiredDbVersion string
licenseKey string
// Protects inner deps that can be reloaded at runtime.
mu sync.Mutex
@ -206,6 +211,19 @@ func (d *dependency) RequestClient(opts ...request.Option) request.Client {
return request.NewClient(d.ConfigProvider(), opts...)
}
// MasterEncryptKeyVault lazily builds and caches the singleton
// encrypt.MasterEncryptKeyVault used to protect per-file encryption keys.
// NOTE(review): the cached field is read and written without holding d.mu
// (which guards other reloadable deps) — confirm concurrent first calls
// cannot race and construct two vaults.
func (d *dependency) MasterEncryptKeyVault(ctx context.Context) encrypt.MasterEncryptKeyVault {
// Fast path: reuse the already-constructed vault.
if d.masterEncryptKeyVault != nil {
return d.masterEncryptKeyVault
}
// First use: construct the vault from the setting provider and cache it.
d.masterEncryptKeyVault = encrypt.NewMasterEncryptKeyVault(ctx, d.SettingProvider())
return d.masterEncryptKeyVault
}
// EncryptorFactory returns a fresh encrypt.CryptorFactory on every call,
// backed by the singleton master encrypt key vault.
func (d *dependency) EncryptorFactory(ctx context.Context) encrypt.CryptorFactory {
	vault := d.MasterEncryptKeyVault(ctx)
	return encrypt.NewCryptorFactory(vault)
}
func (d *dependency) WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error) {
if d.webauthn != nil {
return d.webauthn, nil
@ -467,7 +485,7 @@ func (d *dependency) MediaMetaExtractor(ctx context.Context) mediameta.Extractor
return d.mediaMeta
}
d.mediaMeta = mediameta.NewExtractorManager(ctx, d.SettingProvider(), d.Logger())
d.mediaMeta = mediameta.NewExtractorManager(ctx, d.SettingProvider(), d.Logger(), d.RequestClient())
return d.mediaMeta
}

@ -1,6 +1,8 @@
package dependency
import (
"io/fs"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
@ -11,7 +13,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/gin-contrib/static"
"io/fs"
)
// Option 发送请求的额外设置
@ -67,12 +68,6 @@ func WithProFlag(c bool) Option {
})
}
func WithLicenseKey(c string) Option {
return optionFunc(func(o *dependency) {
o.licenseKey = c
})
}
// WithRawEntClient Set the default raw ent client.
func WithRawEntClient(c *ent.Client) Option {
return optionFunc(func(o *dependency) {

@ -27,8 +27,8 @@ type system struct {
Debug bool
SessionSecret string
HashIDSalt string
GracePeriod int `validate:"gte=0"`
ProxyHeader string `validate:"required_with=Listen"`
GracePeriod int `validate:"gte=0"`
ProxyHeader string
}
type ssl struct {

@ -22,7 +22,7 @@ var SystemConfig = &system{
Debug: false,
Mode: "master",
Listen: ":5212",
ProxyHeader: "X-Forwarded-For",
ProxyHeader: "",
}
// CORSConfig 跨域配置

@ -103,10 +103,6 @@ func (m *Migrator) migratePolicy() (map[int]bool, error) {
settings.ProxyServer = policy.OptionsSerialized.OdProxy
}
if policy.DirNameRule == "" {
policy.DirNameRule = "uploads/{uid}/{path}"
}
if policy.Type == types.PolicyTypeCos {
settings.ChunkSize = 1024 * 1024 * 25
}
@ -122,8 +118,16 @@ func (m *Migrator) migratePolicy() (map[int]bool, error) {
hasRandomElement = true
break
}
if strings.Contains(policy.DirNameRule, c) {
hasRandomElement = true
break
}
}
if !hasRandomElement {
if policy.DirNameRule == "" {
policy.DirNameRule = "uploads/{uid}/{path}"
}
policy.FileNameRule = "{uid}_{randomkey8}_{originname}"
m.l.Warning("Storage policy %q has no random element in file name rule, using default file name rule.", policy.Name)
}

@ -1 +1 @@
Subproject commit e9b91c4e03654d5968f8a676a13fc4badf530b5d
Subproject commit 1b1f9f4c8e35d72ac60216af611c81355bd4f7ce

@ -0,0 +1,230 @@
package cmd
import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"os"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/spf13/cobra"
)
var (
outputToFile string
newMasterKeyFile string
)
// init wires the master-key command tree into the root command and
// registers the flags consumed by the generate and rotate subcommands.
func init() {
rootCmd.AddCommand(masterKeyCmd)
masterKeyCmd.AddCommand(masterKeyGenerateCmd)
masterKeyCmd.AddCommand(masterKeyGetCmd)
masterKeyCmd.AddCommand(masterKeyRotateCmd)
// -o/--output: write the generated key to a file instead of stdout.
masterKeyGenerateCmd.Flags().StringVarP(&outputToFile, "output", "o", "", "Output master key to file instead of stdout")
// -n/--new-key: path of the base64-encoded replacement key used by rotate.
masterKeyRotateCmd.Flags().StringVarP(&newMasterKeyFile, "new-key", "n", "", "Path to file containing the new master key (base64 encoded).")
}
// masterKeyCmd is the parent "master-key" command. It performs no work of
// its own; real behavior lives in the generate/get/rotate subcommands.
var masterKeyCmd = &cobra.Command{
Use:   "master-key",
Short: "Master encryption key management",
Long:  "Manage master encryption keys for file encryption. Use subcommands to generate, get, or rotate keys.",
Run: func(cmd *cobra.Command, args []string) {
// Invoked without a subcommand: just show usage.
_ = cmd.Help()
},
}
// masterKeyGenerateCmd implements "master-key generate": it draws a fresh
// 256-bit key from the CSPRNG and emits it base64-encoded, either to stdout
// or — when -o/--output is set — to a file created with 0600 permissions.
var masterKeyGenerateCmd = &cobra.Command{
	Use:   "generate",
	Short: "Generate a new master encryption key",
	Long:  "Generate a new random 32-byte (256-bit) master encryption key and output it in base64 format.",
	Run: func(cmd *cobra.Command, args []string) {
		// Draw 32 random bytes (256 bits) from crypto/rand.
		rawKey := make([]byte, 32)
		if _, err := io.ReadFull(rand.Reader, rawKey); err != nil {
			fmt.Fprintf(os.Stderr, "Error: Failed to generate random key: %v\n", err)
			os.Exit(1)
		}

		// Keys are always exchanged in base64 text form.
		encoded := base64.StdEncoding.EncodeToString(rawKey)

		if outputToFile == "" {
			// No destination file requested: print the key and finish.
			fmt.Println(encoded)
			return
		}

		// Persist the key with owner-only permissions.
		if err := os.WriteFile(outputToFile, []byte(encoded), 0600); err != nil {
			fmt.Fprintf(os.Stderr, "Error: Failed to write key to file: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("Master key generated and saved to: %s\n", outputToFile)
	},
}
// masterKeyGetCmd implements "master-key get": it resolves the master key
// from the configured vault and prints it base64-encoded to stdout.
var masterKeyGetCmd = &cobra.Command{
Use:   "get",
Short: "Get the current master encryption key",
Long:  "Retrieve and display the current master encryption key from the configured vault (setting, env, or file).",
Run: func(cmd *cobra.Command, args []string) {
ctx := context.Background()
// Build a minimal dependency container from the config path only.
dep := dependency.NewDependency(
dependency.WithConfigPath(confPath),
)
logger := dep.Logger()
// Get the master key vault
vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
// Retrieve the master key
key, err := vault.GetMasterKey(ctx)
if err != nil {
logger.Error("Failed to get master key: %s", err)
os.Exit(1)
}
// Encode to base64 and display.
// NOTE(review): the empty Println emits a leading blank line before the
// key — confirm this separator is intentional for copy/paste output.
encodedKey := base64.StdEncoding.EncodeToString(key)
fmt.Println("")
fmt.Println(encodedKey)
},
}
// masterKeyRotateCmd implements "master-key rotate": it decrypts every
// per-entity file key with the current master key, re-encrypts it with a
// new key loaded from --new-key, and finally updates the stored master key
// when it lives in the settings database (env/file vaults require a manual
// follow-up step, printed at the end).
var masterKeyRotateCmd = &cobra.Command{
Use:   "rotate",
Short: "Rotate the master encryption key",
Long: `Rotate the master encryption key by re-encrypting all encrypted file keys with a new master key.
This operation:
1. Retrieves the current master key
2. Loads a new master key from file
3. Re-encrypts all file encryption keys with the new master key
4. Updates the master key in the settings database
Warning: This is a critical operation. Make sure to backup your database before proceeding.`,
Run: func(cmd *cobra.Command, args []string) {
ctx := context.Background()
dep := dependency.NewDependency(
dependency.WithConfigPath(confPath),
)
logger := dep.Logger()
logger.Info("Starting master key rotation...")
// Step 1: fetch the currently active master key from the vault.
vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
oldMasterKey, err := vault.GetMasterKey(ctx)
if err != nil {
logger.Error("Failed to get current master key: %s", err)
os.Exit(1)
}
logger.Info("Retrieved current master key")
// Step 2: load and validate the replacement key.
// NOTE(review): newMasterKeyFile is not checked for emptiness — omitting
// --new-key yields an unhelpful ReadFile("") error; consider an explicit
// "flag required" message.
var newMasterKey []byte
// Load from file
keyData, err := os.ReadFile(newMasterKeyFile)
if err != nil {
logger.Error("Failed to read new master key file: %s", err)
os.Exit(1)
}
newMasterKey, err = base64.StdEncoding.DecodeString(string(keyData))
if err != nil {
logger.Error("Failed to decode new master key: %s", err)
os.Exit(1)
}
// Enforce the AES-256-sized key contract before touching any data.
if len(newMasterKey) != 32 {
logger.Error("Invalid new master key: must be 32 bytes (256 bits), got %d bytes", len(newMasterKey))
os.Exit(1)
}
logger.Info("Loaded new master key from file: %s", newMasterKeyFile)
// Step 3: find every entity that may carry encryption metadata
// (props is non-nil; entities without EncryptMetadata are skipped below).
db := dep.DBClient()
entities, err := db.Entity.Query().
Where(entity.Not(entity.PropsIsNil())).
All(ctx)
if err != nil {
logger.Error("Failed to query entities: %s", err)
os.Exit(1)
}
logger.Info("Found %d entities to check for encryption", len(entities))
// Step 4: re-wrap each file key under the new master key.
// NOTE(review): updates are issued one-by-one without a transaction — a
// failure mid-loop exits with some entities re-encrypted under the new
// key while the stored master key is still the old one; those entities
// would be unreadable until rotation is re-run. Confirm this is an
// accepted trade-off (the Long text does instruct users to back up).
encryptedCount := 0
for _, ent := range entities {
if ent.Props == nil || ent.Props.EncryptMetadata == nil {
continue
}
encMeta := ent.Props.EncryptMetadata
// Decrypt the file key with old master key
decryptedFileKey, err := encrypt.DecryptWithMasterKey(oldMasterKey, encMeta.Key)
if err != nil {
logger.Error("Failed to decrypt key for entity %d: %s", ent.ID, err)
os.Exit(1)
}
// Re-encrypt the file key with new master key
newEncryptedKey, err := encrypt.EncryptWithMasterKey(newMasterKey, decryptedFileKey)
if err != nil {
logger.Error("Failed to re-encrypt key for entity %d: %s", ent.ID, err)
os.Exit(1)
}
// Copy props and swap in the re-wrapped key; algorithm and IV are
// unchanged because only the key wrapping changes, not file data.
newProps := *ent.Props
newProps.EncryptMetadata = &types.EncryptMetadata{
Algorithm: encMeta.Algorithm,
Key:       newEncryptedKey,
KeyPlainText: nil, // Don't store plaintext
IV: encMeta.IV,
}
err = db.Entity.UpdateOne(ent).
SetProps(&newProps).
Exec(ctx)
if err != nil {
logger.Error("Failed to update entity %d: %s", ent.ID, err)
os.Exit(1)
}
encryptedCount++
}
logger.Info("Re-encrypted %d file keys", encryptedCount)
// Step 5: persist the new master key, but only when the vault type is
// the settings database; env/file vaults must be updated by the operator.
keyStore := dep.SettingProvider().MasterEncryptKeyVault(ctx)
if keyStore == setting.MasterEncryptKeyVaultTypeSetting {
encodedNewKey := base64.StdEncoding.EncodeToString(newMasterKey)
err = dep.SettingClient().Set(ctx, map[string]string{
"encrypt_master_key": encodedNewKey,
})
if err != nil {
logger.Error("Failed to update master key in settings: %s", err)
logger.Error("WARNING: File keys have been re-encrypted but master key update failed!")
logger.Error("Please manually update the encrypt_master_key setting.")
os.Exit(1)
}
} else {
logger.Info("Current master key is stored in %q", keyStore)
if keyStore == setting.MasterEncryptKeyVaultTypeEnv {
logger.Info("Please update the new master encryption key in your \"CR_ENCRYPT_MASTER_KEY\" environment variable.")
} else if keyStore == setting.MasterEncryptKeyVaultTypeFile {
logger.Info("Please update the new master encryption key in your key file: %q", dep.SettingProvider().MasterEncryptKeyFile(ctx))
}
logger.Info("Last step: Please manually update the new master encryption key in your ENV or key file.")
}
logger.Info("Master key rotation completed successfully")
},
}

@ -2,14 +2,16 @@ package cmd
import (
"fmt"
"os"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"os"
)
var (
confPath string
confPath string
licenseKey string
)
func init() {

@ -12,10 +12,6 @@ import (
"github.com/spf13/cobra"
)
var (
licenseKey string
)
func init() {
rootCmd.AddCommand(serverCmd)
serverCmd.PersistentFlags().StringVarP(&licenseKey, "license-key", "l", "", "License key of your Cloudreve Pro")
@ -29,7 +25,6 @@ var serverCmd = &cobra.Command{
dependency.WithConfigPath(confPath),
dependency.WithProFlag(constants.IsProBool),
dependency.WithRequiredDbVersion(constants.BackendVersion),
dependency.WithLicenseKey(licenseKey),
)
server := application.NewServer(dep)
logger := dep.Logger()

@ -1,13 +1,15 @@
services:
pro:
cloudreve:
image: cloudreve/cloudreve:latest
container_name: cloudreve-backend
depends_on:
- postgresql
- redis
restart: always
restart: unless-stopped
ports:
- 5212:5212
- 6888:6888
- 6888:6888/udp
environment:
- CR_CONF_Database.Type=postgres
- CR_CONF_Database.Host=postgresql
@ -19,8 +21,12 @@ services:
- backend_data:/cloudreve/data
postgresql:
image: postgres:latest
# Best practice: Pin to major version.
# NOTE: For major version jumps:
# backup & consult https://www.postgresql.org/docs/current/pgupgrade.html
image: postgres:17
container_name: postgresql
restart: unless-stopped
environment:
- POSTGRES_USER=cloudreve
- POSTGRES_DB=cloudreve
@ -31,6 +37,7 @@ services:
redis:
image: redis:latest
container_name: redis
restart: unless-stopped
volumes:
- redis_data:/data

@ -1034,8 +1034,7 @@ func (c *FileClient) Hooks() []Hook {
// Interceptors returns the client interceptors.
func (c *FileClient) Interceptors() []Interceptor {
inters := c.inters.File
return append(inters[:len(inters):len(inters)], file.Interceptors[:]...)
return c.inters.File
}
func (c *FileClient) mutate(ctx context.Context, m *FileMutation) (Value, error) {

@ -42,8 +42,8 @@ type Entity struct {
CreatedBy int `json:"created_by,omitempty"`
// UploadSessionID holds the value of the "upload_session_id" field.
UploadSessionID *uuid.UUID `json:"upload_session_id,omitempty"`
// RecycleOptions holds the value of the "recycle_options" field.
RecycleOptions *types.EntityRecycleOption `json:"recycle_options,omitempty"`
// Props holds the value of the "props" field.
Props *types.EntityProps `json:"props,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the EntityQuery when eager-loading is set.
Edges EntityEdges `json:"edges"`
@ -105,7 +105,7 @@ func (*Entity) scanValues(columns []string) ([]any, error) {
switch columns[i] {
case entity.FieldUploadSessionID:
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
case entity.FieldRecycleOptions:
case entity.FieldProps:
values[i] = new([]byte)
case entity.FieldID, entity.FieldType, entity.FieldSize, entity.FieldReferenceCount, entity.FieldStoragePolicyEntities, entity.FieldCreatedBy:
values[i] = new(sql.NullInt64)
@ -196,12 +196,12 @@ func (e *Entity) assignValues(columns []string, values []any) error {
e.UploadSessionID = new(uuid.UUID)
*e.UploadSessionID = *value.S.(*uuid.UUID)
}
case entity.FieldRecycleOptions:
case entity.FieldProps:
if value, ok := values[i].(*[]byte); !ok {
return fmt.Errorf("unexpected type %T for field recycle_options", values[i])
return fmt.Errorf("unexpected type %T for field props", values[i])
} else if value != nil && len(*value) > 0 {
if err := json.Unmarshal(*value, &e.RecycleOptions); err != nil {
return fmt.Errorf("unmarshal field recycle_options: %w", err)
if err := json.Unmarshal(*value, &e.Props); err != nil {
return fmt.Errorf("unmarshal field props: %w", err)
}
}
default:
@ -289,8 +289,8 @@ func (e *Entity) String() string {
builder.WriteString(fmt.Sprintf("%v", *v))
}
builder.WriteString(", ")
builder.WriteString("recycle_options=")
builder.WriteString(fmt.Sprintf("%v", e.RecycleOptions))
builder.WriteString("props=")
builder.WriteString(fmt.Sprintf("%v", e.Props))
builder.WriteByte(')')
return builder.String()
}

@ -35,8 +35,8 @@ const (
FieldCreatedBy = "created_by"
// FieldUploadSessionID holds the string denoting the upload_session_id field in the database.
FieldUploadSessionID = "upload_session_id"
// FieldRecycleOptions holds the string denoting the recycle_options field in the database.
FieldRecycleOptions = "recycle_options"
// FieldProps holds the string denoting the props field in the database.
FieldProps = "recycle_options"
// EdgeFile holds the string denoting the file edge name in mutations.
EdgeFile = "file"
// EdgeUser holds the string denoting the user edge name in mutations.
@ -79,7 +79,7 @@ var Columns = []string{
FieldStoragePolicyEntities,
FieldCreatedBy,
FieldUploadSessionID,
FieldRecycleOptions,
FieldProps,
}
var (

@ -521,14 +521,14 @@ func UploadSessionIDNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldUploadSessionID))
}
// RecycleOptionsIsNil applies the IsNil predicate on the "recycle_options" field.
func RecycleOptionsIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldRecycleOptions))
// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldProps))
}
// RecycleOptionsNotNil applies the NotNil predicate on the "recycle_options" field.
func RecycleOptionsNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldRecycleOptions))
// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldProps))
}
// HasFile applies the HasEdge predicate on the "file" edge.

@ -135,9 +135,9 @@ func (ec *EntityCreate) SetNillableUploadSessionID(u *uuid.UUID) *EntityCreate {
return ec
}
// SetRecycleOptions sets the "recycle_options" field.
func (ec *EntityCreate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityCreate {
ec.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (ec *EntityCreate) SetProps(tp *types.EntityProps) *EntityCreate {
ec.mutation.SetProps(tp)
return ec
}
@ -336,9 +336,9 @@ func (ec *EntityCreate) createSpec() (*Entity, *sqlgraph.CreateSpec) {
_spec.SetField(entity.FieldUploadSessionID, field.TypeUUID, value)
_node.UploadSessionID = &value
}
if value, ok := ec.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
_node.RecycleOptions = value
if value, ok := ec.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
_node.Props = value
}
if nodes := ec.mutation.FileIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
@ -586,21 +586,21 @@ func (u *EntityUpsert) ClearUploadSessionID() *EntityUpsert {
return u
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsert) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsert {
u.Set(entity.FieldRecycleOptions, v)
// SetProps sets the "props" field.
func (u *EntityUpsert) SetProps(v *types.EntityProps) *EntityUpsert {
u.Set(entity.FieldProps, v)
return u
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsert) UpdateRecycleOptions() *EntityUpsert {
u.SetExcluded(entity.FieldRecycleOptions)
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsert) UpdateProps() *EntityUpsert {
u.SetExcluded(entity.FieldProps)
return u
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsert) ClearRecycleOptions() *EntityUpsert {
u.SetNull(entity.FieldRecycleOptions)
// ClearProps clears the value of the "props" field.
func (u *EntityUpsert) ClearProps() *EntityUpsert {
u.SetNull(entity.FieldProps)
return u
}
@ -817,24 +817,24 @@ func (u *EntityUpsertOne) ClearUploadSessionID() *EntityUpsertOne {
})
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsertOne) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertOne {
// SetProps sets the "props" field.
func (u *EntityUpsertOne) SetProps(v *types.EntityProps) *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.SetRecycleOptions(v)
s.SetProps(v)
})
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsertOne) UpdateRecycleOptions() *EntityUpsertOne {
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsertOne) UpdateProps() *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.UpdateRecycleOptions()
s.UpdateProps()
})
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsertOne) ClearRecycleOptions() *EntityUpsertOne {
// ClearProps clears the value of the "props" field.
func (u *EntityUpsertOne) ClearProps() *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.ClearRecycleOptions()
s.ClearProps()
})
}
@ -1222,24 +1222,24 @@ func (u *EntityUpsertBulk) ClearUploadSessionID() *EntityUpsertBulk {
})
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsertBulk) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertBulk {
// SetProps sets the "props" field.
func (u *EntityUpsertBulk) SetProps(v *types.EntityProps) *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.SetRecycleOptions(v)
s.SetProps(v)
})
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsertBulk) UpdateRecycleOptions() *EntityUpsertBulk {
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsertBulk) UpdateProps() *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.UpdateRecycleOptions()
s.UpdateProps()
})
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsertBulk) ClearRecycleOptions() *EntityUpsertBulk {
// ClearProps clears the value of the "props" field.
func (u *EntityUpsertBulk) ClearProps() *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.ClearRecycleOptions()
s.ClearProps()
})
}

@ -190,15 +190,15 @@ func (eu *EntityUpdate) ClearUploadSessionID() *EntityUpdate {
return eu
}
// SetRecycleOptions sets the "recycle_options" field.
func (eu *EntityUpdate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdate {
eu.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (eu *EntityUpdate) SetProps(tp *types.EntityProps) *EntityUpdate {
eu.mutation.SetProps(tp)
return eu
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (eu *EntityUpdate) ClearRecycleOptions() *EntityUpdate {
eu.mutation.ClearRecycleOptions()
// ClearProps clears the value of the "props" field.
func (eu *EntityUpdate) ClearProps() *EntityUpdate {
eu.mutation.ClearProps()
return eu
}
@ -383,11 +383,11 @@ func (eu *EntityUpdate) sqlSave(ctx context.Context) (n int, err error) {
if eu.mutation.UploadSessionIDCleared() {
_spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
}
if value, ok := eu.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
if value, ok := eu.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
}
if eu.mutation.RecycleOptionsCleared() {
_spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
if eu.mutation.PropsCleared() {
_spec.ClearField(entity.FieldProps, field.TypeJSON)
}
if eu.mutation.FileCleared() {
edge := &sqlgraph.EdgeSpec{
@ -669,15 +669,15 @@ func (euo *EntityUpdateOne) ClearUploadSessionID() *EntityUpdateOne {
return euo
}
// SetRecycleOptions sets the "recycle_options" field.
func (euo *EntityUpdateOne) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdateOne {
euo.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (euo *EntityUpdateOne) SetProps(tp *types.EntityProps) *EntityUpdateOne {
euo.mutation.SetProps(tp)
return euo
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (euo *EntityUpdateOne) ClearRecycleOptions() *EntityUpdateOne {
euo.mutation.ClearRecycleOptions()
// ClearProps clears the value of the "props" field.
func (euo *EntityUpdateOne) ClearProps() *EntityUpdateOne {
euo.mutation.ClearProps()
return euo
}
@ -892,11 +892,11 @@ func (euo *EntityUpdateOne) sqlSave(ctx context.Context) (_node *Entity, err err
if euo.mutation.UploadSessionIDCleared() {
_spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
}
if value, ok := euo.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
if value, ok := euo.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
}
if euo.mutation.RecycleOptionsCleared() {
_spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
if euo.mutation.PropsCleared() {
_spec.ClearField(entity.FieldProps, field.TypeJSON)
}
if euo.mutation.FileCleared() {
edge := &sqlgraph.EdgeSpec{

@ -25,8 +25,6 @@ type File struct {
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// DeletedAt holds the value of the "deleted_at" field.
DeletedAt *time.Time `json:"deleted_at,omitempty"`
// Type holds the value of the "type" field.
Type int `json:"type,omitempty"`
// Name holds the value of the "name" field.
@ -171,7 +169,7 @@ func (*File) scanValues(columns []string) ([]any, error) {
values[i] = new(sql.NullInt64)
case file.FieldName:
values[i] = new(sql.NullString)
case file.FieldCreatedAt, file.FieldUpdatedAt, file.FieldDeletedAt:
case file.FieldCreatedAt, file.FieldUpdatedAt:
values[i] = new(sql.NullTime)
default:
values[i] = new(sql.UnknownType)
@ -206,13 +204,6 @@ func (f *File) assignValues(columns []string, values []any) error {
} else if value.Valid {
f.UpdatedAt = value.Time
}
case file.FieldDeletedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
} else if value.Valid {
f.DeletedAt = new(time.Time)
*f.DeletedAt = value.Time
}
case file.FieldType:
if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field type", values[i])
@ -351,11 +342,6 @@ func (f *File) String() string {
builder.WriteString("updated_at=")
builder.WriteString(f.UpdatedAt.Format(time.ANSIC))
builder.WriteString(", ")
if v := f.DeletedAt; v != nil {
builder.WriteString("deleted_at=")
builder.WriteString(v.Format(time.ANSIC))
}
builder.WriteString(", ")
builder.WriteString("type=")
builder.WriteString(fmt.Sprintf("%v", f.Type))
builder.WriteString(", ")

@ -19,8 +19,6 @@ const (
FieldCreatedAt = "created_at"
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
FieldUpdatedAt = "updated_at"
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
FieldDeletedAt = "deleted_at"
// FieldType holds the string denoting the type field in the database.
FieldType = "type"
// FieldName holds the string denoting the name field in the database.
@ -112,7 +110,6 @@ var Columns = []string{
FieldID,
FieldCreatedAt,
FieldUpdatedAt,
FieldDeletedAt,
FieldType,
FieldName,
FieldOwnerID,
@ -146,14 +143,11 @@ func ValidColumn(column string) bool {
//
// import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
Hooks [1]ent.Hook
Interceptors [1]ent.Interceptor
Hooks [1]ent.Hook
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
DefaultCreatedAt func() time.Time
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
DefaultUpdatedAt func() time.Time
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
UpdateDefaultUpdatedAt func() time.Time
// DefaultSize holds the default value on creation for the "size" field.
DefaultSize int64
// DefaultIsSymbolic holds the default value on creation for the "is_symbolic" field.
@ -178,11 +172,6 @@ func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}
// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldType, opts...).ToFunc()

@ -65,11 +65,6 @@ func UpdatedAt(v time.Time) predicate.File {
return predicate.File(sql.FieldEQ(FieldUpdatedAt, v))
}
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.File {
return predicate.File(sql.FieldEQ(FieldDeletedAt, v))
}
// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
func Type(v int) predicate.File {
return predicate.File(sql.FieldEQ(FieldType, v))
@ -190,56 +185,6 @@ func UpdatedAtLTE(v time.Time) predicate.File {
return predicate.File(sql.FieldLTE(FieldUpdatedAt, v))
}
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.File {
return predicate.File(sql.FieldEQ(FieldDeletedAt, v))
}
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.File {
return predicate.File(sql.FieldNEQ(FieldDeletedAt, v))
}
// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.File {
return predicate.File(sql.FieldIn(FieldDeletedAt, vs...))
}
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.File {
return predicate.File(sql.FieldNotIn(FieldDeletedAt, vs...))
}
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.File {
return predicate.File(sql.FieldGT(FieldDeletedAt, v))
}
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.File {
return predicate.File(sql.FieldGTE(FieldDeletedAt, v))
}
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.File {
return predicate.File(sql.FieldLT(FieldDeletedAt, v))
}
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.File {
return predicate.File(sql.FieldLTE(FieldDeletedAt, v))
}
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.File {
return predicate.File(sql.FieldIsNull(FieldDeletedAt))
}
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.File {
return predicate.File(sql.FieldNotNull(FieldDeletedAt))
}
// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v int) predicate.File {
return predicate.File(sql.FieldEQ(FieldType, v))

@ -57,20 +57,6 @@ func (fc *FileCreate) SetNillableUpdatedAt(t *time.Time) *FileCreate {
return fc
}
// SetDeletedAt sets the "deleted_at" field.
func (fc *FileCreate) SetDeletedAt(t time.Time) *FileCreate {
fc.mutation.SetDeletedAt(t)
return fc
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fc *FileCreate) SetNillableDeletedAt(t *time.Time) *FileCreate {
if t != nil {
fc.SetDeletedAt(*t)
}
return fc
}
// SetType sets the "type" field.
func (fc *FileCreate) SetType(i int) *FileCreate {
fc.mutation.SetType(i)
@ -413,10 +399,6 @@ func (fc *FileCreate) createSpec() (*File, *sqlgraph.CreateSpec) {
_spec.SetField(file.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := fc.mutation.DeletedAt(); ok {
_spec.SetField(file.FieldDeletedAt, field.TypeTime, value)
_node.DeletedAt = &value
}
if value, ok := fc.mutation.GetType(); ok {
_spec.SetField(file.FieldType, field.TypeInt, value)
_node.Type = value
@ -636,24 +618,6 @@ func (u *FileUpsert) UpdateUpdatedAt() *FileUpsert {
return u
}
// SetDeletedAt sets the "deleted_at" field.
func (u *FileUpsert) SetDeletedAt(v time.Time) *FileUpsert {
u.Set(file.FieldDeletedAt, v)
return u
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FileUpsert) UpdateDeletedAt() *FileUpsert {
u.SetExcluded(file.FieldDeletedAt)
return u
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FileUpsert) ClearDeletedAt() *FileUpsert {
u.SetNull(file.FieldDeletedAt)
return u
}
// SetType sets the "type" field.
func (u *FileUpsert) SetType(v int) *FileUpsert {
u.Set(file.FieldType, v)
@ -863,27 +827,6 @@ func (u *FileUpsertOne) UpdateUpdatedAt() *FileUpsertOne {
})
}
// SetDeletedAt sets the "deleted_at" field.
func (u *FileUpsertOne) SetDeletedAt(v time.Time) *FileUpsertOne {
return u.Update(func(s *FileUpsert) {
s.SetDeletedAt(v)
})
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FileUpsertOne) UpdateDeletedAt() *FileUpsertOne {
return u.Update(func(s *FileUpsert) {
s.UpdateDeletedAt()
})
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FileUpsertOne) ClearDeletedAt() *FileUpsertOne {
return u.Update(func(s *FileUpsert) {
s.ClearDeletedAt()
})
}
// SetType sets the "type" field.
func (u *FileUpsertOne) SetType(v int) *FileUpsertOne {
return u.Update(func(s *FileUpsert) {
@ -1289,27 +1232,6 @@ func (u *FileUpsertBulk) UpdateUpdatedAt() *FileUpsertBulk {
})
}
// SetDeletedAt sets the "deleted_at" field.
func (u *FileUpsertBulk) SetDeletedAt(v time.Time) *FileUpsertBulk {
return u.Update(func(s *FileUpsert) {
s.SetDeletedAt(v)
})
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FileUpsertBulk) UpdateDeletedAt() *FileUpsertBulk {
return u.Update(func(s *FileUpsert) {
s.UpdateDeletedAt()
})
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FileUpsertBulk) ClearDeletedAt() *FileUpsertBulk {
return u.Update(func(s *FileUpsert) {
s.ClearDeletedAt()
})
}
// SetType sets the "type" field.
func (u *FileUpsertBulk) SetType(v int) *FileUpsertBulk {
return u.Update(func(s *FileUpsert) {

@ -41,26 +41,14 @@ func (fu *FileUpdate) SetUpdatedAt(t time.Time) *FileUpdate {
return fu
}
// SetDeletedAt sets the "deleted_at" field.
func (fu *FileUpdate) SetDeletedAt(t time.Time) *FileUpdate {
fu.mutation.SetDeletedAt(t)
return fu
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fu *FileUpdate) SetNillableDeletedAt(t *time.Time) *FileUpdate {
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (fu *FileUpdate) SetNillableUpdatedAt(t *time.Time) *FileUpdate {
if t != nil {
fu.SetDeletedAt(*t)
fu.SetUpdatedAt(*t)
}
return fu
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (fu *FileUpdate) ClearDeletedAt() *FileUpdate {
fu.mutation.ClearDeletedAt()
return fu
}
// SetType sets the "type" field.
func (fu *FileUpdate) SetType(i int) *FileUpdate {
fu.mutation.ResetType()
@ -472,9 +460,6 @@ func (fu *FileUpdate) RemoveDirectLinks(d ...*DirectLink) *FileUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (fu *FileUpdate) Save(ctx context.Context) (int, error) {
if err := fu.defaults(); err != nil {
return 0, err
}
return withHooks(ctx, fu.sqlSave, fu.mutation, fu.hooks)
}
@ -500,18 +485,6 @@ func (fu *FileUpdate) ExecX(ctx context.Context) {
}
}
// defaults sets the default values of the builder before save.
func (fu *FileUpdate) defaults() error {
if _, ok := fu.mutation.UpdatedAt(); !ok {
if file.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized file.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := file.UpdateDefaultUpdatedAt()
fu.mutation.SetUpdatedAt(v)
}
return nil
}
// check runs all checks and user-defined validators on the builder.
func (fu *FileUpdate) check() error {
if _, ok := fu.mutation.OwnerID(); fu.mutation.OwnerCleared() && !ok {
@ -535,12 +508,6 @@ func (fu *FileUpdate) sqlSave(ctx context.Context) (n int, err error) {
if value, ok := fu.mutation.UpdatedAt(); ok {
_spec.SetField(file.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := fu.mutation.DeletedAt(); ok {
_spec.SetField(file.FieldDeletedAt, field.TypeTime, value)
}
if fu.mutation.DeletedAtCleared() {
_spec.ClearField(file.FieldDeletedAt, field.TypeTime)
}
if value, ok := fu.mutation.GetType(); ok {
_spec.SetField(file.FieldType, field.TypeInt, value)
}
@ -912,26 +879,14 @@ func (fuo *FileUpdateOne) SetUpdatedAt(t time.Time) *FileUpdateOne {
return fuo
}
// SetDeletedAt sets the "deleted_at" field.
func (fuo *FileUpdateOne) SetDeletedAt(t time.Time) *FileUpdateOne {
fuo.mutation.SetDeletedAt(t)
return fuo
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fuo *FileUpdateOne) SetNillableDeletedAt(t *time.Time) *FileUpdateOne {
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (fuo *FileUpdateOne) SetNillableUpdatedAt(t *time.Time) *FileUpdateOne {
if t != nil {
fuo.SetDeletedAt(*t)
fuo.SetUpdatedAt(*t)
}
return fuo
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (fuo *FileUpdateOne) ClearDeletedAt() *FileUpdateOne {
fuo.mutation.ClearDeletedAt()
return fuo
}
// SetType sets the "type" field.
func (fuo *FileUpdateOne) SetType(i int) *FileUpdateOne {
fuo.mutation.ResetType()
@ -1356,9 +1311,6 @@ func (fuo *FileUpdateOne) Select(field string, fields ...string) *FileUpdateOne
// Save executes the query and returns the updated File entity.
func (fuo *FileUpdateOne) Save(ctx context.Context) (*File, error) {
if err := fuo.defaults(); err != nil {
return nil, err
}
return withHooks(ctx, fuo.sqlSave, fuo.mutation, fuo.hooks)
}
@ -1384,18 +1336,6 @@ func (fuo *FileUpdateOne) ExecX(ctx context.Context) {
}
}
// defaults sets the default values of the builder before save.
func (fuo *FileUpdateOne) defaults() error {
if _, ok := fuo.mutation.UpdatedAt(); !ok {
if file.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized file.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := file.UpdateDefaultUpdatedAt()
fuo.mutation.SetUpdatedAt(v)
}
return nil
}
// check runs all checks and user-defined validators on the builder.
func (fuo *FileUpdateOne) check() error {
if _, ok := fuo.mutation.OwnerID(); fuo.mutation.OwnerCleared() && !ok {
@ -1436,12 +1376,6 @@ func (fuo *FileUpdateOne) sqlSave(ctx context.Context) (_node *File, err error)
if value, ok := fuo.mutation.UpdatedAt(); ok {
_spec.SetField(file.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := fuo.mutation.DeletedAt(); ok {
_spec.SetField(file.FieldDeletedAt, field.TypeTime, value)
}
if fuo.mutation.DeletedAtCleared() {
_spec.ClearField(file.FieldDeletedAt, field.TypeTime)
}
if value, ok := fuo.mutation.GetType(); ok {
_spec.SetField(file.FieldType, field.TypeInt, value)
}

File diff suppressed because one or more lines are too long

@ -107,7 +107,6 @@ var (
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "type", Type: field.TypeInt},
{Name: "name", Type: field.TypeString},
{Name: "size", Type: field.TypeInt64, Default: 0},
@ -126,19 +125,19 @@ var (
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "files_files_children",
Columns: []*schema.Column{FilesColumns[10]},
Columns: []*schema.Column{FilesColumns[9]},
RefColumns: []*schema.Column{FilesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_storage_policies_files",
Columns: []*schema.Column{FilesColumns[11]},
Columns: []*schema.Column{FilesColumns[10]},
RefColumns: []*schema.Column{StoragePoliciesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_users_files",
Columns: []*schema.Column{FilesColumns[12]},
Columns: []*schema.Column{FilesColumns[11]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
@ -147,17 +146,17 @@ var (
{
Name: "file_file_children_name",
Unique: true,
Columns: []*schema.Column{FilesColumns[10], FilesColumns[5]},
Columns: []*schema.Column{FilesColumns[9], FilesColumns[4]},
},
{
Name: "file_file_children_type_updated_at",
Unique: false,
Columns: []*schema.Column{FilesColumns[10], FilesColumns[4], FilesColumns[2]},
Columns: []*schema.Column{FilesColumns[9], FilesColumns[3], FilesColumns[2]},
},
{
Name: "file_file_children_type_size",
Unique: false,
Columns: []*schema.Column{FilesColumns[10], FilesColumns[4], FilesColumns[6]},
Columns: []*schema.Column{FilesColumns[9], FilesColumns[3], FilesColumns[5]},
},
},
}

@ -1723,7 +1723,7 @@ type EntityMutation struct {
reference_count *int
addreference_count *int
upload_session_id *uuid.UUID
recycle_options **types.EntityRecycleOption
props **types.EntityProps
clearedFields map[string]struct{}
file map[int]struct{}
removedfile map[int]struct{}
@ -2294,53 +2294,53 @@ func (m *EntityMutation) ResetUploadSessionID() {
delete(m.clearedFields, entity.FieldUploadSessionID)
}
// SetRecycleOptions sets the "recycle_options" field.
func (m *EntityMutation) SetRecycleOptions(tro *types.EntityRecycleOption) {
m.recycle_options = &tro
// SetProps sets the "props" field.
func (m *EntityMutation) SetProps(tp *types.EntityProps) {
m.props = &tp
}
// RecycleOptions returns the value of the "recycle_options" field in the mutation.
func (m *EntityMutation) RecycleOptions() (r *types.EntityRecycleOption, exists bool) {
v := m.recycle_options
// Props returns the value of the "props" field in the mutation.
func (m *EntityMutation) Props() (r *types.EntityProps, exists bool) {
v := m.props
if v == nil {
return
}
return *v, true
}
// OldRecycleOptions returns the old "recycle_options" field's value of the Entity entity.
// OldProps returns the old "props" field's value of the Entity entity.
// If the Entity object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *EntityMutation) OldRecycleOptions(ctx context.Context) (v *types.EntityRecycleOption, err error) {
func (m *EntityMutation) OldProps(ctx context.Context) (v *types.EntityProps, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldRecycleOptions is only allowed on UpdateOne operations")
return v, errors.New("OldProps is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldRecycleOptions requires an ID field in the mutation")
return v, errors.New("OldProps requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldRecycleOptions: %w", err)
return v, fmt.Errorf("querying old value for OldProps: %w", err)
}
return oldValue.RecycleOptions, nil
return oldValue.Props, nil
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (m *EntityMutation) ClearRecycleOptions() {
m.recycle_options = nil
m.clearedFields[entity.FieldRecycleOptions] = struct{}{}
// ClearProps clears the value of the "props" field.
func (m *EntityMutation) ClearProps() {
m.props = nil
m.clearedFields[entity.FieldProps] = struct{}{}
}
// RecycleOptionsCleared returns if the "recycle_options" field was cleared in this mutation.
func (m *EntityMutation) RecycleOptionsCleared() bool {
_, ok := m.clearedFields[entity.FieldRecycleOptions]
// PropsCleared returns if the "props" field was cleared in this mutation.
func (m *EntityMutation) PropsCleared() bool {
_, ok := m.clearedFields[entity.FieldProps]
return ok
}
// ResetRecycleOptions resets all changes to the "recycle_options" field.
func (m *EntityMutation) ResetRecycleOptions() {
m.recycle_options = nil
delete(m.clearedFields, entity.FieldRecycleOptions)
// ResetProps resets all changes to the "props" field.
func (m *EntityMutation) ResetProps() {
m.props = nil
delete(m.clearedFields, entity.FieldProps)
}
// AddFileIDs adds the "file" edge to the File entity by ids.
@ -2542,8 +2542,8 @@ func (m *EntityMutation) Fields() []string {
if m.upload_session_id != nil {
fields = append(fields, entity.FieldUploadSessionID)
}
if m.recycle_options != nil {
fields = append(fields, entity.FieldRecycleOptions)
if m.props != nil {
fields = append(fields, entity.FieldProps)
}
return fields
}
@ -2573,8 +2573,8 @@ func (m *EntityMutation) Field(name string) (ent.Value, bool) {
return m.CreatedBy()
case entity.FieldUploadSessionID:
return m.UploadSessionID()
case entity.FieldRecycleOptions:
return m.RecycleOptions()
case entity.FieldProps:
return m.Props()
}
return nil, false
}
@ -2604,8 +2604,8 @@ func (m *EntityMutation) OldField(ctx context.Context, name string) (ent.Value,
return m.OldCreatedBy(ctx)
case entity.FieldUploadSessionID:
return m.OldUploadSessionID(ctx)
case entity.FieldRecycleOptions:
return m.OldRecycleOptions(ctx)
case entity.FieldProps:
return m.OldProps(ctx)
}
return nil, fmt.Errorf("unknown Entity field %s", name)
}
@ -2685,12 +2685,12 @@ func (m *EntityMutation) SetField(name string, value ent.Value) error {
}
m.SetUploadSessionID(v)
return nil
case entity.FieldRecycleOptions:
v, ok := value.(*types.EntityRecycleOption)
case entity.FieldProps:
v, ok := value.(*types.EntityProps)
if !ok {
return fmt.Errorf("unexpected type %T for field %s", value, name)
}
m.SetRecycleOptions(v)
m.SetProps(v)
return nil
}
return fmt.Errorf("unknown Entity field %s", name)
@ -2770,8 +2770,8 @@ func (m *EntityMutation) ClearedFields() []string {
if m.FieldCleared(entity.FieldUploadSessionID) {
fields = append(fields, entity.FieldUploadSessionID)
}
if m.FieldCleared(entity.FieldRecycleOptions) {
fields = append(fields, entity.FieldRecycleOptions)
if m.FieldCleared(entity.FieldProps) {
fields = append(fields, entity.FieldProps)
}
return fields
}
@ -2796,8 +2796,8 @@ func (m *EntityMutation) ClearField(name string) error {
case entity.FieldUploadSessionID:
m.ClearUploadSessionID()
return nil
case entity.FieldRecycleOptions:
m.ClearRecycleOptions()
case entity.FieldProps:
m.ClearProps()
return nil
}
return fmt.Errorf("unknown Entity nullable field %s", name)
@ -2837,8 +2837,8 @@ func (m *EntityMutation) ResetField(name string) error {
case entity.FieldUploadSessionID:
m.ResetUploadSessionID()
return nil
case entity.FieldRecycleOptions:
m.ResetRecycleOptions()
case entity.FieldProps:
m.ResetProps()
return nil
}
return fmt.Errorf("unknown Entity field %s", name)
@ -2972,7 +2972,6 @@ type FileMutation struct {
id *int
created_at *time.Time
updated_at *time.Time
deleted_at *time.Time
_type *int
add_type *int
name *string
@ -3179,55 +3178,6 @@ func (m *FileMutation) ResetUpdatedAt() {
m.updated_at = nil
}
// SetDeletedAt sets the "deleted_at" field.
func (m *FileMutation) SetDeletedAt(t time.Time) {
m.deleted_at = &t
}
// DeletedAt returns the value of the "deleted_at" field in the mutation.
func (m *FileMutation) DeletedAt() (r time.Time, exists bool) {
v := m.deleted_at
if v == nil {
return
}
return *v, true
}
// OldDeletedAt returns the old "deleted_at" field's value of the File entity.
// If the File object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *FileMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldDeletedAt requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err)
}
return oldValue.DeletedAt, nil
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (m *FileMutation) ClearDeletedAt() {
m.deleted_at = nil
m.clearedFields[file.FieldDeletedAt] = struct{}{}
}
// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation.
func (m *FileMutation) DeletedAtCleared() bool {
_, ok := m.clearedFields[file.FieldDeletedAt]
return ok
}
// ResetDeletedAt resets all changes to the "deleted_at" field.
func (m *FileMutation) ResetDeletedAt() {
m.deleted_at = nil
delete(m.clearedFields, file.FieldDeletedAt)
}
// SetType sets the "type" field.
func (m *FileMutation) SetType(i int) {
m._type = &i
@ -4076,16 +4026,13 @@ func (m *FileMutation) Type() string {
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *FileMutation) Fields() []string {
fields := make([]string, 0, 12)
fields := make([]string, 0, 11)
if m.created_at != nil {
fields = append(fields, file.FieldCreatedAt)
}
if m.updated_at != nil {
fields = append(fields, file.FieldUpdatedAt)
}
if m.deleted_at != nil {
fields = append(fields, file.FieldDeletedAt)
}
if m._type != nil {
fields = append(fields, file.FieldType)
}
@ -4125,8 +4072,6 @@ func (m *FileMutation) Field(name string) (ent.Value, bool) {
return m.CreatedAt()
case file.FieldUpdatedAt:
return m.UpdatedAt()
case file.FieldDeletedAt:
return m.DeletedAt()
case file.FieldType:
return m.GetType()
case file.FieldName:
@ -4158,8 +4103,6 @@ func (m *FileMutation) OldField(ctx context.Context, name string) (ent.Value, er
return m.OldCreatedAt(ctx)
case file.FieldUpdatedAt:
return m.OldUpdatedAt(ctx)
case file.FieldDeletedAt:
return m.OldDeletedAt(ctx)
case file.FieldType:
return m.OldType(ctx)
case file.FieldName:
@ -4201,13 +4144,6 @@ func (m *FileMutation) SetField(name string, value ent.Value) error {
}
m.SetUpdatedAt(v)
return nil
case file.FieldDeletedAt:
v, ok := value.(time.Time)
if !ok {
return fmt.Errorf("unexpected type %T for field %s", value, name)
}
m.SetDeletedAt(v)
return nil
case file.FieldType:
v, ok := value.(int)
if !ok {
@ -4340,9 +4276,6 @@ func (m *FileMutation) AddField(name string, value ent.Value) error {
// mutation.
func (m *FileMutation) ClearedFields() []string {
var fields []string
if m.FieldCleared(file.FieldDeletedAt) {
fields = append(fields, file.FieldDeletedAt)
}
if m.FieldCleared(file.FieldPrimaryEntity) {
fields = append(fields, file.FieldPrimaryEntity)
}
@ -4369,9 +4302,6 @@ func (m *FileMutation) FieldCleared(name string) bool {
// error if the field is not defined in the schema.
func (m *FileMutation) ClearField(name string) error {
switch name {
case file.FieldDeletedAt:
m.ClearDeletedAt()
return nil
case file.FieldPrimaryEntity:
m.ClearPrimaryEntity()
return nil
@ -4398,9 +4328,6 @@ func (m *FileMutation) ResetField(name string) error {
case file.FieldUpdatedAt:
m.ResetUpdatedAt()
return nil
case file.FieldDeletedAt:
m.ResetDeletedAt()
return nil
case file.FieldType:
m.ResetType()
return nil

@ -87,31 +87,24 @@ func init() {
entityDescReferenceCount := entityFields[3].Descriptor()
// entity.DefaultReferenceCount holds the default value on creation for the reference_count field.
entity.DefaultReferenceCount = entityDescReferenceCount.Default.(int)
fileMixin := schema.File{}.Mixin()
fileMixinHooks0 := fileMixin[0].Hooks()
file.Hooks[0] = fileMixinHooks0[0]
fileMixinInters0 := fileMixin[0].Interceptors()
file.Interceptors[0] = fileMixinInters0[0]
fileMixinFields0 := fileMixin[0].Fields()
_ = fileMixinFields0
fileHooks := schema.File{}.Hooks()
file.Hooks[0] = fileHooks[0]
fileFields := schema.File{}.Fields()
_ = fileFields
// fileDescCreatedAt is the schema descriptor for created_at field.
fileDescCreatedAt := fileMixinFields0[0].Descriptor()
fileDescCreatedAt := fileFields[0].Descriptor()
// file.DefaultCreatedAt holds the default value on creation for the created_at field.
file.DefaultCreatedAt = fileDescCreatedAt.Default.(func() time.Time)
// fileDescUpdatedAt is the schema descriptor for updated_at field.
fileDescUpdatedAt := fileMixinFields0[1].Descriptor()
fileDescUpdatedAt := fileFields[1].Descriptor()
// file.DefaultUpdatedAt holds the default value on creation for the updated_at field.
file.DefaultUpdatedAt = fileDescUpdatedAt.Default.(func() time.Time)
// file.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
file.UpdateDefaultUpdatedAt = fileDescUpdatedAt.UpdateDefault.(func() time.Time)
// fileDescSize is the schema descriptor for size field.
fileDescSize := fileFields[3].Descriptor()
fileDescSize := fileFields[5].Descriptor()
// file.DefaultSize holds the default value on creation for the size field.
file.DefaultSize = fileDescSize.Default.(int64)
// fileDescIsSymbolic is the schema descriptor for is_symbolic field.
fileDescIsSymbolic := fileFields[6].Descriptor()
fileDescIsSymbolic := fileFields[8].Descriptor()
// file.DefaultIsSymbolic holds the default value on creation for the is_symbolic field.
file.DefaultIsSymbolic = fileDescIsSymbolic.Default.(bool)
groupMixin := schema.Group{}.Mixin()

@ -25,8 +25,9 @@ func (Entity) Fields() []ent.Field {
field.UUID("upload_session_id", uuid.Must(uuid.NewV4())).
Optional().
Nillable(),
field.JSON("recycle_options", &types.EntityRecycleOption{}).
Optional(),
field.JSON("props", &types.EntityProps{}).
Optional().
StorageKey("recycle_options"),
}
}

@ -1,10 +1,15 @@
package schema
import (
"context"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"entgo.io/ent/schema/index"
"github.com/cloudreve/Cloudreve/v4/ent/hook"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
@ -16,6 +21,17 @@ type File struct {
// Fields of the File.
func (File) Fields() []ent.Field {
return []ent.Field{
field.Time("created_at").
Immutable().
Default(time.Now).
SchemaType(map[string]string{
dialect.MySQL: "datetime",
}),
field.Time("updated_at").
Default(time.Now).
SchemaType(map[string]string{
dialect.MySQL: "datetime",
}),
field.Int("type"),
field.String("name"),
field.Int("owner_id"),
@ -66,8 +82,19 @@ func (File) Indexes() []ent.Index {
}
}
func (File) Mixin() []ent.Mixin {
return []ent.Mixin{
CommonMixin{},
func (f File) Hooks() []ent.Hook {
return []ent.Hook{
hook.On(func(next ent.Mutator) ent.Mutator {
return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if s, ok := m.(interface{ SetUpdatedAt(time.Time) }); ok {
_, set := m.Field("updated_at")
if !set {
s.SetUpdatedAt(time.Now())
}
}
v, err := next.Mutate(ctx, m)
return v, err
})
}, ent.OpUpdate|ent.OpUpdateOne),
}
}

100
go.mod

@ -1,13 +1,15 @@
module github.com/cloudreve/Cloudreve/v4
go 1.23.0
go 1.24.0
toolchain go1.24.9
require (
entgo.io/ent v0.13.0
github.com/Masterminds/semver/v3 v3.3.1
github.com/abslant/gzip v0.0.9
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0
github.com/aws/aws-sdk-go v1.31.5
github.com/bodgit/sevenzip v1.6.0
github.com/cloudflare/cfssl v1.6.1
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
github.com/dsoprea/go-exif/v3 v3.0.1
@ -17,13 +19,13 @@ require (
github.com/dsoprea/go-tiff-image-structure v0.0.0-20221003165014-8ecc4f52edca
github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf
github.com/fatih/color v1.18.0
github.com/gin-contrib/cors v1.3.0
github.com/gin-contrib/cors v1.6.0
github.com/gin-contrib/gzip v1.2.4
github.com/gin-contrib/sessions v1.0.2
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2
github.com/gin-gonic/gin v1.10.0
github.com/gin-gonic/gin v1.11.0
github.com/go-ini/ini v1.50.0
github.com/go-mail/mail v2.3.1+incompatible
github.com/go-playground/validator/v10 v10.20.0
github.com/go-playground/validator/v10 v10.28.0
github.com/go-sql-driver/mysql v1.6.0
github.com/go-webauthn/webauthn v0.11.2
github.com/gofrs/uuid v4.0.0+incompatible
@ -38,48 +40,53 @@ require (
github.com/jinzhu/gorm v1.9.11
github.com/jpillora/backoff v1.0.0
github.com/juju/ratelimit v1.0.1
github.com/ks3sdklib/aws-sdk-go v1.6.2
github.com/lib/pq v1.10.9
github.com/libp2p/go-reuseport v0.4.0
github.com/mholt/archiver/v4 v4.0.0-alpha.6
github.com/mholt/archives v0.1.3
github.com/mojocn/base64Captcha v0.0.0-20190801020520-752b1cd608b2
github.com/pion/stun/v3 v3.0.0
github.com/pquerna/otp v1.2.0
github.com/qiniu/go-sdk/v7 v7.19.0
github.com/quic-go/quic-go v0.52.0
github.com/quic-go/quic-go v0.55.0
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1
github.com/robfig/cron/v3 v3.0.1
github.com/samber/lo v1.38.1
github.com/sirupsen/logrus v1.8.1
github.com/speps/go-hashids v2.0.0+incompatible
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
github.com/stretchr/testify v1.11.1
github.com/tencentyun/cos-go-sdk-v5 v0.7.54
github.com/ua-parser/uap-go v0.0.0-20250213224047-9c035f085b90
github.com/upyun/go-sdk v2.1.0+incompatible
github.com/wneessen/go-mail v0.7.1
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
golang.org/x/image v0.0.0-20211028202545-6944b10bf410
golang.org/x/text v0.23.0
golang.org/x/image v0.18.0
golang.org/x/sync v0.17.0
golang.org/x/text v0.30.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.24.0
golang.org/x/tools v0.38.0
modernc.org/sqlite v1.30.0
)
require (
ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect
cloud.google.com/go v0.81.0 // indirect
github.com/STARRY-S/zip v0.2.1 // indirect
github.com/agext/levenshtein v1.2.1 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/windows v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/bytedance/sonic v1.11.6 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic v1.14.1 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/clbanning/mxj v1.8.4 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
github.com/dsoprea/go-exif/v2 v2.0.0-20200604193436-ca8584a0e1c4 // indirect
github.com/dsoprea/go-iptc v0.0.0-20200609062250-162ae6b44feb // indirect
github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd // indirect
@ -87,23 +94,23 @@ require (
github.com/dsoprea/go-utility/v2 v2.0.0-20221003172846-a3e1774ef349 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-openapi/inflect v0.19.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/go-webauthn/x v0.1.14 // indirect
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-tpm v0.9.1 // indirect
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect
github.com/gorilla/context v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl/v2 v2.13.0 // indirect
@ -111,48 +118,45 @@ require (
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mikelolasagasti/xz v1.0.1 // indirect
github.com/minio/minlz v1.0.0 // indirect
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mozillazg/go-httpheader v0.4.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pierrec/lz4/v4 v4.1.14 // indirect
github.com/nwaples/rardecode/v2 v2.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pion/dtls/v3 v3.0.1 // indirect
github.com/pion/logging v0.2.2 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/sorairolake/lzip-go v0.3.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/ugorji/go/codec v1.3.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/wlynxg/anet v0.0.3 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/zclconf/go-cty v1.8.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/mail.v2 v2.3.1 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/arch v0.22.0 // indirect
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/sys v0.37.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect

219
go.sum

@ -82,11 +82,11 @@ github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuN
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/abslant/gzip v0.0.9 h1:zxuOQ8QmPwni7vwgE3EyOygdmeCo2UkCmO5t+7Ms6cA=
github.com/abslant/gzip v0.0.9/go.mod h1:IcN2c50tZn2y54oysNcIavbTAc1s0B2f5TqTEA+WCas=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
@ -98,10 +98,10 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0 h1:wQlqotpyjYPjJz+Noh5bRu7Snmydk8SKC5Z6u1CR20Y=
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0/go.mod h1:FTzydeQVmR24FI0D6XWUOMKckjXehM/jgMn1xC+DA9M=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 h1:8PmGpDEZl9yDpcdEr6Odf23feCxK3LNUNMxjXg41pZQ=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
@ -138,12 +138,20 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU=
github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs=
github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A=
github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc=
github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w=
github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
@ -167,10 +175,8 @@ github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2
github.com/cloudflare/cfssl v1.6.1 h1:aIOUjpeuDJOpWjVJFP2ByplF53OgqG8I1S40Ggdlk3g=
github.com/cloudflare/cfssl v1.6.1/go.mod h1:ENhCj4Z17+bY2XikpxVmTHDg/C2IsG2Q0ZBeXpAqhCk=
github.com/cloudflare/redoctober v0.0.0-20201013214028-99c99a8e7544/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@ -213,8 +219,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25 h1:simG0vMYFvNriGhaaat7QVVkaVkXzvqcohaBoLZl9Hg=
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25/go.mod h1:Z3Lomva4pyMWYezjMAU5QWRh0p1VvO4199OHlFnyKkM=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dsoprea/go-exif/v2 v2.0.0-20200321225314-640175a69fe4/go.mod h1:Lm2lMM2zx8p4a34ZemkaUV95AnMl4ZvLbCUbwOvLC2E=
github.com/dsoprea/go-exif/v2 v2.0.0-20200520183328-015129a9efd5/go.mod h1:9EXlPeHfblFFnwu5UOqmP2eoZfJyAZ2Ri/Vki33ajO0=
@ -274,7 +280,6 @@ github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DP
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
@ -290,24 +295,24 @@ github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZU
github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.3.0 h1:PolezCc89peu+NgkIWt9OB01Kbzt6IP0J/JvkG6xxlg=
github.com/gin-contrib/cors v1.3.0/go.mod h1:artPvLlhkF7oG06nK8v3U8TNz6IeX+w1uzCSEId5/Vc=
github.com/gin-contrib/cors v1.6.0 h1:0Z7D/bVhE6ja07lI8CTjTonp6SB07o8bNuFyRbsBUQg=
github.com/gin-contrib/cors v1.6.0/go.mod h1:cI+h6iOAyxKRtUtC6iF/Si1KSFvGm/gK+kshxlCi8ro=
github.com/gin-contrib/gzip v1.2.4 h1:yNz4EhPC2kHSZJD1oc1zwp7MLEhEZ3goQeGM3a1b6jU=
github.com/gin-contrib/gzip v1.2.4/go.mod h1:aomRgR7ftdZV3uWY0gW/m8rChfxau0n8YVvwlOHONzw=
github.com/gin-contrib/sessions v1.0.2 h1:UaIjUvTH1cMeOdj3in6dl+Xb6It8RiKRF9Z1anbUyCA=
github.com/gin-contrib/sessions v1.0.2/go.mod h1:KxKxWqWP5LJVDCInulOl4WbLzK2KSPlLesfZ66wRvMs=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2 h1:xLG16iua01X7Gzms9045s2Y2niNpvSY/Zb1oBwgNYZY=
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2/go.mod h1:VhW/Ch/3FhimwZb8Oj+qJmdMmoB8r7lmJ5auRjm50oQ=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
@ -326,10 +331,6 @@ github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgO
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM=
github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M=
github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
@ -345,10 +346,9 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688=
github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -356,8 +356,6 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-webauthn/webauthn v0.11.2 h1:Fgx0/wlmkClTKlnOsdOQ+K5HcHDsDcYIvtYmfhEOSUc=
@ -366,8 +364,10 @@ github.com/go-webauthn/x v0.1.14 h1:1wrB8jzXAofojJPAaRxnZhRgagvLGnLjhCAwg3kTpT0=
github.com/go-webauthn/x v0.1.14/go.mod h1:UuVvFZ8/NbOnkDz3y1NaxtUN87pmtpC1PQ+/5BBQRdc=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
@ -420,11 +420,7 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s=
github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -444,8 +440,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@ -528,11 +524,15 @@ github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoP
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@ -619,15 +619,13 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/kisom/goutils v1.4.3/go.mod h1:Lp5qrquG7yhYnWzZCI/68Pa/GpFynw//od6EkGnWpac=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@ -642,11 +640,12 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ks3sdklib/aws-sdk-go v1.6.2 h1:nxtaaU3hDD5x6gmoxs/qijSJqZrjFapYYuTiVCEgobA=
github.com/ks3sdklib/aws-sdk-go v1.6.2/go.mod h1:jGcsV0dJgMmStAyqjkKVUu6F167pAXYZAS3LqoZMmtM=
github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
@ -668,19 +667,15 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@ -697,11 +692,15 @@ github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mholt/archiver/v4 v4.0.0-alpha.6 h1:3wvos9Kn1GpKNBz+MpozinGREPslLo1ds1W16vTkErQ=
github.com/mholt/archiver/v4 v4.0.0-alpha.6/go.mod h1:9PTygYq90FQBWPspdwAng6dNjYiBuTYKqmA6c15KuCo=
github.com/mholt/archives v0.1.3 h1:aEAaOtNra78G+TvV5ohmXrJOAzf++dIlYeDW3N9q458=
github.com/mholt/archives v0.1.3/go.mod h1:LUCGp++/IbV/I0Xq4SzcIR6uwgeh2yjnQWamjRQfLTU=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mikelolasagasti/xz v1.0.1 h1:Q2F2jX0RYJUG3+WsM+FJknv+6eVjsjXNDV0KJXZzkD0=
github.com/mikelolasagasti/xz v1.0.1/go.mod h1:muAirjiOUxPRXwm9HdDtB3uoRPrGnL85XHtokL9Hcgc=
github.com/minio/minlz v1.0.0 h1:Kj7aJZ1//LlTP1DM8Jm7lNKvvJS2m74gyyXXn3+uJWQ=
github.com/minio/minlz v1.0.0/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@ -747,8 +746,8 @@ github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdh
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/nwaples/rardecode/v2 v2.1.0 h1:JQl9ZoBPDy+nIZGb1mx8+anfHp/LV3NE2MjMiv0ct/U=
github.com/nwaples/rardecode/v2 v2.1.0/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@ -758,13 +757,9 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
@ -784,13 +779,13 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE=
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pion/dtls/v3 v3.0.1 h1:0kmoaPYLAo0md/VemjcrAXQiSf8U+tuU3nDYVNpEKaw=
github.com/pion/dtls/v3 v3.0.1/go.mod h1:dfIXcFkKoujDQ+jtd8M6RgqKK3DuaUilm3YatAbGp5k=
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
@ -851,8 +846,8 @@ github.com/qiniu/go-sdk/v7 v7.19.0/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYX
github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.52.0 h1:/SlHrCRElyaU6MaEPKqKr9z83sBg2v4FLLvWM+Z47pA=
github.com/quic-go/quic-go v0.52.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ=
github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=
github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U=
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1 h1:leEwA4MD1ew0lNgzz6Q4G76G3AEfeci+TMggN6WuFRs=
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@ -892,7 +887,6 @@ github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
@ -905,6 +899,8 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg=
github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/speps/go-hashids v2.0.0+incompatible h1:kSfxGfESueJKTx0mpER9Y/1XHl+FVQjtCqRyYcviFbw=
github.com/speps/go-hashids v2.0.0+incompatible/go.mod h1:P7hqPzMdnZOfyIk+xrlG1QaSMw+gCBdHKsBDnhpaZvc=
@ -948,16 +944,13 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0=
github.com/tencentyun/cos-go-sdk-v5 v0.7.54 h1:FRamEhNBbSeggyYfWfzFejTLftgbICocSYFk4PKTSV4=
github.com/tencentyun/cos-go-sdk-v5 v0.7.54/go.mod h1:UN+VdbCl1hg+kKi5RXqZgaP+Boqfmk+D04GRc4XFk70=
github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
@ -975,12 +968,13 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/upyun/go-sdk v2.1.0+incompatible h1:OdjXghQ/TVetWV16Pz3C1/SUpjhGBVPr+cLiqZLLyq0=
github.com/upyun/go-sdk v2.1.0+incompatible/go.mod h1:eu3F5Uz4b9ZE5bE5QsCL6mgSNWRwfj0zpJ9J626HEqs=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@ -995,6 +989,8 @@ github.com/weppos/publicsuffix-go v0.13.1-0.20210123135404-5fd73613514e/go.mod h
github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wneessen/go-mail v0.7.1 h1:rvy63sp14N06/kdGqCYwW8Na5gDCXjTQM1E7So4PuKk=
github.com/wneessen/go-mail v0.7.1/go.mod h1:+TkW6QP3EVkgTEqHtVmnAE/1MRhmzb8Y9/W3pweuS+k=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
@ -1002,6 +998,8 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -1043,8 +1041,8 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@ -1053,12 +1051,12 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU=
go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/arch v0.22.0 h1:c/Zle32i5ttqRXjdLyyHZESLD/bB90DCU1g9l/0YBDI=
golang.org/x/arch v0.22.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1081,8 +1079,8 @@ golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1099,8 +1097,8 @@ golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeId
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190501045829-6d32002ffd75/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410 h1:hTftEOvwiOq2+O8k2D5/Q7COC7k5Qcrgc2TFURJYnvQ=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1125,8 +1123,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1181,8 +1179,9 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1211,8 +1210,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1244,7 +1243,6 @@ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1287,12 +1285,13 @@ golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1303,8 +1302,9 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1378,8 +1378,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1511,11 +1511,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1527,12 +1525,9 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
@ -1589,10 +1584,8 @@ modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

@ -57,13 +57,22 @@ type (
UserID int
Name string
StoragePolicyID int
HasMetadata string
Shared bool
HasDirectLink bool
}
MetadataFilter struct {
Key string
Value string
Exact bool
}
SearchFileParameters struct {
Name []string
// NameOperatorOr is true if the name should match any of the given names, false if all of them
NameOperatorOr bool
Metadata map[string]string
Metadata []MetadataFilter
Type *types.FileType
UseFullText bool
CaseFolding bool
@ -121,6 +130,7 @@ type (
Size int64
UploadSessionID uuid.UUID
Importing bool
EncryptMetadata *types.EncryptMetadata
}
RelocateEntityParameter struct {
@ -179,7 +189,7 @@ type FileClient interface {
// Copy copies a layer of file to its corresponding destination folder. dstMap is a map from src parent ID to dst parent Files.
Copy(ctx context.Context, files []*ent.File, dstMap map[int][]*ent.File) (map[int][]*ent.File, StorageDiff, error)
// Delete deletes a group of files (and related models) with given entity recycle option
Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error)
Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error)
// StaleEntities returns stale entities of a given file. If ID is not provided, all entities
// will be examined.
StaleEntities(ctx context.Context, ids ...int) ([]*ent.Entity, error)
@ -211,6 +221,8 @@ type FileClient interface {
ListEntities(ctx context.Context, args *ListEntityParameters) (*ListEntityResult, error)
// UpdateProps updates props of a file
UpdateProps(ctx context.Context, file *ent.File, props *types.FileProps) (*ent.File, error)
// UpdateModifiedAt updates modified at of a file
UpdateModifiedAt(ctx context.Context, file *ent.File, modifiedAt time.Time) error
}
func NewFileClient(client *ent.Client, dbType conf.DBType, hasher hashid.Encoder) FileClient {
@ -458,7 +470,7 @@ func (f *fileClient) DeleteByUser(ctx context.Context, uid int) error {
return nil
}
func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error) {
func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error) {
// 1. Decrease reference count for all entities;
// entities stores the relation between its reference count in `files` and entity ID.
entities := make(map[int]int)
@ -514,7 +526,7 @@ func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *typ
for _, chunk := range chunks {
if err := f.client.Entity.Update().
Where(entity.IDIn(chunk...)).
SetRecycleOptions(options).
SetProps(options).
Exec(ctx); err != nil {
return nil, nil, fmt.Errorf("failed to update recycle options for entities %v: %w", chunk, err)
}
@ -640,6 +652,10 @@ func (f *fileClient) Copy(ctx context.Context, files []*ent.File, dstMap map[int
return newDstMap, map[int]int64{dstMap[files[0].FileChildren][0].OwnerID: sizeDiff}, nil
}
// UpdateModifiedAt sets the given file's updated-at timestamp to modifiedAt.
func (f *fileClient) UpdateModifiedAt(ctx context.Context, file *ent.File, modifiedAt time.Time) error {
	updater := f.client.File.UpdateOne(file)
	return updater.SetUpdatedAt(modifiedAt).Exec(ctx)
}
func (f *fileClient) UpsertMetadata(ctx context.Context, file *ent.File, data map[string]string, privateMask map[string]bool) error {
// Validate value length
for key, value := range data {
@ -712,10 +728,15 @@ func (f *fileClient) UpgradePlaceholder(ctx context.Context, file *ent.File, mod
}
if entityType == types.EntityTypeVersion {
if err := f.client.File.UpdateOne(file).
stm := f.client.File.UpdateOne(file).
SetSize(placeholder.Size).
SetPrimaryEntity(placeholder.ID).
Exec(ctx); err != nil {
SetPrimaryEntity(placeholder.ID)
if modifiedAt != nil {
stm.SetUpdatedAt(*modifiedAt)
}
if err := stm.Exec(ctx); err != nil {
return fmt.Errorf("failed to upgrade file primary entity: %v", err)
}
}
@ -864,6 +885,17 @@ func (f *fileClient) RemoveStaleEntities(ctx context.Context, file *ent.File) (S
func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *EntityParameters) (*ent.Entity, StorageDiff, error) {
createdBy := UserFromContext(ctx)
var opt *types.EntityProps
if args.EncryptMetadata != nil {
opt = &types.EntityProps{
EncryptMetadata: &types.EncryptMetadata{
Algorithm: args.EncryptMetadata.Algorithm,
Key: args.EncryptMetadata.Key,
IV: args.EncryptMetadata.IV,
},
}
}
stm := f.client.Entity.
Create().
SetType(int(args.EntityType)).
@ -871,6 +903,10 @@ func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *Ent
SetSize(args.Size).
SetStoragePolicyID(args.StoragePolicyID)
if opt != nil {
stm.SetProps(opt)
}
if createdBy != nil && !IsAnonymousUser(createdBy) {
stm.SetUser(createdBy)
}
@ -890,7 +926,7 @@ func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *Ent
diff := map[int]int64{file.OwnerID: created.Size}
if err := f.client.File.UpdateOne(file).AddEntities(created).Exec(ctx); err != nil {
if err := f.client.Entity.UpdateOne(created).AddFile(file).Exec(ctx); err != nil {
return nil, diff, fmt.Errorf("failed to add file entity: %v", err)
}
@ -1081,6 +1117,18 @@ func (f *fileClient) FlattenListFiles(ctx context.Context, args *FlattenListFile
query = query.Where(file.NameContainsFold(args.Name))
}
if args.HasMetadata != "" {
query = query.Where(file.HasMetadataWith(metadata.Name(args.HasMetadata)))
}
if args.Shared {
query = query.Where(file.HasSharesWith(share.DeletedAtIsNil()))
}
if args.HasDirectLink {
query = query.Where(file.HasDirectLinksWith(directlink.DeletedAtIsNil()))
}
query.Order(getFileOrderOption(&ListFileParameters{
PaginationArgs: args.PaginationArgs,
})...)

@ -16,6 +16,10 @@ import (
"github.com/samber/lo"
)
const (
metadataExactMatchPrefix = "!exact:"
)
func (f *fileClient) searchQuery(q *ent.FileQuery, args *SearchFileParameters, parents []*ent.File, ownerId int) *ent.FileQuery {
if len(parents) == 1 && parents[0] == nil {
q = q.Where(file.OwnerID(ownerId))
@ -69,17 +73,22 @@ func (f *fileClient) searchQuery(q *ent.FileQuery, args *SearchFileParameters, p
}
if len(args.Metadata) > 0 {
metaPredicates := lo.MapToSlice(args.Metadata, func(name string, value string) predicate.Metadata {
nameEq := metadata.NameEQ(value)
if name == "" {
metaPredicates := lo.Map(args.Metadata, func(item MetadataFilter, index int) predicate.Metadata {
if item.Exact {
return metadata.And(metadata.NameEQ(item.Key), metadata.ValueEQ(item.Value))
}
nameEq := metadata.And(metadata.IsPublic(true), metadata.NameEQ(item.Key))
if item.Value == "" {
return nameEq
} else {
valueContain := metadata.ValueContainsFold(value)
return metadata.And(metadata.NameEQ(name), valueContain)
valueContain := metadata.ValueContainsFold(item.Value)
return metadata.And(nameEq, valueContain)
}
})
metaPredicates = append(metaPredicates, metadata.IsPublic(true))
q.Where(file.HasMetadataWith(metadata.And(metaPredicates...)))
q.Where(file.And(lo.Map(metaPredicates, func(item predicate.Metadata, index int) predicate.File {
return file.HasMetadataWith(item)
})...))
}
if args.SizeLte > 0 || args.SizeGte > 0 {

@ -279,6 +279,53 @@ type (
)
var patches = []Patch{
{
Name: "apply_default_archive_viewer",
EndVersion: "4.7.0",
Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
fileViewersSetting, err := client.Setting.Query().Where(setting.Name("file_viewers")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query file_viewers setting: %w", err)
}
var fileViewers []types.ViewerGroup
if err := json.Unmarshal([]byte(fileViewersSetting.Value), &fileViewers); err != nil {
return fmt.Errorf("failed to unmarshal file_viewers setting: %w", err)
}
fileViewerExisted := false
for _, viewer := range fileViewers[0].Viewers {
if viewer.ID == "archive" {
fileViewerExisted = true
break
}
}
// 2.2 If not existed, add it
if !fileViewerExisted {
// Found existing archive viewer default setting
var defaultArchiveViewer types.Viewer
for _, viewer := range defaultFileViewers[0].Viewers {
if viewer.ID == "archive" {
defaultArchiveViewer = viewer
break
}
}
fileViewers[0].Viewers = append(fileViewers[0].Viewers, defaultArchiveViewer)
newFileViewersSetting, err := json.Marshal(fileViewers)
if err != nil {
return fmt.Errorf("failed to marshal file_viewers setting: %w", err)
}
if _, err := client.Setting.UpdateOne(fileViewersSetting).SetValue(string(newFileViewersSetting)).Save(ctx); err != nil {
return fmt.Errorf("failed to update file_viewers setting: %w", err)
}
}
return nil
},
},
{
Name: "apply_default_excalidraw_viewer",
EndVersion: "4.1.0",
@ -367,6 +414,86 @@ var patches = []Patch{
}
}
return nil
},
},
{
Name: "apply_email_title_magic_var",
EndVersion: "4.7.0",
Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
// 1. Activate Template
mailActivationTemplateSetting, err := client.Setting.Query().Where(setting.Name("mail_activation_template")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query mail_activation_template setting: %w", err)
}
var mailActivationTemplate []struct {
Title string `json:"title"`
Body string `json:"body"`
Language string `json:"language"`
}
if err := json.Unmarshal([]byte(mailActivationTemplateSetting.Value), &mailActivationTemplate); err != nil {
return fmt.Errorf("failed to unmarshal mail_activation_template setting: %w", err)
}
for i, t := range mailActivationTemplate {
mailActivationTemplate[i].Title = fmt.Sprintf("[{{ .CommonContext.SiteBasic.Name }}] %s", t.Title)
}
newMailActivationTemplate, err := json.Marshal(mailActivationTemplate)
if err != nil {
return fmt.Errorf("failed to marshal mail_activation_template setting: %w", err)
}
if _, err := client.Setting.UpdateOne(mailActivationTemplateSetting).SetValue(string(newMailActivationTemplate)).Save(ctx); err != nil {
return fmt.Errorf("failed to update mail_activation_template setting: %w", err)
}
// 2. Reset Password Template
mailResetTemplateSetting, err := client.Setting.Query().Where(setting.Name("mail_reset_template")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query mail_reset_template setting: %w", err)
}
var mailResetTemplate []struct {
Title string `json:"title"`
Body string `json:"body"`
Language string `json:"language"`
}
if err := json.Unmarshal([]byte(mailResetTemplateSetting.Value), &mailResetTemplate); err != nil {
return fmt.Errorf("failed to unmarshal mail_reset_template setting: %w", err)
}
for i, t := range mailResetTemplate {
mailResetTemplate[i].Title = fmt.Sprintf("[{{ .CommonContext.SiteBasic.Name }}] %s", t.Title)
}
newMailResetTemplate, err := json.Marshal(mailResetTemplate)
if err != nil {
return fmt.Errorf("failed to marshal mail_reset_template setting: %w", err)
}
if _, err := client.Setting.UpdateOne(mailResetTemplateSetting).SetValue(string(newMailResetTemplate)).Save(ctx); err != nil {
return fmt.Errorf("failed to update mail_reset_template setting: %w", err)
}
return nil
},
},
{
Name: "apply_thumb_path_magic_var",
EndVersion: "4.10.0",
Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
thumbSuffixSetting, err := client.Setting.Query().Where(setting.Name("thumb_entity_suffix")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query thumb_entity_suffix setting: %w", err)
}
newThumbSuffix := fmt.Sprintf("{blob_path}/{blob_name}%s", thumbSuffixSetting.Value)
if _, err := client.Setting.UpdateOne(thumbSuffixSetting).SetValue(newThumbSuffix).Save(ctx); err != nil {
return fmt.Errorf("failed to update thumb_entity_suffix setting: %w", err)
}
return nil
},
},

File diff suppressed because one or more lines are too long

@ -3,6 +3,7 @@ package inventory
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
)
@ -60,6 +61,22 @@ func WithTx[T TxOperator](ctx context.Context, c T) (T, *Tx, context.Context, er
return c.SetClient(txClient).(T), txWrapper, ctx, nil
}
// InheritTx wraps the given inventory client with a transaction.
// If the transaction is already in the context, it will be inherited.
// Otherwise, original client will be returned.
func InheritTx[T TxOperator](ctx context.Context, c T) (T, *Tx) {
	inherited, ok := ctx.Value(TxCtx{}).(*Tx)
	if !ok || inherited.finished {
		// No live transaction to inherit; hand back the client untouched.
		return c, nil
	}

	// Wrap the inherited transaction so commit/rollback is deferred to the parent.
	wrapper := &Tx{inherited: true, tx: inherited.tx, parent: inherited}
	return c.SetClient(wrapper.tx.Client()).(T), wrapper
}
func Rollback(tx *Tx) error {
if !tx.inherited {
tx.finished = true

@ -7,17 +7,20 @@ import (
// UserSetting 用户其他配置
type (
UserSetting struct {
ProfileOff bool `json:"profile_off,omitempty"`
PreferredTheme string `json:"preferred_theme,omitempty"`
VersionRetention bool `json:"version_retention,omitempty"`
VersionRetentionExt []string `json:"version_retention_ext,omitempty"`
VersionRetentionMax int `json:"version_retention_max,omitempty"`
Pined []PinedFile `json:"pined,omitempty"`
Language string `json:"email_language,omitempty"`
DisableViewSync bool `json:"disable_view_sync,omitempty"`
FsViewMap map[string]ExplorerView `json:"fs_view_map,omitempty"`
ProfileOff bool `json:"profile_off,omitempty"`
PreferredTheme string `json:"preferred_theme,omitempty"`
VersionRetention bool `json:"version_retention,omitempty"`
VersionRetentionExt []string `json:"version_retention_ext,omitempty"`
VersionRetentionMax int `json:"version_retention_max,omitempty"`
Pined []PinedFile `json:"pined,omitempty"`
Language string `json:"email_language,omitempty"`
DisableViewSync bool `json:"disable_view_sync,omitempty"`
FsViewMap map[string]ExplorerView `json:"fs_view_map,omitempty"`
ShareLinksInProfile ShareLinksInProfileLevel `json:"share_links_in_profile,omitempty"`
}
ShareLinksInProfileLevel string
PinedFile struct {
Uri string `json:"uri"`
Name string `json:"name,omitempty"`
@ -41,6 +44,12 @@ type (
Token string `json:"token"`
// 允许的文件扩展名
FileType []string `json:"file_type"`
// IsFileTypeDenyList Whether above list is a deny list.
IsFileTypeDenyList bool `json:"is_file_type_deny_list,omitempty"`
// FileRegexp 文件扩展名正则表达式
NameRegexp string `json:"file_regexp,omitempty"`
// IsNameRegexp Whether above regexp is a deny list.
IsNameRegexpDenyList bool `json:"is_name_regexp_deny_list,omitempty"`
// OauthRedirect Oauth 重定向地址
OauthRedirect string `json:"od_redirect,omitempty"`
// CustomProxy whether to use custom-proxy to get file content
@ -92,6 +101,10 @@ type (
SourceAuth bool `json:"source_auth,omitempty"`
// QiniuUploadCdn whether to use CDN for Qiniu upload.
QiniuUploadCdn bool `json:"qiniu_upload_cdn,omitempty"`
// ChunkConcurrency the number of chunks to upload concurrently.
ChunkConcurrency int `json:"chunk_concurrency,omitempty"`
// Whether to enable file encryption.
Encryption bool `json:"encryption,omitempty"`
}
FileType int
@ -143,8 +156,18 @@ type (
MasterSiteVersion string `json:"master_site_version,omitempty"`
}
EntityRecycleOption struct {
UnlinkOnly bool `json:"unlink_only,omitempty"`
EntityProps struct {
UnlinkOnly bool `json:"unlink_only,omitempty"`
EncryptMetadata *EncryptMetadata `json:"encrypt_metadata,omitempty"`
}
Cipher string
EncryptMetadata struct {
Algorithm Cipher `json:"algorithm"`
Key []byte `json:"key"`
KeyPlainText []byte `json:"key_plain_text,omitempty"`
IV []byte `json:"iv"`
}
DavAccountProps struct {
@ -173,7 +196,8 @@ type (
}
ColumTypeProps struct {
MetadataKey string `json:"metadata_key,omitempty" binding:"max=255"`
MetadataKey string `json:"metadata_key,omitempty" binding:"max=255"`
CustomPropsID string `json:"custom_props_id,omitempty" binding:"max=255"`
}
ShareProps struct {
@ -245,6 +269,7 @@ func FileTypeFromString(s string) FileType {
const (
DavAccountReadOnly DavAccountOption = iota
DavAccountProxy
DavAccountDisableSysFiles
)
const (
@ -254,6 +279,7 @@ const (
PolicyTypeOss = "oss"
PolicyTypeCos = "cos"
PolicyTypeS3 = "s3"
PolicyTypeKs3 = "ks3"
PolicyTypeOd = "onedrive"
PolicyTypeRemote = "remote"
PolicyTypeObs = "obs"
@ -278,26 +304,62 @@ const (
ViewerTypeCustom = "custom"
)
type Viewer struct {
ID string `json:"id"`
Type ViewerType `json:"type"`
DisplayName string `json:"display_name"`
Exts []string `json:"exts"`
Url string `json:"url,omitempty"`
Icon string `json:"icon,omitempty"`
WopiActions map[string]map[ViewerAction]string `json:"wopi_actions,omitempty"`
Props map[string]string `json:"props,omitempty"`
MaxSize int64 `json:"max_size,omitempty"`
Disabled bool `json:"disabled,omitempty"`
Templates []NewFileTemplate `json:"templates,omitempty"`
Platform string `json:"platform,omitempty"`
}
type (
Viewer struct {
ID string `json:"id"`
Type ViewerType `json:"type"`
DisplayName string `json:"display_name"`
Exts []string `json:"exts"`
Url string `json:"url,omitempty"`
Icon string `json:"icon,omitempty"`
WopiActions map[string]map[ViewerAction]string `json:"wopi_actions,omitempty"`
Props map[string]string `json:"props,omitempty"`
MaxSize int64 `json:"max_size,omitempty"`
Disabled bool `json:"disabled,omitempty"`
Templates []NewFileTemplate `json:"templates,omitempty"`
Platform string `json:"platform,omitempty"`
RequiredGroupPermission []GroupPermission `json:"required_group_permission,omitempty"`
}
ViewerGroup struct {
Viewers []Viewer `json:"viewers"`
}
type ViewerGroup struct {
Viewers []Viewer `json:"viewers"`
}
NewFileTemplate struct {
Ext string `json:"ext"`
DisplayName string `json:"display_name"`
}
)
type NewFileTemplate struct {
Ext string `json:"ext"`
DisplayName string `json:"display_name"`
}
type (
CustomPropsType string
CustomProps struct {
ID string `json:"id"`
Name string `json:"name"`
Type CustomPropsType `json:"type"`
Max int `json:"max,omitempty"`
Min int `json:"min,omitempty"`
Default string `json:"default,omitempty"`
Options []string `json:"options,omitempty"`
Icon string `json:"icon,omitempty"`
}
)
const (
CustomPropsTypeText = "text"
CustomPropsTypeNumber = "number"
CustomPropsTypeBoolean = "boolean"
CustomPropsTypeSelect = "select"
CustomPropsTypeMultiSelect = "multi_select"
CustomPropsTypeLink = "link"
CustomPropsTypeRating = "rating"
)
const (
ProfilePublicShareOnly = ShareLinksInProfileLevel("")
ProfileAllShare = ShareLinksInProfileLevel("all_share")
ProfileHideShare = ShareLinksInProfileLevel("hide_share")
)
const (
CipherAES256CTR Cipher = "aes-256-ctr"
)

@ -220,8 +220,29 @@ func (c *userClient) Delete(ctx context.Context, uid int) error {
func (c *userClient) ApplyStorageDiff(ctx context.Context, diffs StorageDiff) error {
ae := serializer.NewAggregateError()
for uid, diff := range diffs {
if err := c.client.User.Update().Where(user.ID(uid)).AddStorage(diff).Exec(ctx); err != nil {
ae.Add(fmt.Sprintf("%d", uid), fmt.Errorf("failed to apply storage diff for user %d: %w", uid, err))
// Retry logic for MySQL deadlock (Error 1213)
// This is a temporary workaround. TODO: optimize storage mutation
maxRetries := 3
var lastErr error
for attempt := 0; attempt < maxRetries; attempt++ {
if err := c.client.User.Update().Where(user.ID(uid)).AddStorage(diff).Exec(ctx); err != nil {
lastErr = err
// Check if it's a MySQL deadlock error (Error 1213)
if strings.Contains(err.Error(), "Error 1213") && attempt < maxRetries-1 {
// Wait a bit before retrying with exponential backoff
time.Sleep(time.Duration(attempt+1) * 10 * time.Millisecond)
continue
}
ae.Add(fmt.Sprintf("%d", uid), fmt.Errorf("failed to apply storage diff for user %d: %w", uid, err))
break
}
// Success, break out of retry loop
lastErr = nil
break
}
if lastErr != nil {
ae.Add(fmt.Sprintf("%d", uid), fmt.Errorf("failed to apply storage diff for user %d: %w", uid, lastErr))
}
}

@ -1,8 +1,10 @@
//go:debug rsa1024min=0
package main
import (
_ "embed"
"flag"
"github.com/cloudreve/Cloudreve/v4/cmd"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
)

@ -3,6 +3,10 @@ package middleware
import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
@ -14,8 +18,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
"net/http"
"time"
)
// HashID 将给定对象的HashID转换为真实ID
@ -92,8 +94,13 @@ func MobileRequestOnly() gin.HandlerFunc {
// 2. Generate and inject correlation ID for diagnostic.
func InitializeHandling(dep dependency.Dep) gin.HandlerFunc {
return func(c *gin.Context) {
clientIp := c.ClientIP()
if idx := strings.Index(clientIp, ","); idx > 0 {
clientIp = clientIp[:idx]
}
reqInfo := &requestinfo.RequestInfo{
IP: c.ClientIP(),
IP: clientIp,
Host: c.Request.Host,
UserAgent: c.Request.UserAgent(),
}

@ -1,9 +1,10 @@
package cache
import (
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestNewMemoStore(t *testing.T) {

@ -180,9 +180,9 @@ func SlaveFileContentUrl(base *url.URL, srcPath, name string, download bool, spe
return base
}
func SlaveMediaMetaRoute(src, ext string) string {
func SlaveMediaMetaRoute(src, ext, language string) string {
src = url.PathEscape(base64.URLEncoding.EncodeToString([]byte(src)))
return fmt.Sprintf("file/meta/%s/%s", src, url.PathEscape(ext))
return fmt.Sprintf("file/meta/%s/%s?language=%s", src, url.PathEscape(ext), language)
}
func SlaveFileListRoute(srcPath string, recursive bool) string {

@ -46,7 +46,7 @@ type System struct {
SessionSecret string
HashIDSalt string // deprecated
GracePeriod int `validate:"gte=0"`
ProxyHeader string `validate:"required_with=Listen"`
ProxyHeader string
LogLevel string `validate:"oneof=debug info warning error"`
}
@ -114,7 +114,7 @@ var SystemConfig = &System{
Debug: false,
Mode: MasterMode,
Listen: ":5212",
ProxyHeader: "X-Forwarded-For",
ProxyHeader: "",
LogLevel: "info",
}

@ -32,18 +32,18 @@ const (
)
var (
supportDownloadOptions = map[string]bool{
"cookie": true,
"skip_checking": true,
"root_folder": true,
"rename": true,
"upLimit": true,
"dlLimit": true,
"ratioLimit": true,
"seedingTimeLimit": true,
"autoTMM": true,
"sequentialDownload": true,
"firstLastPiecePrio": true,
downloadOptionFormatTypes = map[string]string{
"cookie": "%s",
"skip_checking": "%s",
"root_folder": "%s",
"rename": "%s",
"upLimit": "%.0f",
"dlLimit": "%.0f",
"ratioLimit": "%f",
"seedingTimeLimit": "%.0f",
"autoTMM": "%t",
"sequentialDownload": "%s",
"firstLastPiecePrio": "%t",
}
)
@ -271,15 +271,15 @@ func (c *qbittorrentClient) CreateTask(ctx context.Context, url string, options
// Apply global options
for k, v := range c.options.Options {
if _, ok := supportDownloadOptions[k]; ok {
_ = formWriter.WriteField(k, fmt.Sprintf("%s", v))
if _, ok := downloadOptionFormatTypes[k]; ok {
_ = formWriter.WriteField(k, fmt.Sprintf(downloadOptionFormatTypes[k], v))
}
}
// Apply group options
for k, v := range options {
if _, ok := supportDownloadOptions[k]; ok {
_ = formWriter.WriteField(k, fmt.Sprintf("%s", v))
if _, ok := downloadOptionFormatTypes[k]; ok {
_ = formWriter.WriteField(k, fmt.Sprintf(downloadOptionFormatTypes[k], v))
}
}

@ -2,6 +2,7 @@ package email
import (
"context"
"errors"
"fmt"
"strings"
"time"
@ -9,8 +10,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/go-mail/mail"
"github.com/gofrs/uuid"
"github.com/wneessen/go-mail"
)
// SMTPPool SMTP协议发送邮件
@ -38,9 +38,11 @@ type SMTPConfig struct {
}
type message struct {
msg *mail.Message
cid string
userID int
msg *mail.Msg
to string
subject string
cid string
userID int
}
// NewSMTPPool initializes a new SMTP based email sending queue.
@ -81,17 +83,21 @@ func (client *SMTPPool) Send(ctx context.Context, to, title, body string) error
return nil
}
m := mail.NewMessage()
m.SetAddressHeader("From", client.config.From, client.config.FromName)
m.SetAddressHeader("Reply-To", client.config.ReplyTo, client.config.FromName)
m.SetHeader("To", to)
m.SetHeader("Subject", title)
m.SetHeader("Message-ID", fmt.Sprintf("<%s@%s>", uuid.Must(uuid.NewV4()).String(), "cloudreve"))
m.SetBody("text/html", body)
m := mail.NewMsg()
if err := m.FromFormat(client.config.FromName, client.config.From); err != nil {
return err
}
m.ReplyToFormat(client.config.FromName, client.config.ReplyTo)
m.To(to)
m.Subject(title)
m.SetMessageID()
m.SetBodyString(mail.TypeTextHTML, body)
client.ch <- &message{
msg: m,
cid: logging.CorrelationID(ctx).String(),
userID: inventory.UserIDFromContext(ctx),
msg: m,
subject: title,
to: to,
cid: logging.CorrelationID(ctx).String(),
userID: inventory.UserIDFromContext(ctx),
}
return nil
}
@ -116,17 +122,24 @@ func (client *SMTPPool) Init() {
}
}()
d := mail.NewDialer(client.config.Host, client.config.Port, client.config.User, client.config.Password)
d.Timeout = time.Duration(client.config.Keepalive+5) * time.Second
client.chOpen = true
// 是否启用 SSL
d.SSL = false
opts := []mail.Option{
mail.WithPort(client.config.Port),
mail.WithTimeout(time.Duration(client.config.Keepalive+5) * time.Second),
mail.WithSMTPAuth(mail.SMTPAuthAutoDiscover), mail.WithTLSPortPolicy(mail.TLSOpportunistic),
mail.WithUsername(client.config.User), mail.WithPassword(client.config.Password),
}
if client.config.ForceEncryption {
d.SSL = true
opts = append(opts, mail.WithSSL())
}
d, diaErr := mail.NewClient(client.config.Host, opts...)
if diaErr != nil {
client.l.Panic("Failed to create SMTP client: %s", diaErr)
return
}
d.StartTLSPolicy = mail.OpportunisticStartTLS
var s mail.SendCloser
client.chOpen = true
var err error
open := false
for {
@ -139,22 +152,32 @@ func (client *SMTPPool) Init() {
}
if !open {
if s, err = d.Dial(); err != nil {
if err = d.DialWithContext(context.Background()); err != nil {
panic(err)
}
open = true
}
l := client.l.CopyWithPrefix(fmt.Sprintf("[Cid: %s]", m.cid))
if err := mail.Send(s, m.msg); err != nil {
if err := d.Send(m.msg); err != nil {
// Check if this is an SMTP RESET error after successful delivery
var sendErr *mail.SendError
var errParsed = errors.As(err, &sendErr)
if errParsed && sendErr.Reason == mail.ErrSMTPReset {
open = false
l.Debug("SMTP RESET error, closing connection...")
// https://github.com/wneessen/go-mail/issues/463
continue // Don't treat this as a delivery failure since mail was sent
}
l.Warning("Failed to send email: %s, Cid=%s", err, m.cid)
} else {
l.Info("Email sent to %q, title: %q.", m.msg.GetHeader("To"), m.msg.GetHeader("Subject"))
l.Info("Email sent to %q, title: %q.", m.to, m.subject)
}
// 长时间没有新邮件则关闭SMTP连接
case <-time.After(time.Duration(client.config.Keepalive) * time.Second):
if open {
if err := s.Close(); err != nil {
if err := d.Close(); err != nil {
client.l.Warning("Failed to close SMTP connection: %s", err)
}
open = false

@ -38,18 +38,29 @@ func NewResetEmail(ctx context.Context, settings setting.Provider, user *ent.Use
Url: url,
}
tmpl, err := template.New("reset").Parse(selected.Body)
tmplTitle, err := template.New("resetTitle").Parse(selected.Title)
if err != nil {
return "", "", fmt.Errorf("failed to parse email title: %w", err)
}
var resTitle strings.Builder
err = tmplTitle.Execute(&resTitle, resetCtx)
if err != nil {
return "", "", fmt.Errorf("failed to execute email title: %w", err)
}
tmplBody, err := template.New("resetBody").Parse(selected.Body)
if err != nil {
return "", "", fmt.Errorf("failed to parse email template: %w", err)
}
var res strings.Builder
err = tmpl.Execute(&res, resetCtx)
var resBody strings.Builder
err = tmplBody.Execute(&resBody, resetCtx)
if err != nil {
return "", "", fmt.Errorf("failed to execute email template: %w", err)
}
return fmt.Sprintf("[%s] %s", resetCtx.SiteBasic.Name, selected.Title), res.String(), nil
return resTitle.String(), resBody.String(), nil
}
// ActivationContext used for variables in activation email
@ -73,18 +84,29 @@ func NewActivationEmail(ctx context.Context, settings setting.Provider, user *en
Url: url,
}
tmpl, err := template.New("activation").Parse(selected.Body)
tmplTitle, err := template.New("activationTitle").Parse(selected.Title)
if err != nil {
return "", "", fmt.Errorf("failed to parse email title: %w", err)
}
var resTitle strings.Builder
err = tmplTitle.Execute(&resTitle, activationCtx)
if err != nil {
return "", "", fmt.Errorf("failed to execute email title: %w", err)
}
tmplBody, err := template.New("activationBody").Parse(selected.Body)
if err != nil {
return "", "", fmt.Errorf("failed to parse email template: %w", err)
}
var res strings.Builder
err = tmpl.Execute(&res, activationCtx)
var resBody strings.Builder
err = tmplBody.Execute(&resBody, activationCtx)
if err != nil {
return "", "", fmt.Errorf("failed to execute email template: %w", err)
}
return fmt.Sprintf("[%s] %s", activationCtx.SiteBasic.Name, selected.Title), res.String(), nil
return resTitle.String(), resBody.String(), nil
}
func commonContext(ctx context.Context, settings setting.Provider) *CommonContext {
@ -122,4 +144,4 @@ func selectTemplate(templates []setting.EmailTemplate, u *ent.User) setting.Emai
}
return selected
}
}

@ -244,7 +244,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 是否允许覆盖
@ -352,6 +352,14 @@ func (handler Driver) Thumb(ctx context.Context, expire *time.Time, ext string,
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("imageMogr2/thumbnail/%dx%d", w, h)
enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format/%s/rquality/%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format/%s", enco.Format)
}
source, err := handler.signSourceURL(
ctx,
e.Source(),
@ -374,7 +382,12 @@ func (handler Driver) Thumb(ctx context.Context, expire *time.Time, ext string,
func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
// 添加各项设置
options := urlOption{}
if args.Speed > 0 {
// Byte 转换为 bit
args.Speed *= 8
// COS对速度值有范围限制
if args.Speed < 819200 {
args.Speed = 819200
}
@ -383,6 +396,7 @@ func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetS
}
options.Speed = args.Speed
}
if args.IsDownload {
encodedFilename := url.PathEscape(args.DisplayName)
options.ContentDescription = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
@ -441,7 +455,7 @@ func (handler Driver) Token(ctx context.Context, uploadSession *fs.UploadSession
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 初始化分片上传
@ -580,7 +594,7 @@ func (handler Driver) Meta(ctx context.Context, path string) (*MetaData, error)
}, nil
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
if util.ContainsString(supportedImageExt, ext) {
return handler.extractImageMeta(ctx, path)
}

@ -83,7 +83,7 @@ type (
Capabilities() *Capabilities
// MediaMeta extracts media metadata from the given file.
MediaMeta(ctx context.Context, path, ext string) ([]MediaMeta, error)
MediaMeta(ctx context.Context, path, ext, language string) ([]MediaMeta, error)
}
Capabilities struct {
@ -117,6 +117,7 @@ const (
MetaTypeExif MetaType = "exif"
MediaTypeMusic MetaType = "music"
MetaTypeStreamMedia MetaType = "stream"
MetaTypeGeocoding MetaType = "geocoding"
)
type ForceUsePublicEndpointCtx struct{}

@ -0,0 +1,592 @@
package ks3
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws/request"
"io"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
"strconv"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/ks3sdklib/aws-sdk-go/aws/awserr"
"github.com/ks3sdklib/aws-sdk-go/service/s3/s3manager"
"github.com/samber/lo"
"github.com/ks3sdklib/aws-sdk-go/aws"
"github.com/ks3sdklib/aws-sdk-go/aws/credentials"
"github.com/ks3sdklib/aws-sdk-go/service/s3"
)
// Driver KS3 compatible driver.
//
// Implements the storage handler on top of the Kingsoft Cloud KS3 SDK
// (a fork of aws-sdk-go), so the API surface closely mirrors Amazon S3.
type Driver struct {
	policy    *ent.StoragePolicy // storage policy this driver instance serves
	chunkSize int64              // multipart upload part size in bytes
	settings  setting.Provider   // site settings (thumbnail size/encoding, etc.)
	l         logging.Logger
	config    conf.ConfigProvider
	mime      mime.MimeDetector // fallback MIME detection by file name
	sess      *aws.Config       // SDK configuration (credentials, endpoint, region)
	svc       *s3.S3            // KS3 service client built from sess
}
// UploadPolicy describes a KS3 client-side upload policy: an expiration
// timestamp plus the list of conditions the upload request must satisfy.
type UploadPolicy struct {
	Expiration string        `json:"expiration"`
	Conditions []interface{} `json:"conditions"`
}
// Session bundles an SDK configuration with its request handlers.
// NOTE(review): this type appears unused within this file — confirm whether it
// is referenced elsewhere before removing.
type Session struct {
	Config   *aws.Config
	Handlers request.Handlers
}
// MetaData holds basic information about a stored object.
type MetaData struct {
	Size int64  // object size in bytes
	Etag string // entity tag reported by KS3
}
var (
	// features enumerates the handler capabilities advertised by this driver;
	// populated once in init().
	features = &boolset.BooleanSet{}
)
func init() {
boolset.Sets(map[driver.HandlerCapability]bool{
driver.HandlerCapabilityUploadSentinelRequired: true,
}, features)
}
// Int64 returns a pointer to a freshly allocated copy of the given int64 value.
func Int64(v int64) *int64 {
	p := new(int64)
	*p = v
	return p
}
// New constructs a KS3 driver for the given storage policy. If the policy
// does not specify a multipart chunk size, a default of 25 MB is used.
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
	chunkSize := policy.Settings.ChunkSize
	if chunkSize == 0 {
		chunkSize = 25 << 20 // 25 MB
	}

	// Named "d" rather than "driver" to avoid shadowing the imported
	// "driver" package.
	d := &Driver{
		policy:    policy,
		settings:  settings,
		chunkSize: chunkSize,
		config:    config,
		l:         l,
		mime:      mime,
	}

	sess := aws.Config{
		Credentials:      credentials.NewStaticCredentials(policy.AccessKey, policy.SecretKey, ""),
		Endpoint:         policy.Server,
		Region:           policy.Settings.Region,
		S3ForcePathStyle: policy.Settings.S3ForcePathStyle,
	}
	d.sess = &sess
	d.svc = s3.New(&sess)

	return d, nil
}
// List enumerates objects under the given base path. When recursive is false,
// only immediate children are returned (using "/" as delimiter and reporting
// sub-directories as common prefixes); otherwise all descendants are listed.
// onProgress is invoked with the number of directories, then files, processed.
func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
	// Normalize base into a KS3 key prefix ("dir/").
	base = strings.TrimPrefix(base, "/")
	if base != "" {
		base += "/"
	}
	opt := &s3.ListObjectsInput{
		Bucket:  &handler.policy.BucketName,
		Prefix:  &base,
		MaxKeys: Int64(1000),
	}
	// Non-recursive listings use a delimiter so sub-directories collapse
	// into common prefixes instead of being expanded.
	if !recursive {
		opt.Delimiter = aws.String("/")
	}

	var (
		objects []*s3.Object
		commons []*s3.CommonPrefix
	)
	for {
		res, err := handler.svc.ListObjectsWithContext(ctx, opt)
		if err != nil {
			return nil, err
		}
		objects = append(objects, res.Contents...)
		commons = append(commons, res.CommonPrefixes...)

		// Keep paginating with the marker until the listing is complete.
		// Guard against a nil IsTruncated to avoid a nil-pointer panic.
		if res.IsTruncated != nil && *res.IsTruncated {
			opt.Marker = res.NextMarker
		} else {
			break
		}
	}

	res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))

	// Common prefixes represent directories.
	for _, object := range commons {
		rel, err := filepath.Rel(*opt.Prefix, *object.Prefix)
		if err != nil {
			continue
		}
		res = append(res, fs.PhysicalObject{
			Name:         path.Base(*object.Prefix),
			RelativePath: filepath.ToSlash(rel),
			Size:         0,
			IsDir:        true,
			LastModify:   time.Now(),
		})
	}
	onProgress(len(commons))

	// Plain objects represent files.
	for _, object := range objects {
		rel, err := filepath.Rel(*opt.Prefix, *object.Key)
		if err != nil {
			continue
		}
		res = append(res, fs.PhysicalObject{
			Name:         path.Base(*object.Key),
			Source:       *object.Key,
			RelativePath: filepath.ToSlash(rel),
			Size:         *object.Size,
			IsDir:        false,
			LastModify:   time.Now(),
		})
	}
	onProgress(len(objects))

	return res, nil
}
// Open is not supported by the KS3 driver: objects live remotely and cannot
// be exposed as local *os.File handles.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	err := errors.New("not implemented")
	return nil, err
}
// Put saves the content of the upload request to its destination key in KS3
// using a multipart upload.
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
	defer file.Close()

	// When overwrite is not allowed, fail early if the target key exists.
	if file.Mode&fs.ModeOverwrite != fs.ModeOverwrite {
		if _, err := handler.Meta(ctx, file.Props.SavePath); err == nil {
			return fs.ErrFileExisted
		}
	}

	contentType := file.Props.MimeType
	if contentType == "" {
		// Fall back to detection by file name extension.
		contentType = handler.mime.TypeByName(file.Props.Uri.Name())
	}

	// s3manager transparently splits the stream into chunkSize parts.
	uploader := s3manager.NewUploader(&s3manager.UploadOptions{
		S3:       handler.svc,
		PartSize: handler.chunkSize,
	})

	input := &s3manager.UploadInput{
		Bucket:      &handler.policy.BucketName,
		Key:         &file.Props.SavePath,
		Body:        io.LimitReader(file, file.Props.Size),
		ContentType: aws.String(contentType),
	}
	_, err := uploader.UploadWithContext(ctx, input)
	return err
}
// Delete removes the given object keys from the bucket. Keys are deleted in
// batches (single-object API for a lone key, batch API otherwise). It returns
// the keys that could not be deleted together with the last error encountered.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	failed := make([]string, 0, len(files))
	batchSize := handler.policy.Settings.S3DeleteBatchSize
	if batchSize == 0 {
		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
		// The request can contain a list of up to 1000 keys that you want to delete.
		batchSize = 1000
	}

	var lastErr error
	groups := lo.Chunk(files, batchSize)
	for _, group := range groups {
		if len(group) == 1 {
			// Invoke single file delete API
			_, err := handler.svc.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
				Bucket: &handler.policy.BucketName,
				Key:    &group[0],
			})
			if err != nil {
				if aerr, ok := err.(awserr.Error); ok {
					// Deleting a missing key counts as success.
					if aerr.Code() == s3.ErrCodeNoSuchKey {
						continue
					}
				}

				failed = append(failed, group[0])
				lastErr = err
			}
		} else {
			// Invoke batch delete API
			res, err := handler.svc.DeleteObjects(
				&s3.DeleteObjectsInput{
					Bucket: &handler.policy.BucketName,
					Delete: &s3.Delete{
						Objects: lo.Map(group, func(s string, i int) *s3.ObjectIdentifier {
							return &s3.ObjectIdentifier{Key: &s}
						}),
					},
				})
			if err != nil {
				failed = append(failed, group...)
				lastErr = err
				continue
			}

			// Collect per-key failures reported by the batch API.
			for _, v := range res.Errors {
				// Log the actual error message (previously v.Key was logged
				// twice) and unwrap the pointers so values, not addresses,
				// end up in the log line.
				handler.l.Debug("Failed to delete file: %s, Code: %s, Message: %s",
					lo.FromPtr(v.Key), lo.FromPtr(v.Code), lo.FromPtr(v.Message))
				failed = append(failed, lo.FromPtr(v.Key))
			}
		}
	}

	return failed, lastErr
}
// Thumb returns a presigned URL that renders a server-side thumbnail of the
// entity by appending KS3 image-processing parameters to the object key.
// The URL expires at *expire, falling back to 7 days when no expiry is given
// or the requested expiry is already in the past.
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	w, h := handler.settings.ThumbSize(ctx)
	thumbParam := fmt.Sprintf("@base@tag=imgScale&m=0&w=%d&h=%d", w, h)
	enco := handler.settings.ThumbEncode(ctx)
	switch enco.Format {
	case "jpg", "webp":
		thumbParam += fmt.Sprintf("&q=%d&F=%s", enco.Quality, enco.Format)
	case "png":
		thumbParam += fmt.Sprintf("&F=%s", enco.Format)
	}

	// Ensure the TTL is positive; the original comment promised a 7-day
	// fallback for non-positive TTLs but never enforced it.
	var ttl int64
	if expire != nil {
		ttl = int64(time.Until(*expire).Seconds())
	}
	if ttl <= 0 {
		ttl = 604800 // 7 days in seconds
	}

	thumbUrl, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
		HTTPMethod: s3.GET,                              // request method
		Bucket:     &handler.policy.BucketName,          // bucket name
		Key:        aws.String(e.Source() + thumbParam), // object key with processing suffix
		Expires:    ttl,                                 // TTL in seconds
	})
	if err != nil {
		return "", err
	}

	// Parse the signed URL so the query can be stripped for public buckets.
	finalThumbURL, err := url.Parse(thumbUrl)
	if err != nil {
		return "", err
	}

	// Public buckets do not need (or support) the signature query parameters.
	if !handler.policy.IsPrivate {
		finalThumbURL.RawQuery = ""
	}

	return finalThumbURL.String(), nil
}
// Source returns a (presigned) download URL for the entity. When
// args.IsDownload is set, a Content-Disposition header is attached so the
// browser saves the file under args.DisplayName. The URL expires at
// args.Expire, falling back to 7 days when no expiry is given or the
// requested expiry is already in the past.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	var contentDescription *string
	if args.IsDownload {
		encodedFilename := url.PathEscape(args.DisplayName)
		contentDescription = aws.String(fmt.Sprintf(`attachment; filename="%s"`, encodedFilename))
	}

	// Ensure the TTL is positive; the original comment promised a 7-day
	// fallback for non-positive TTLs but never enforced it.
	var ttl int64
	if args.Expire != nil {
		ttl = int64(time.Until(*args.Expire).Seconds())
	}
	if ttl <= 0 {
		ttl = 604800 // 7 days in seconds
	}

	downloadUrl, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
		HTTPMethod:                 s3.GET,                     // request method
		Bucket:                     &handler.policy.BucketName, // bucket name
		Key:                        aws.String(e.Source()),     // object key
		Expires:                    ttl,                        // TTL in seconds
		ResponseContentDisposition: contentDescription,         // response Content-Disposition header
	})
	if err != nil {
		return "", err
	}

	// Parse the signed URL so the query can be stripped for public buckets.
	finalURL, err := url.Parse(downloadUrl)
	if err != nil {
		return "", err
	}

	// Public buckets do not need (or support) the signature query parameters.
	if !handler.policy.IsPrivate {
		finalURL.RawQuery = ""
	}

	return finalURL.String(), nil
}
// Token creates a multipart upload on KS3 and returns an upload credential
// containing a presigned PUT URL per chunk plus a presigned POST URL that
// completes the upload, so the client uploads directly to the storage
// provider. It also fills in the session's chunk size, callback URL and
// upload ID as side effects.
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
	// Check for duplicated file: a successful HEAD means the target exists.
	if _, err := handler.Meta(ctx, file.Props.SavePath); err == nil {
		return nil, fs.ErrFileExisted
	}

	// Build the callback URL the client reports to after finishing the upload.
	siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
	// Create the upload session on the slave side.
	uploadSession.ChunkSize = handler.chunkSize
	uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeKs3, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()

	// Fall back to guessing the MIME type from the file name when not provided.
	mimeType := file.Props.MimeType
	if mimeType == "" {
		mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
	}

	// Initiate the multipart upload.
	res, err := handler.svc.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
		Bucket:      &handler.policy.BucketName,
		Key:         &uploadSession.Props.SavePath,
		Expires:     &uploadSession.Props.ExpireAt,
		ContentType: aws.String(mimeType),
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create multipart upload: %w", err)
	}

	uploadSession.UploadID = *res.UploadID

	// Presign an upload URL for every chunk.
	chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
	urls := make([]string, chunks.Num())
	for chunks.Next() {
		err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
			// Remaining lifetime of the session, in seconds.
			expireSeconds := int(time.Until(uploadSession.Props.ExpireAt).Seconds())

			partNumber := c.Index() + 1
			// Presign the PUT URL for this part.
			signedURL, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
				HTTPMethod: s3.PUT,
				Bucket:     &handler.policy.BucketName,
				Key:        &uploadSession.Props.SavePath,
				Expires:    int64(expireSeconds),
				Parameters: map[string]*string{
					"partNumber": aws.String(strconv.Itoa(partNumber)),
					"uploadId":   res.UploadID,
				},
				ContentType: aws.String("application/octet-stream"),
			})
			if err != nil {
				return fmt.Errorf("failed to generate presigned upload url for chunk %d: %w", partNumber, err)
			}

			urls[c.Index()] = signedURL
			return nil
		})
		if err != nil {
			return nil, err
		}
	}

	// Presign the POST URL that completes the multipart upload.
	// NOTE(review): this uses file.Props.SavePath while the part URLs above use
	// uploadSession.Props.SavePath — presumably the same value; confirm.
	expireSeconds := int(time.Until(uploadSession.Props.ExpireAt).Seconds())
	signedURL, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
		HTTPMethod: s3.POST,
		Bucket:     &handler.policy.BucketName,
		Key:        &file.Props.SavePath,
		Expires:    int64(expireSeconds),
		Parameters: map[string]*string{
			"uploadId": res.UploadID,
		},
		ContentType: aws.String("application/octet-stream"),
	})
	if err != nil {
		return nil, err
	}

	// Assemble the credential handed back to the client.
	return &fs.UploadCredential{
		UploadID:    *res.UploadID,
		UploadURLs:  urls,
		CompleteURL: signedURL,
		SessionID:   uploadSession.Props.UploadSessionID,
		ChunkSize:   handler.chunkSize,
	}, nil
}
// CancelToken aborts the multipart upload associated with the given session.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	input := &s3.AbortMultipartUploadInput{
		Bucket:   &handler.policy.BucketName,
		Key:      &uploadSession.Props.SavePath,
		UploadID: &uploadSession.UploadID,
	}
	_, err := handler.svc.AbortMultipartUploadWithContext(ctx, input)
	return err
}
// cancelUpload aborts a multipart upload on a best-effort basis;
// failures are logged as warnings and otherwise ignored.
func (handler *Driver) cancelUpload(key, id *string) {
	input := &s3.AbortMultipartUploadInput{
		Bucket:   &handler.policy.BucketName,
		Key:      key,
		UploadID: id,
	}
	_, err := handler.svc.AbortMultipartUpload(input)
	if err != nil {
		handler.l.Warning("failed to abort multipart upload: %s", err)
	}
}
// Capabilities reports the static features and proxy settings of this driver.
func (handler *Driver) Capabilities() *driver.Capabilities {
	caps := &driver.Capabilities{
		StaticFeatures:  features,
		MediaMetaProxy:  handler.policy.Settings.MediaMetaGeneratorProxy,
		ThumbProxy:      handler.policy.Settings.ThumbGeneratorProxy,
		MaxSourceExpire: 7 * 24 * time.Hour, // same 604800 s cap as before
	}
	return caps
}
// MediaMeta extracts media metadata; this driver does not support it.
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
	var metas []driver.MediaMeta
	return metas, errors.New("not implemented")
}
// LocalPath returns the local filesystem path of an object; remote
// drivers have none, so this is always empty.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	const noLocalPath = ""
	return noLocalPath
}
// CompleteUpload finalizes an upload session. When a sentinel task is
// attached, it verifies the stored object's size matches the expected size;
// otherwise there is nothing to do.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	if session.SentinelTaskID == 0 {
		// No sentinel task: nothing to verify.
		return nil
	}

	// Make sure the uploaded file size is correct.
	meta, err := handler.Meta(ctx, session.Props.SavePath)
	if err != nil {
		return fmt.Errorf("failed to get uploaded file size: %w", err)
	}

	if expected := session.Props.Size; meta.Size != expected {
		msg := fmt.Sprintf("File size not match, expected: %d, actual: %d", expected, meta.Size)
		return serializer.NewError(serializer.CodeMetaMismatch, msg, nil)
	}

	return nil
}
// Meta retrieves the size and ETag of the object at path via a HEAD request.
func (handler *Driver) Meta(ctx context.Context, path string) (*MetaData, error) {
	res, err := handler.svc.HeadObjectWithContext(ctx,
		&s3.HeadObjectInput{
			Bucket: &handler.policy.BucketName,
			Key:    &path,
		})
	if err != nil {
		return nil, err
	}

	// Guard the pointer fields before dereferencing: a response missing
	// ContentLength or ETag would otherwise panic.
	if res.ContentLength == nil || res.ETag == nil {
		return nil, fmt.Errorf("incomplete HEAD response for %q", path)
	}

	return &MetaData{
		Size: *res.ContentLength,
		Etag: *res.ETag,
	}, nil
}
// CORS applies a permissive cross-origin policy to the bucket so browsers
// can upload/download directly against the storage endpoint.
func (handler *Driver) CORS() error {
	methods := []string{"GET", "POST", "PUT", "DELETE", "HEAD"}
	rules := []*s3.CORSRule{
		{
			AllowedMethod: methods,
			AllowedOrigin: []string{"*"},
			AllowedHeader: []string{"*"},
			ExposeHeader:  []string{"ETag"},
			MaxAgeSeconds: 3600,
		},
	}

	_, err := handler.svc.PutBucketCORS(&s3.PutBucketCORSInput{
		Bucket:            &handler.policy.BucketName,
		CORSConfiguration: &s3.CORSConfiguration{Rules: rules},
	})
	return err
}
// Reader 读取器
type Reader struct {
r io.Reader
}
// Read 读取数据
func (r Reader) Read(p []byte) (int, error) {
return r.r.Read(p)
}

@ -1,13 +1,14 @@
package local
import (
"os"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gofrs/uuid"
"os"
"time"
)
// NewLocalFileEntity creates a new local file entity.
@ -73,3 +74,11 @@ func (l *localFileEntity) UploadSessionID() *uuid.UUID {
func (l *localFileEntity) Model() *ent.Entity {
return nil
}
func (l *localFileEntity) Props() *types.EntityProps {
return nil
}
func (l *localFileEntity) Encrypted() bool {
return false
}

@ -140,9 +140,9 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
}
openMode := os.O_CREATE | os.O_RDWR
if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite && file.Offset == 0 {
openMode |= os.O_TRUNC
}
// if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite && file.Offset == 0 {
// openMode |= os.O_TRUNC
// }
out, err := os.OpenFile(dst, openMode, Perm)
if err != nil {
@ -298,6 +298,6 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
return capabilities
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return nil, errors.New("not implemented")
}

@ -17,7 +17,7 @@ import (
"github.com/samber/lo"
)
func (d *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (d *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
Method: obs.HttpMethodGet,
Bucket: d.policy.BucketName,

@ -335,13 +335,23 @@ func (d *Driver) LocalPath(ctx context.Context, path string) string {
func (d *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
w, h := d.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("image/resize,m_lfit,w_%d,h_%d", w, h)
enco := d.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format,%s/quality,q_%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format,%s", enco.Format)
}
thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
Method: obs.HttpMethodGet,
Bucket: d.policy.BucketName,
Key: e.Source(),
Expires: int(time.Until(*expire).Seconds()),
QueryParams: map[string]string{
imageProcessHeader: fmt.Sprintf("image/resize,m_lfit,w_%d,h_%d", w, h),
imageProcessHeader: thumbParam,
},
})

@ -241,7 +241,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return nil, errors.New("not implemented")
}

@ -10,12 +10,13 @@ import (
"encoding/pem"
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"io"
"net/http"
"net/url"
"strings"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
)
const (

@ -5,16 +5,17 @@ import (
"encoding/json"
"encoding/xml"
"fmt"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/samber/lo"
"math"
"net/http"
"strconv"
"strings"
"time"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/samber/lo"
)
const (
@ -265,13 +266,14 @@ func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]dri
// extractMediaInfo Sends API calls to OSS IMM service to extract media info.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, category string, forceSign bool) (string, error) {
mediaOption := []oss.Option{oss.Process(category)}
mediaInfoExpire := time.Now().Add(mediaInfoTTL)
thumbURL, err := handler.signSourceURL(
ctx,
path,
&mediaInfoExpire,
mediaOption,
&oss.GetObjectRequest{
Process: oss.Ptr(category),
},
forceSign,
)
if err != nil {

@ -15,7 +15,8 @@ import (
"strings"
"time"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
@ -52,7 +53,6 @@ type Driver struct {
policy *ent.StoragePolicy
client *oss.Client
bucket *oss.Bucket
settings setting.Provider
l logging.Logger
config conf.ConfigProvider
@ -65,12 +65,12 @@ type Driver struct {
type key int
const (
chunkRetrySleep = time.Duration(5) * time.Second
uploadIdParam = "uploadId"
partNumberParam = "partNumber"
callbackParam = "callback"
completeAllHeader = "x-oss-complete-all"
maxDeleteBatch = 1000
chunkRetrySleep = time.Duration(5) * time.Second
maxDeleteBatch = 1000
maxSignTTL = time.Duration(24) * time.Hour * 7
completeAllHeader = "x-oss-complete-all"
forbidOverwriteHeader = "x-oss-forbid-overwrite"
trafficLimitHeader = "x-oss-traffic-limit"
// MultiPartUploadThreshold 服务端使用分片上传的阈值
MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB
@ -102,21 +102,27 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
// CORS 创建跨域策略
func (handler *Driver) CORS() error {
return handler.client.SetBucketCORS(handler.policy.BucketName, []oss.CORSRule{
{
AllowedOrigin: []string{"*"},
AllowedMethod: []string{
"GET",
"POST",
"PUT",
"DELETE",
"HEAD",
_, err := handler.client.PutBucketCors(context.Background(), &oss.PutBucketCorsRequest{
Bucket: &handler.policy.BucketName,
CORSConfiguration: &oss.CORSConfiguration{
CORSRules: []oss.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{
"GET",
"POST",
"PUT",
"DELETE",
"HEAD",
},
ExposeHeaders: []string{},
AllowedHeaders: []string{"*"},
MaxAgeSeconds: oss.Ptr(int64(3600)),
},
},
ExposeHeader: []string{},
AllowedHeader: []string{"*"},
MaxAgeSeconds: 3600,
},
})
}})
return err
}
// InitOSSClient 初始化OSS鉴权客户端
@ -125,34 +131,28 @@ func (handler *Driver) InitOSSClient(forceUsePublicEndpoint bool) error {
return errors.New("empty policy")
}
opt := make([]oss.ClientOption, 0)
// 决定是否使用内网 Endpoint
endpoint := handler.policy.Server
useCname := false
if handler.policy.Settings.ServerSideEndpoint != "" && !forceUsePublicEndpoint {
endpoint = handler.policy.Settings.ServerSideEndpoint
} else if handler.policy.Settings.UseCname {
opt = append(opt, oss.UseCname(true))
useCname = true
}
if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "https://" + endpoint
}
cfg := oss.LoadDefaultConfig().
WithCredentialsProvider(credentials.NewStaticCredentialsProvider(handler.policy.AccessKey, handler.policy.SecretKey, "")).
WithEndpoint(endpoint).
WithRegion(handler.policy.Settings.Region).
WithUseCName(useCname)
// 初始化客户端
client, err := oss.New(endpoint, handler.policy.AccessKey, handler.policy.SecretKey, opt...)
if err != nil {
return err
}
client := oss.NewClient(cfg)
handler.client = client
// 初始化存储桶
bucket, err := client.Bucket(handler.policy.BucketName)
if err != nil {
return err
}
handler.bucket = bucket
return nil
}
@ -166,38 +166,40 @@ func (handler *Driver) List(ctx context.Context, base string, onProgress driver.
var (
delimiter string
marker string
objects []oss.ObjectProperties
commons []string
commons []oss.CommonPrefix
)
if !recursive {
delimiter = "/"
}
for {
subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base),
oss.MaxKeys(1000), oss.Delimiter(delimiter))
p := handler.client.NewListObjectsPaginator(&oss.ListObjectsRequest{
Bucket: &handler.policy.BucketName,
Prefix: &base,
MaxKeys: 1000,
Delimiter: &delimiter,
})
for p.HasNext() {
page, err := p.NextPage(ctx)
if err != nil {
return nil, err
}
objects = append(objects, subRes.Objects...)
commons = append(commons, subRes.CommonPrefixes...)
marker = subRes.NextMarker
if marker == "" {
break
}
objects = append(objects, page.Contents...)
commons = append(commons, page.CommonPrefixes...)
}
// 处理列取结果
res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))
// 处理目录
for _, object := range commons {
rel, err := filepath.Rel(base, object)
rel, err := filepath.Rel(base, *object.Prefix)
if err != nil {
continue
}
res = append(res, fs.PhysicalObject{
Name: path.Base(object),
Name: path.Base(*object.Prefix),
RelativePath: filepath.ToSlash(rel),
Size: 0,
IsDir: true,
@ -208,17 +210,17 @@ func (handler *Driver) List(ctx context.Context, base string, onProgress driver.
// 处理文件
for _, object := range objects {
rel, err := filepath.Rel(base, object.Key)
rel, err := filepath.Rel(base, *object.Key)
if err != nil {
continue
}
res = append(res, fs.PhysicalObject{
Name: path.Base(object.Key),
Source: object.Key,
Name: path.Base(*object.Key),
Source: *object.Key,
RelativePath: filepath.ToSlash(rel),
Size: object.Size,
IsDir: false,
LastModify: object.LastModified,
LastModify: *object.LastModified,
})
}
onProgress(len(res))
@ -240,30 +242,39 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 是否允许覆盖
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
options := []oss.Option{
oss.WithContext(ctx),
oss.Expires(time.Now().Add(credentialTTL * time.Second)),
oss.ForbidOverWrite(!overwrite),
oss.ContentType(mimeType),
}
forbidOverwrite := oss.Ptr(strconv.FormatBool(!overwrite))
exipires := oss.Ptr(time.Now().Add(credentialTTL * time.Second).Format(time.RFC3339))
// 小文件直接上传
if file.Props.Size < MultiPartUploadThreshold {
return handler.bucket.PutObject(file.Props.SavePath, file, options...)
_, err := handler.client.PutObject(ctx, &oss.PutObjectRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
Body: file,
ForbidOverwrite: forbidOverwrite,
ContentType: oss.Ptr(mimeType),
})
return err
}
// 超过阈值时使用分片上传
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
imur, err := handler.client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
ContentType: oss.Ptr(mimeType),
ForbidOverwrite: forbidOverwrite,
Expires: exipires,
})
if err != nil {
return fmt.Errorf("failed to initiate multipart upload: %w", err)
}
parts := make([]oss.UploadPart, 0)
parts := make([]*oss.UploadPartResult, 0)
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
Max: handler.settings.ChunkRetryLimit(ctx),
@ -271,7 +282,13 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
part, err := handler.bucket.UploadPart(imur, content, current.Length(), current.Index()+1, oss.WithContext(ctx))
part, err := handler.client.UploadPart(ctx, &oss.UploadPartRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
PartNumber: int32(current.Index() + 1),
Body: content,
})
if err == nil {
parts = append(parts, part)
}
@ -280,14 +297,27 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
for chunks.Next() {
if err := chunks.Process(uploadFunc); err != nil {
handler.cancelUpload(imur)
handler.cancelUpload(*imur)
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
}
}
_, err = handler.bucket.CompleteMultipartUpload(imur, parts, oss.ForbidOverWrite(!overwrite), oss.WithContext(ctx))
_, err = handler.client.CompleteMultipartUpload(ctx, &oss.CompleteMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: imur.Key,
UploadId: imur.UploadId,
CompleteMultipartUpload: &oss.CompleteMultipartUpload{
Parts: lo.Map(parts, func(part *oss.UploadPartResult, i int) oss.UploadPart {
return oss.UploadPart{
PartNumber: int32(i + 1),
ETag: part.ETag,
}
}),
},
ForbidOverwrite: oss.Ptr(strconv.FormatBool(!overwrite)),
})
if err != nil {
handler.cancelUpload(imur)
handler.cancelUpload(*imur)
}
return err
@ -302,7 +332,12 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
for index, group := range groups {
handler.l.Debug("Process delete group #%d: %v", index, group)
// 删除文件
delRes, err := handler.bucket.DeleteObjects(group)
delRes, err := handler.client.DeleteMultipleObjects(ctx, &oss.DeleteMultipleObjectsRequest{
Bucket: &handler.policy.BucketName,
Objects: lo.Map(group, func(v string, i int) oss.DeleteObject {
return oss.DeleteObject{Key: &v}
}),
})
if err != nil {
failed = append(failed, group...)
lastError = err
@ -310,7 +345,14 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
}
// 统计未删除的文件
failed = append(failed, util.SliceDifference(files, delRes.DeletedObjects)...)
failed = append(
failed,
util.SliceDifference(files,
lo.Map(delRes.DeletedObjects, func(v oss.DeletedInfo, i int) string {
return *v.Key
}),
)...,
)
}
if len(failed) > 0 && lastError == nil {
@ -334,12 +376,23 @@ func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string,
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("image/resize,m_lfit,h_%d,w_%d", h, w)
thumbOption := []oss.Option{oss.Process(thumbParam)}
enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format,%s/quality,q_%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format,%s", enco.Format)
}
req := &oss.GetObjectRequest{
Process: oss.Ptr(thumbParam),
}
thumbURL, err := handler.signSourceURL(
ctx,
e.Source(),
expire,
thumbOption,
req,
false,
)
if err != nil {
@ -361,11 +414,11 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
}
// 添加各项设置
var signOptions = make([]oss.Option, 0, 2)
req := &oss.GetObjectRequest{}
if args.IsDownload {
encodedFilename := url.PathEscape(args.DisplayName)
signOptions = append(signOptions, oss.ResponseContentDisposition(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
encodedFilename, encodedFilename)))
req.ResponseContentDisposition = oss.Ptr(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
encodedFilename, encodedFilename))
}
if args.Speed > 0 {
// Byte 转换为 bit
@ -378,25 +431,39 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
if args.Speed > 838860800 {
args.Speed = 838860800
}
signOptions = append(signOptions, oss.TrafficLimitParam(args.Speed))
req.Parameters = map[string]string{
trafficLimitHeader: strconv.FormatInt(args.Speed, 10),
}
}
return handler.signSourceURL(ctx, e.Source(), args.Expire, signOptions, false)
return handler.signSourceURL(ctx, e.Source(), args.Expire, req, false)
}
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, options []oss.Option, forceSign bool) (string, error) {
ttl := int64(86400 * 365 * 20)
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, req *oss.GetObjectRequest, forceSign bool) (string, error) {
// V4 Sign 最大过期时间为7天
ttl := maxSignTTL
if expire != nil {
ttl = int64(time.Until(*expire).Seconds())
ttl = time.Until(*expire)
if ttl > maxSignTTL {
ttl = maxSignTTL
}
}
signedURL, err := handler.bucket.SignURL(path, oss.HTTPGet, ttl, options...)
if req == nil {
req = &oss.GetObjectRequest{}
}
req.Bucket = &handler.policy.BucketName
req.Key = &path
// signedURL, err := handler.client.Presign(path, oss.HTTPGet, ttl, options...)
result, err := handler.client.Presign(ctx, req, oss.PresignExpires(ttl))
if err != nil {
return "", err
}
// 将最终生成的签名URL域名换成用户自定义的加速域名如果有
finalURL, err := url.Parse(signedURL)
finalURL, err := url.Parse(result.URL)
if err != nil {
return "", err
}
@ -404,10 +471,12 @@ func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *t
// 公有空间替换掉Key及不支持的头
if !handler.policy.IsPrivate && !forceSign {
query := finalURL.Query()
query.Del("OSSAccessKeyId")
query.Del("Signature")
query.Del("x-oss-credential")
query.Del("x-oss-date")
query.Del("x-oss-expires")
query.Del("x-oss-signature")
query.Del("x-oss-signature-version")
query.Del("response-content-disposition")
query.Del("x-oss-traffic-limit")
finalURL.RawQuery = query.Encode()
}
return finalURL.String(), nil
@ -441,38 +510,45 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 初始化分片上传
options := []oss.Option{
oss.WithContext(ctx),
oss.Expires(uploadSession.Props.ExpireAt),
oss.ForbidOverWrite(true),
oss.ContentType(mimeType),
}
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
imur, err := handler.client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
ContentType: oss.Ptr(mimeType),
ForbidOverwrite: oss.Ptr(strconv.FormatBool(true)),
Expires: oss.Ptr(uploadSession.Props.ExpireAt.Format(time.RFC3339)),
})
if err != nil {
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
}
uploadSession.UploadID = imur.UploadID
uploadSession.UploadID = *imur.UploadId
// 为每个分片签名上传 URL
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
urls := make([]string, chunks.Num())
ttl := int64(time.Until(uploadSession.Props.ExpireAt).Seconds())
ttl := time.Until(uploadSession.Props.ExpireAt)
for chunks.Next() {
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
signedURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPut,
ttl,
oss.AddParam(partNumberParam, strconv.Itoa(c.Index()+1)),
oss.AddParam(uploadIdParam, imur.UploadID),
oss.ContentType("application/octet-stream"))
signedURL, err := handler.client.Presign(ctx, &oss.UploadPartRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
PartNumber: int32(c.Index() + 1),
Body: chunk,
RequestCommon: oss.RequestCommon{
Headers: map[string]string{
"Content-Type": "application/octet-stream",
},
},
}, oss.PresignExpires(ttl))
if err != nil {
return err
}
urls[c.Index()] = signedURL
urls[c.Index()] = signedURL.URL
return nil
})
if err != nil {
@ -481,29 +557,43 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
}
// 签名完成分片上传的URL
completeURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPost, ttl,
oss.ContentType("application/octet-stream"),
oss.AddParam(uploadIdParam, imur.UploadID),
oss.Expires(time.Now().Add(time.Duration(ttl)*time.Second)),
oss.SetHeader(completeAllHeader, "yes"),
oss.ForbidOverWrite(true),
oss.AddParam(callbackParam, callbackPolicyEncoded))
completeURL, err := handler.client.Presign(ctx, &oss.CompleteMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
RequestCommon: oss.RequestCommon{
Parameters: map[string]string{
"callback": callbackPolicyEncoded,
},
Headers: map[string]string{
"Content-Type": "application/octet-stream",
completeAllHeader: "yes",
forbidOverwriteHeader: "true",
},
},
}, oss.PresignExpires(ttl))
if err != nil {
return nil, err
}
return &fs.UploadCredential{
UploadID: imur.UploadID,
UploadID: *imur.UploadId,
UploadURLs: urls,
CompleteURL: completeURL,
CompleteURL: completeURL.URL,
SessionID: uploadSession.Props.UploadSessionID,
ChunkSize: handler.chunkSize,
Callback: callbackPolicyEncoded,
}, nil
}
// 取消上传凭证
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
return handler.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{UploadID: uploadSession.UploadID, Key: uploadSession.Props.SavePath}, oss.WithContext(ctx))
_, err := handler.client.AbortMultipartUpload(ctx, &oss.AbortMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &uploadSession.Props.SavePath,
UploadId: &uploadSession.UploadID,
})
return err
}
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
@ -526,7 +616,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
if util.ContainsString(supportedImageExt, ext) {
return handler.extractImageMeta(ctx, path)
}
@ -547,7 +637,11 @@ func (handler *Driver) LocalPath(ctx context.Context, path string) string {
}
func (handler *Driver) cancelUpload(imur oss.InitiateMultipartUploadResult) {
if err := handler.bucket.AbortMultipartUpload(imur); err != nil {
if _, err := handler.client.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: imur.Key,
UploadId: imur.UploadId,
}); err != nil {
handler.l.Warning("failed to abort multipart upload: %s", err)
}
}

@ -223,7 +223,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
err = resumeUploader.CompleteParts(ctx, upToken, upHost, nil, handler.policy.BucketName,
@ -277,10 +277,20 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
// Thumb 获取文件缩略图
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("imageView2/1/w/%d/h/%d", w, h)
enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format/%s/q/%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format/%s", enco.Format)
}
return handler.signSourceURL(
e.Source(),
url.Values{
fmt.Sprintf("imageView2/1/w/%d/h/%d", w, h): []string{},
thumbParam: []string{},
},
expire,
), nil
@ -379,7 +389,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
uploadSession.UploadID = ret.UploadID
@ -423,7 +433,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
if util.ContainsString(supportedImageExt, ext) {
return handler.extractImageMeta(ctx, path)
}

@ -43,7 +43,7 @@ type Client interface {
// DeleteUploadSession deletes remote upload session
DeleteUploadSession(ctx context.Context, sessionID string) error
// MediaMeta gets media meta from remote server
MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error)
MediaMeta(ctx context.Context, src, ext, language string) ([]driver.MediaMeta, error)
// DeleteFiles deletes files from remote server
DeleteFiles(ctx context.Context, files ...string) ([]string, error)
// List lists files from remote server
@ -183,10 +183,10 @@ func (c *remoteClient) DeleteFiles(ctx context.Context, files ...string) ([]stri
return nil, nil
}
func (c *remoteClient) MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error) {
func (c *remoteClient) MediaMeta(ctx context.Context, src, ext, language string) ([]driver.MediaMeta, error) {
resp, err := c.httpClient.Request(
http.MethodGet,
routes.SlaveMediaMetaRoute(src, ext),
routes.SlaveMediaMetaRoute(src, ext, language),
nil,
request.WithContext(ctx),
request.WithLogger(c.l),

@ -179,6 +179,6 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
return handler.uploadClient.MediaMeta(ctx, path, ext)
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return handler.uploadClient.MediaMeta(ctx, path, ext, language)
}

@ -207,7 +207,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
_, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
@ -344,7 +344,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 创建分片上传
@ -482,7 +482,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return nil, errors.New("not implemented")
}

@ -161,7 +161,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
err := handler.up.Put(&upyun.PutObjectConfig{
@ -203,8 +203,16 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
// Thumb 获取文件缩略图
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("!/fwfh/%dx%d", w, h)
enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format/%s/quality/%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format/%s", enco.Format)
}
thumbURL, err := handler.signURL(ctx, e.Source()+thumbParam, nil, expire)
if err != nil {
return "", err
@ -301,7 +309,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
return &fs.UploadCredential{
@ -337,7 +345,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return handler.extractImageMeta(ctx, path)
}

@ -0,0 +1,360 @@
// Package encrypt provides AES-256-CTR encryption and decryption functionality
// compatible with the JavaScript EncryptedBlob implementation.
//
// # Usage Example
//
// Basic usage with encrypted metadata:
//
// // Create AES256CTR instance
// aes := NewAES256CTR(masterKeyVault)
//
// // Load encrypted metadata (key is encrypted with master key)
// err := aes.LoadMetadata(ctx, encryptedMetadata)
// if err != nil {
// return err
// }
//
// // Set encrypted source stream
// err = aes.SetSource(encryptedStream, nil, size, 0)
// if err != nil {
// return err
// }
//
// // Read decrypted data
// decryptedData, err := io.ReadAll(aes)
// if err != nil {
// return err
// }
// aes.Close()
//
// Usage with plain metadata (already decrypted):
//
// aes := NewAES256CTR(masterKeyVault)
// err := aes.LoadPlainMetadata(plainMetadata)
// err = aes.SetSource(encryptedStream, nil, size, 0)
// // Read decrypted data...
//
// Usage with counter offset (for chunked/sliced streams):
//
// // If reading from byte offset 1048576 (1MB) of the encrypted file
// aes := NewAES256CTR(masterKeyVault)
// err := aes.LoadPlainMetadata(metadata)
// err = aes.SetSource(encryptedStreamStartingAt1MB, nil, size, 1048576)
// // This ensures proper counter alignment for correct decryption
//
// Using the Seeker interface (requires seekable source):
//
// aes := NewAES256CTR(masterKeyVault)
// err := aes.LoadPlainMetadata(metadata)
// err = aes.SetSource(seekableEncryptedStream, seeker, totalFileSize, 0)
// // A non-negative size passed to SetSource is required for io.SeekEnd
//
// // Seek to position 1048576
// newPos, err := aes.Seek(1048576, io.SeekStart)
// // Read from that position...
//
// // Seek relative to current position
// newPos, err = aes.Seek(100, io.SeekCurrent)
//
// // Seek from end (requires a non-negative size passed to SetSource)
// newPos, err = aes.Seek(-1024, io.SeekEnd)
//
// Using the factory pattern:
//
// factory := NewCryptorFactory(masterKeyVault)
// decrypter, err := factory(types.CipherAES256CTR)
// if err != nil {
// return err
// }
// err = decrypter.LoadMetadata(ctx, encryptedMetadata)
// err = decrypter.SetSource(encryptedStream, nil, size, 0)
// defer decrypter.Close()
package encrypt
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
// AES256CTR provides both encryption and decryption for AES-256-CTR.
// It implements the Cryptor interface. The zero value is not usable;
// construct instances with NewAES256CTR.
type AES256CTR struct {
	// masterKeyVault supplies the master key used to wrap/unwrap per-file keys.
	masterKeyVault MasterEncryptKeyVault

	// Decryption fields
	src           io.ReadCloser // Source encrypted stream
	seeker        io.Seeker     // Seeker for the source stream; nil disables Seek
	stream        cipher.Stream // AES-CTR cipher stream, positioned at counterOffset+pos
	metadata      *types.EncryptMetadata // Decrypted metadata (plaintext key + IV)
	counterOffset int64 // Byte offset for sliced streams (absolute start of src in the original file)
	pos           int64 // Current read position relative to counterOffset
	size          int64 // Total size of encrypted data (for SeekEnd support, -1 if unknown)
	eof           bool  // EOF flag; once set, Read returns io.EOF immediately
}
// NewAES256CTR creates an AES256CTR backed by the given master key vault.
// The total size starts as -1 (unknown) until one is supplied via SetSource.
func NewAES256CTR(masterKeyVault MasterEncryptKeyVault) *AES256CTR {
	return &AES256CTR{
		masterKeyVault: masterKeyVault,
		size:           -1, // Unknown by default
	}
}
// GenerateMetadata creates fresh encryption parameters for a new file: a
// random 32-byte AES-256 key, a random 16-byte CTR IV, and a copy of the key
// wrapped (encrypted) with the master key from the vault. Both the plaintext
// and the wrapped key are returned in the metadata.
func (e *AES256CTR) GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error) {
	// Random 32-byte key for AES-256.
	key := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		return nil, err
	}

	// Random 16-byte IV for CTR mode.
	iv := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return nil, err
	}

	// Wrap the per-file key with the master key so it can be stored at rest.
	masterKey, err := e.masterKeyVault.GetMasterKey(ctx)
	if err != nil {
		return nil, err
	}
	wrappedKey, err := EncryptWithMasterKey(masterKey, key)
	if err != nil {
		return nil, err
	}

	return &types.EncryptMetadata{
		Algorithm:    types.CipherAES256CTR,
		Key:          wrappedKey,
		KeyPlainText: key,
		IV:           iv,
	}, nil
}
// LoadMetadata loads the encryption metadata, unwrapping the per-file key
// with the master key when only the encrypted key is present. Metadata that
// already carries a plaintext key is stored as-is.
func (e *AES256CTR) LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error {
	switch {
	case encryptedMetadata == nil:
		return fmt.Errorf("encryption metadata is nil")
	case encryptedMetadata.Algorithm != types.CipherAES256CTR:
		return fmt.Errorf("unsupported algorithm: %s", encryptedMetadata.Algorithm)
	}

	// Plaintext key already available — nothing to decrypt.
	if len(encryptedMetadata.KeyPlainText) > 0 {
		e.metadata = encryptedMetadata
		return nil
	}

	// Unwrap the per-file key using the master key vault.
	plainKey, err := DecriptKey(ctx, e.masterKeyVault, encryptedMetadata.Key)
	if err != nil {
		return fmt.Errorf("failed to decrypt encryption key: %w", err)
	}

	e.metadata = &types.EncryptMetadata{
		Algorithm:    encryptedMetadata.Algorithm,
		KeyPlainText: plainKey,
		IV:           encryptedMetadata.IV,
	}
	return nil
}
// SetSource attaches the encrypted input stream and prepares the CTR cipher.
// counterOffset is the absolute byte position within the original file at
// which src begins, allowing correct decryption of sliced streams; the
// cipher counter — including the intra-block position for offsets that are
// not 16-byte aligned — is advanced to match.
//
// seeker may be nil (Seek will then be unavailable); size may be negative
// when the total length is unknown (io.SeekEnd will then be unavailable).
func (e *AES256CTR) SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error {
	if e.metadata == nil {
		return fmt.Errorf("metadata not loaded, call LoadMetadata first")
	}

	e.src = src
	e.seeker = seeker
	e.size = size
	e.counterOffset = counterOffset
	e.pos = 0     // Start reading from the beginning of this slice
	e.eof = false // Fresh source, clear any previous EOF state

	// Align the key stream with the slice's starting byte.
	return e.initCipherStream(counterOffset)
}
// Read implements io.Reader, decrypting data from the encrypted source
// stream on the fly.
//
// Returns the number of decrypted bytes written to p; once the source is
// exhausted, subsequent calls return io.EOF.
func (e *AES256CTR) Read(p []byte) (int, error) {
	if e.src == nil {
		return 0, fmt.Errorf("source not set, call SetSource first")
	}
	if e.eof {
		return 0, io.EOF
	}

	// Read encrypted data from source.
	n, err := e.src.Read(p)

	// Decrypt whatever was read BEFORE inspecting the error. The io.Reader
	// contract allows n > 0 together with a non-nil error, and callers are
	// expected to process those n bytes; the previous implementation returned
	// them still encrypted on non-EOF errors and also left the CTR key stream
	// out of sync with the bytes already consumed from the source.
	if n > 0 {
		e.stream.XORKeyStream(p[:n], p[:n])
		e.pos += int64(n) // Update current position
	}

	if err == io.EOF {
		e.eof = true
		if n == 0 {
			return 0, io.EOF
		}
	}
	return n, err
}
// Close implements io.Closer, releasing the underlying source stream if one
// was set.
func (e *AES256CTR) Close() error {
	if e.src == nil {
		return nil
	}
	return e.src.Close()
}
// Seek implements io.Seeker for the decrypted view of the stream. It seeks
// the underlying source and re-derives the AES-CTR counter for the new
// position.
//
// Parameters:
//   - offset: byte offset relative to whence
//   - whence: io.SeekStart, io.SeekCurrent, or io.SeekEnd
//
// Returns the new absolute position (relative to counterOffset start).
//
// Note: io.SeekEnd only works when a non-negative size was supplied to
// SetSource; seeking at all requires the seeker passed to SetSource to be
// non-nil. (The previous doc and error text referenced a SetSize method
// that does not exist — size is provided through SetSource.)
func (e *AES256CTR) Seek(offset int64, whence int) (int64, error) {
	if e.metadata == nil {
		return 0, fmt.Errorf("metadata not loaded, call LoadMetadata first")
	}
	if e.src == nil {
		return 0, fmt.Errorf("source not set, call SetSource first")
	}
	// Check if source supports seeking.
	if e.seeker == nil {
		return 0, fmt.Errorf("source does not support seeking")
	}

	// Calculate new absolute position (relative to counterOffset).
	var newPos int64
	switch whence {
	case io.SeekStart:
		newPos = offset
	case io.SeekCurrent:
		newPos = e.pos + offset
	case io.SeekEnd:
		if e.size < 0 {
			return 0, fmt.Errorf("size unknown, provide a size to SetSource before using SeekEnd")
		}
		newPos = e.size + offset
	default:
		return 0, fmt.Errorf("invalid whence: %d", whence)
	}

	// Validate new position.
	if newPos < 0 {
		return 0, fmt.Errorf("negative position: %d", newPos)
	}

	// Seek in the underlying source stream.
	// The absolute position in the source is counterOffset + newPos.
	absPos := e.counterOffset + newPos
	if _, err := e.seeker.Seek(absPos, io.SeekStart); err != nil {
		return 0, fmt.Errorf("failed to seek source: %w", err)
	}

	// Reinitialize cipher stream with the new counter position.
	if err := e.initCipherStream(absPos); err != nil {
		return 0, fmt.Errorf("failed to reinitialize cipher stream: %w", err)
	}

	// Update position and reset EOF flag.
	e.pos = newPos
	e.eof = false
	return newPos, nil
}
// initCipherStream (re)creates the CTR key stream aligned to the given
// absolute byte position of the original file: the IV-based counter is
// advanced by whole 16-byte blocks, then the stream is drained by the
// remaining intra-block bytes so the next XOR lines up exactly.
func (e *AES256CTR) initCipherStream(absolutePosition int64) error {
	block, err := aes.NewCipher(e.metadata.KeyPlainText)
	if err != nil {
		return fmt.Errorf("failed to create AES cipher: %w", err)
	}

	// Start from the stored IV and add the whole-block offset.
	counter := make([]byte, 16)
	copy(counter, e.metadata.IV)
	if absolutePosition > 0 {
		incrementCounter(counter, absolutePosition/16)
	}
	e.stream = cipher.NewCTR(block, counter)

	// For non-block-aligned positions, burn off the partial-block prefix of
	// the key stream so decryption starts mid-block correctly.
	if rem := absolutePosition % 16; rem > 0 {
		scratch := make([]byte, rem)
		e.stream.XORKeyStream(scratch, scratch)
	}
	return nil
}
// incrementCounter adds the given number of blocks to a 16-byte CTR counter,
// treated as a big-endian 128-bit integer (wrapping modulo 2^128). This
// matches the JavaScript implementation's incrementCounter function.
// Non-positive block counts leave the counter untouched.
func incrementCounter(counter []byte, blocks int64) {
	if blocks <= 0 {
		return
	}
	// Classic schoolbook addition from the least-significant byte upward,
	// carrying as needed. Stops early once both the addend and the carry
	// are exhausted.
	carry := uint64(0)
	for i := 15; i >= 0 && (blocks > 0 || carry > 0); i-- {
		sum := uint64(counter[i]) + uint64(blocks&0xff) + carry
		counter[i] = byte(sum)
		carry = sum >> 8
		blocks >>= 8
	}
}

@ -0,0 +1,97 @@
package encrypt
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
type (
	// Cryptor is the common interface for stream ciphers used to encrypt and
	// decrypt file entities. A Cryptor yields decrypted data through its
	// io.ReadCloser side, can reposition within the stream via io.Seeker,
	// and manages the per-file encryption metadata.
	Cryptor interface {
		io.ReadCloser
		io.Seeker
		// LoadMetadata loads and decrypts the encryption metadata using the master key.
		LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error
		// SetSource sets the encrypted data source and initializes the cipher stream.
		// counterOffset is the absolute byte offset at which src starts within the
		// original file; size may be negative when unknown.
		SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error
		// GenerateMetadata generates new encryption metadata (fresh key and IV).
		GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error)
	}

	// CryptorFactory constructs a Cryptor for the requested cipher algorithm.
	CryptorFactory func(algorithm types.Cipher) (Cryptor, error)
)
// NewCryptorFactory returns a CryptorFactory that builds a Cryptor for the
// requested algorithm, backed by the given master key vault. Unrecognized
// algorithms yield an error.
func NewCryptorFactory(masterKeyVault MasterEncryptKeyVault) CryptorFactory {
	return func(algorithm types.Cipher) (Cryptor, error) {
		if algorithm == types.CipherAES256CTR {
			return NewAES256CTR(masterKeyVault), nil
		}
		return nil, fmt.Errorf("unknown algorithm: %s", algorithm)
	}
}
// EncryptWithMasterKey encrypts data using the master key with AES-256-CTR
// Returns: [16-byte IV] + [encrypted data]
func EncryptWithMasterKey(masterKey, data []byte) ([]byte, error) {
// Create AES cipher with master key
block, err := aes.NewCipher(masterKey)
if err != nil {
return nil, err
}
// Generate random IV for encryption
iv := make([]byte, 16)
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
return nil, err
}
// Encrypt data
stream := cipher.NewCTR(block, iv)
encrypted := make([]byte, len(data))
stream.XORKeyStream(encrypted, data)
// Return IV + encrypted data
result := append(iv, encrypted...)
return result, nil
}
// DecriptKey unwraps an encrypted per-file key: it fetches the master key
// from the vault and decrypts encryptedKey with it.
//
// NOTE(review): the name is a historical misspelling of "DecryptKey"; it is
// exported and referenced by callers under this name, so it is kept as-is.
func DecriptKey(ctx context.Context, keyVault MasterEncryptKeyVault, encryptedKey []byte) ([]byte, error) {
	masterKey, err := keyVault.GetMasterKey(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	return DecryptWithMasterKey(masterKey, encryptedKey)
}
// DecryptWithMasterKey decrypts data using the master key with AES-256-CTR
// Input format: [16-byte IV] + [encrypted data]
func DecryptWithMasterKey(masterKey, encryptedData []byte) ([]byte, error) {
// Validate input length
if len(encryptedData) < 16 {
return nil, aes.KeySizeError(len(encryptedData))
}
// Extract IV and encrypted data
iv := encryptedData[:16]
encrypted := encryptedData[16:]
// Create AES cipher with master key
block, err := aes.NewCipher(masterKey)
if err != nil {
return nil, err
}
// Decrypt data
stream := cipher.NewCTR(block, iv)
decrypted := make([]byte, len(encrypted))
stream.XORKeyStream(decrypted, encrypted)
return decrypted, nil
}

@ -0,0 +1,105 @@
package encrypt
import (
"context"
"encoding/base64"
"errors"
"fmt"
"os"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
const (
	// EnvMasterEncryptKey is the environment variable holding the
	// base64-encoded master encryption key.
	EnvMasterEncryptKey = "CR_ENCRYPT_MASTER_KEY"
)

// MasterEncryptKeyVault is a vault for the master encrypt key.
type MasterEncryptKeyVault interface {
	// GetMasterKey returns the raw (decoded) master key bytes, or an error
	// if the key is unavailable.
	GetMasterKey(ctx context.Context) ([]byte, error)
}
// NewMasterEncryptKeyVault selects a vault implementation based on the
// configured vault type: environment variable, key file, or (by default)
// the settings store.
func NewMasterEncryptKeyVault(ctx context.Context, settings setting.Provider) MasterEncryptKeyVault {
	switch settings.MasterEncryptKeyVault(ctx) {
	case setting.MasterEncryptKeyVaultTypeEnv:
		return NewEnvMasterEncryptKeyVault()
	case setting.MasterEncryptKeyVaultTypeFile:
		return NewFileMasterEncryptKeyVault(settings.MasterEncryptKeyFile(ctx))
	}
	// Fall back to the settings-backed vault for any other type.
	return NewSettingMasterEncryptKeyVault(settings)
}
// settingMasterEncryptKeyVault serves the master key from the settings
// provider (setting KV storage).
type settingMasterEncryptKeyVault struct {
	setting setting.Provider
}

// NewSettingMasterEncryptKeyVault constructs a settings-backed vault.
func NewSettingMasterEncryptKeyVault(setting setting.Provider) MasterEncryptKeyVault {
	return &settingMasterEncryptKeyVault{setting: setting}
}

// GetMasterKey returns the key stored in settings, or an error when unset.
func (v *settingMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
	key := v.setting.MasterEncryptKey(ctx)
	if key == nil {
		return nil, errors.New("master encrypt key is not set")
	}
	return key, nil
}
// NewEnvMasterEncryptKeyVault constructs a vault that reads the master key
// from the CR_ENCRYPT_MASTER_KEY environment variable (base64-encoded).
func NewEnvMasterEncryptKeyVault() MasterEncryptKeyVault {
	return &envMasterEncryptKeyVault{}
}

// envMasterEncryptKeyVault decodes the master key from the environment once
// and serves it from a process-wide cache afterwards.
type envMasterEncryptKeyVault struct {
}

// envMasterKeyCache memoizes the decoded key for the process lifetime.
// NOTE(review): this package-level cache is read and written without
// synchronization; concurrent first calls to GetMasterKey race on it —
// confirm callers are serialized, or guard it (e.g. sync.Once).
var envMasterKeyCache = []byte{}

// GetMasterKey returns the cached key, decoding it from the environment on
// first use.
func (v *envMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
	if len(envMasterKeyCache) > 0 {
		return envMasterKeyCache, nil
	}

	raw := os.Getenv(EnvMasterEncryptKey)
	if raw == "" {
		return nil, errors.New("master encrypt key is not set")
	}

	decoded, err := base64.StdEncoding.DecodeString(raw)
	if err != nil {
		return nil, fmt.Errorf("failed to decode master encrypt key: %w", err)
	}
	envMasterKeyCache = decoded
	return decoded, nil
}
// NewFileMasterEncryptKeyVault returns a vault that reads the
// base64-encoded master key from the file at path.
func NewFileMasterEncryptKeyVault(path string) MasterEncryptKeyVault {
	return &fileMasterEncryptKeyVault{path: path}
}
var fileMasterKeyCache = []byte{}
type fileMasterEncryptKeyVault struct {
path string
}
func (v *fileMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
if len(fileMasterKeyCache) > 0 {
return fileMasterKeyCache, nil
}
key, err := os.ReadFile(v.path)
if err != nil {
return nil, fmt.Errorf("invalid master encrypt key file")
}
decodedKey, err := base64.StdEncoding.DecodeString(string(key))
if err != nil {
return nil, fmt.Errorf("invalid master encrypt key")
}
fileMasterKeyCache = decodedKey
return fileMasterKeyCache, nil
}

@ -4,11 +4,8 @@ import (
"context"
"errors"
"fmt"
"math/rand"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
@ -17,6 +14,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@ -46,7 +44,7 @@ type (
func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
l logging.Logger, ls lock.LockSystem, settingClient setting.Provider,
storagePolicyClient inventory.StoragePolicyClient, hasher hashid.Encoder, userClient inventory.UserClient,
cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient) fs.FileSystem {
cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient, encryptorFactory encrypt.CryptorFactory) fs.FileSystem {
return &DBFS{
user: u,
navigators: make(map[string]Navigator),
@ -61,6 +59,7 @@ func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inv
cache: cache,
stateKv: stateKv,
directLinkClient: directLinkClient,
encryptorFactory: encryptorFactory,
}
}
@ -79,6 +78,7 @@ type DBFS struct {
cache cache.Driver
stateKv cache.Driver
mu sync.Mutex
encryptorFactory encrypt.CryptorFactory
}
func (f *DBFS) Recycle() {
@ -122,7 +122,7 @@ func (f *DBFS) List(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.Fi
parent, err := f.getFileByPath(ctx, navigator, path)
if err != nil {
return nil, nil, fmt.Errorf("Parent not exist: %w", err)
return nil, nil, fmt.Errorf("parent not exist: %w", err)
}
pageSize := 0
@ -286,6 +286,7 @@ func (f *DBFS) CreateEntity(ctx context.Context, file fs.File, policy *ent.Stora
Source: req.Props.SavePath,
Size: req.Props.Size,
UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
EncryptMetadata: o.encryptMetadata,
})
if err != nil {
_ = inventory.Rollback(tx)
@ -616,6 +617,7 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
ModifiedAt: o.UploadRequest.Props.LastModified,
UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
Importing: o.UploadRequest.ImportFrom != nil,
EncryptMetadata: o.encryptMetadata,
}
}
@ -644,6 +646,20 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
return newFile(parent, file), nil
}
// generateEncryptMetadata creates AES-256-CTR encryption metadata for an
// upload when either the client listed AES-256-CTR as its first supported
// cipher, or the policy has relay enabled. Returns (nil, nil) when neither
// condition holds, i.e. no encryption metadata is generated.
func (f *DBFS) generateEncryptMetadata(ctx context.Context, uploadRequest *fs.UploadRequest, policy *ent.StoragePolicy) (*types.EncryptMetadata, error) {
	// Relay-enabled policies generate metadata regardless of the client's
	// declared cipher support.
	relayEnabled := policy.Settings != nil && policy.Settings.Relay
	if (len(uploadRequest.Props.EncryptionSupported) > 0 && uploadRequest.Props.EncryptionSupported[0] == types.CipherAES256CTR) || relayEnabled {
		encryptor, err := f.encryptorFactory(types.CipherAES256CTR)
		if err != nil {
			return nil, fmt.Errorf("failed to get encryptor: %w", err)
		}
		return encryptor.GenerateMetadata(ctx)
	}

	// No encryption requested or required.
	return nil, nil
}
// getPreferredPolicy tries to get the preferred storage policy for the given file.
func (f *DBFS) getPreferredPolicy(ctx context.Context, file *File) (*ent.StoragePolicy, error) {
ownerGroup := file.Owner().Edges.Group
@ -651,7 +667,8 @@ func (f *DBFS) getPreferredPolicy(ctx context.Context, file *File) (*ent.Storage
return nil, fmt.Errorf("owner group not loaded")
}
groupPolicy, err := f.storagePolicyClient.GetByGroup(ctx, ownerGroup)
sc, _ := inventory.InheritTx(ctx, f.storagePolicyClient)
groupPolicy, err := sc.GetByGroup(ctx, ownerGroup)
if err != nil {
return nil, serializer.NewError(serializer.CodeDBError, "Failed to get available storage policies", err)
}
@ -765,44 +782,17 @@ func (f *DBFS) navigatorId(path *fs.URI) string {
// generateSavePath generates the physical save path for the upload request.
func generateSavePath(policy *ent.StoragePolicy, req *fs.UploadRequest, user *ent.User) string {
baseTable := map[string]string{
"{randomkey16}": util.RandStringRunes(16),
"{randomkey8}": util.RandStringRunes(8),
"{timestamp}": strconv.FormatInt(time.Now().Unix(), 10),
"{timestamp_nano}": strconv.FormatInt(time.Now().UnixNano(), 10),
"{randomnum2}": strconv.Itoa(rand.Intn(2)),
"{randomnum3}": strconv.Itoa(rand.Intn(3)),
"{randomnum4}": strconv.Itoa(rand.Intn(4)),
"{randomnum8}": strconv.Itoa(rand.Intn(8)),
"{uid}": strconv.Itoa(user.ID),
"{datetime}": time.Now().Format("20060102150405"),
"{date}": time.Now().Format("20060102"),
"{year}": time.Now().Format("2006"),
"{month}": time.Now().Format("01"),
"{day}": time.Now().Format("02"),
"{hour}": time.Now().Format("15"),
"{minute}": time.Now().Format("04"),
"{second}": time.Now().Format("05"),
currentTime := time.Now()
dynamicReplace := func(rule string, pathAvailable bool) string {
return util.ReplaceMagicVar(rule, fs.Separator, pathAvailable, false, currentTime, user.ID, req.Props.Uri.Name(), req.Props.Uri.Dir(), "")
}
dirRule := policy.DirNameRule
dirRule = filepath.ToSlash(dirRule)
dirRule = util.Replace(baseTable, dirRule)
dirRule = util.Replace(map[string]string{
"{path}": req.Props.Uri.Dir() + fs.Separator,
}, dirRule)
originName := req.Props.Uri.Name()
nameTable := map[string]string{
"{originname}": originName,
"{ext}": filepath.Ext(originName),
"{originname_without_ext}": strings.TrimSuffix(originName, filepath.Ext(originName)),
"{uuid}": uuid.Must(uuid.NewV4()).String(),
}
dirRule = dynamicReplace(dirRule, true)
nameRule := policy.FileNameRule
nameRule = util.Replace(baseTable, nameRule)
nameRule = util.Replace(nameTable, nameRule)
nameRule = dynamicReplace(nameRule, false)
return path.Join(path.Clean(dirRule), nameRule)
}

@ -120,6 +120,20 @@ func (f *DBFS) Create(ctx context.Context, path *fs.URI, fileType types.FileType
ancestor = newFile(ancestor, newFolder)
} else {
// valide file name
policy, err := f.getPreferredPolicy(ctx, ancestor)
if err != nil {
return nil, err
}
if err := validateExtension(desired[i], policy); err != nil {
return nil, fs.ErrIllegalObjectName.WithError(err)
}
if err := validateFileNameRegexp(desired[i], policy); err != nil {
return nil, fs.ErrIllegalObjectName.WithError(err)
}
file, err := f.createFile(ctx, ancestor, desired[i], fileType, o)
if err != nil {
return nil, err
@ -170,6 +184,10 @@ func (f *DBFS) Rename(ctx context.Context, path *fs.URI, newName string) (fs.Fil
if err := validateExtension(newName, policy); err != nil {
return nil, fs.ErrIllegalObjectName.WithError(err)
}
if err := validateFileNameRegexp(newName, policy); err != nil {
return nil, fs.ErrIllegalObjectName.WithError(err)
}
}
// Lock target
@ -294,9 +312,9 @@ func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([
o.apply(opt)
}
var opt *types.EntityRecycleOption
var opt *types.EntityProps
if o.UnlinkOnly {
opt = &types.EntityRecycleOption{
opt = &types.EntityProps{
UnlinkOnly: true,
}
}
@ -738,11 +756,10 @@ func (f *DBFS) setCurrentVersion(ctx context.Context, target *File, versionId in
return nil
}
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityRecycleOption) ([]fs.Entity, inventory.StorageDiff, error) {
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityProps) ([]fs.Entity, inventory.StorageDiff, error) {
if f.user.Edges.Group == nil {
return nil, nil, fmt.Errorf("user group not loaded")
}
limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
allStaleEntities := make([]fs.Entity, 0, len(targets))
storageDiff := make(inventory.StorageDiff)
for n, files := range targets {
@ -756,8 +773,7 @@ func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, f
// List all files to be deleted
toBeDeletedFiles := make([]*File, 0, len(files))
if err := n.Walk(ctx, files, limit, intsets.MaxInt, func(targets []*File, level int) error {
limit -= len(targets)
if err := n.Walk(ctx, files, intsets.MaxInt, intsets.MaxInt, func(targets []*File, level int) error {
toBeDeletedFiles = append(toBeDeletedFiles, targets...)
return nil
}); err != nil {

@ -2,6 +2,7 @@ package dbfs
import (
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
)
@ -26,6 +27,7 @@ type dbfsOption struct {
streamListResponseCallback func(parent fs.File, file []fs.File)
ancestor *File
notRoot bool
encryptMetadata *types.EncryptMetadata
}
func newDbfsOption() *dbfsOption {
@ -50,6 +52,13 @@ func (f optionFunc) Apply(o any) {
}
}
// WithEncryptMetadata sets the encrypt metadata for the upload operation.
// Passing nil records no metadata, leaving the operation unencrypted.
func WithEncryptMetadata(encryptMetadata *types.EncryptMetadata) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.encryptMetadata = encryptMetadata
	})
}
// WithFilePublicMetadata enables loading file public metadata.
func WithFilePublicMetadata() fs.Option {
return optionFunc(func(o *dbfsOption) {

@ -3,6 +3,7 @@ package dbfs
import (
"context"
"fmt"
"time"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
@ -100,6 +101,7 @@ func (f *DBFS) PatchMetadata(ctx context.Context, path []*fs.URI, metas ...fs.Me
metadataMap := make(map[string]string)
privateMap := make(map[string]bool)
deleted := make([]string, 0)
updateModifiedAt := false
for _, meta := range metas {
if meta.Remove {
deleted = append(deleted, meta.Key)
@ -109,6 +111,9 @@ func (f *DBFS) PatchMetadata(ctx context.Context, path []*fs.URI, metas ...fs.Me
if meta.Private {
privateMap[meta.Key] = meta.Private
}
if meta.UpdateModifiedAt {
updateModifiedAt = true
}
}
fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
@ -128,6 +133,13 @@ func (f *DBFS) PatchMetadata(ctx context.Context, path []*fs.URI, metas ...fs.Me
return fmt.Errorf("failed to remove metadata: %w", err)
}
}
if updateModifiedAt {
if err := fc.UpdateModifiedAt(ctx, target.Model, time.Now()); err != nil {
_ = inventory.Rollback(tx)
return fmt.Errorf("failed to update file modified at: %w", err)
}
}
}
if err := inventory.Commit(tx); err != nil {

@ -157,6 +157,14 @@ func (n *shareNavigator) Root(ctx context.Context, path *fs.URI) (*File, error)
}
if n.user.ID != n.owner.ID && !n.user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionShareDownload)) {
if inventory.IsAnonymousUser(n.user) {
return nil, serializer.NewError(
serializer.CodeAnonymouseAccessDenied,
fmt.Sprintf("You don't have permission to access share links"),
err,
)
}
return nil, serializer.NewError(
serializer.CodeNoPermissionErr,
fmt.Sprintf("You don't have permission to access share links"),

@ -129,6 +129,20 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
return nil, err
}
// Encryption setting
var (
encryptMetadata *types.EncryptMetadata
)
if !policy.Settings.Encryption || req.ImportFrom != nil || len(req.Props.EncryptionSupported) == 0 {
req.Props.EncryptionSupported = nil
} else {
res, err := f.generateEncryptMetadata(ctx, req, policy)
if err != nil {
return nil, serializer.NewError(serializer.CodeInternalSetting, "Failed to generate encrypt metadata", err)
}
encryptMetadata = res
}
// validate upload request
if err := validateNewFile(req.Props.Uri.Name(), req.Props.Size, policy); err != nil {
return nil, err
@ -146,11 +160,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
if req.Props.SavePath == "" || isThumbnailAndPolicyNotAvailable {
req.Props.SavePath = generateSavePath(policy, req, f.user)
if isThumbnailAndPolicyNotAvailable {
req.Props.SavePath = fmt.Sprintf(
"%s.%s%s",
req.Props.SavePath,
util.RandStringRunes(16),
f.settingClient.ThumbEntitySuffix(ctx))
req.Props.SavePath = path.Clean(util.ReplaceMagicVar(f.settingClient.ThumbEntitySuffix(ctx), fs.Separator, true, true, time.Now(), f.user.ID, req.Props.Uri.Name(), req.Props.Uri.Path(), req.Props.SavePath))
}
}
@ -174,6 +184,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
entity, err := f.CreateEntity(ctx, ancestor, policy, entityType, req,
WithPreviousVersion(req.Props.PreviousVersion),
fs.WithUploadRequest(req),
WithEncryptMetadata(encryptMetadata),
WithRemoveStaleEntities(),
)
if err != nil {
@ -189,6 +200,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
WithPreferredStoragePolicy(policy),
WithErrorOnConflict(),
WithAncestor(ancestor),
WithEncryptMetadata(encryptMetadata),
)
if err != nil {
_ = inventory.Rollback(dbTx)
@ -219,14 +231,15 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
session := &fs.UploadSession{
Props: &fs.UploadProps{
Uri: req.Props.Uri,
Size: req.Props.Size,
SavePath: req.Props.SavePath,
LastModified: req.Props.LastModified,
UploadSessionID: req.Props.UploadSessionID,
ExpireAt: req.Props.ExpireAt,
EntityType: req.Props.EntityType,
Metadata: req.Props.Metadata,
Uri: req.Props.Uri,
Size: req.Props.Size,
SavePath: req.Props.SavePath,
LastModified: req.Props.LastModified,
UploadSessionID: req.Props.UploadSessionID,
ExpireAt: req.Props.ExpireAt,
EntityType: req.Props.EntityType,
Metadata: req.Props.Metadata,
ClientSideEncrypted: req.Props.ClientSideEncrypted,
},
FileID: fileId,
NewFileCreated: !fileExisted,
@ -238,6 +251,10 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
LockToken: lockToken, // Prevent lock being released.
}
if encryptMetadata != nil {
session.EncryptMetadata = encryptMetadata
}
// TODO: frontend should create new upload session if resumed session does not exist.
return session, nil
}

@ -3,10 +3,12 @@ package dbfs
import (
"context"
"fmt"
"regexp"
"strings"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"strings"
)
const MaxFileNameLength = 256
@ -30,18 +32,35 @@ func validateFileName(name string) error {
// validateExtension validates the file extension.
func validateExtension(name string, policy *ent.StoragePolicy) error {
// 不需要验证
if len(policy.Settings.FileType) == 0 {
return nil
}
if !util.IsInExtensionList(policy.Settings.FileType, name) {
inList := util.IsInExtensionList(policy.Settings.FileType, name)
if (policy.Settings.IsFileTypeDenyList && inList) || (!policy.Settings.IsFileTypeDenyList && !inList) {
return fmt.Errorf("file extension is not allowed")
}
return nil
}
// validateFileNameRegexp validates the file name against the policy's optional
// name regexp. Depending on IsNameRegexpDenyList, a match either rejects the
// name (deny list) or is required for acceptance (allow list).
func validateFileNameRegexp(name string, policy *ent.StoragePolicy) error {
	if policy.Settings.NameRegexp == "" {
		return nil
	}

	// The pattern comes from per-policy settings, so it cannot be compiled at
	// package scope; MatchString compiles per call, which is acceptable here.
	match, err := regexp.MatchString(policy.Settings.NameRegexp, name)
	if err != nil {
		// Wrap with %w so callers can unwrap the underlying regexp error.
		return fmt.Errorf("invalid file name regexp: %w", err)
	}

	if (policy.Settings.IsNameRegexpDenyList && match) || (!policy.Settings.IsNameRegexpDenyList && !match) {
		return fmt.Errorf("file name is not allowed by regexp")
	}

	return nil
}
// validateFileSize validates the file size.
func validateFileSize(size int64, policy *ent.StoragePolicy) error {
if policy.MaxSize == 0 {
@ -56,11 +75,15 @@ func validateFileSize(size int64, policy *ent.StoragePolicy) error {
// validateNewFile validates the upload request.
func validateNewFile(fileName string, size int64, policy *ent.StoragePolicy) error {
if err := validateFileName(fileName); err != nil {
return err
return fs.ErrIllegalObjectName.WithError(err)
}
if err := validateExtension(fileName, policy); err != nil {
return err
return fs.ErrIllegalObjectName.WithError(err)
}
if err := validateFileNameRegexp(fileName, policy); err != nil {
return fs.ErrIllegalObjectName.WithError(err)
}
if err := validateFileSize(size, policy); err != nil {

@ -183,6 +183,8 @@ type (
UploadSessionID() *uuid.UUID
CreatedBy() *ent.User
Model() *ent.Entity
Props() *types.EntityProps
Encrypted() bool
}
FileExtendedInfo struct {
@ -203,10 +205,11 @@ type (
}
MetadataPatch struct {
Key string `json:"key" binding:"required"`
Value string `json:"value"`
Private bool `json:"private" binding:"ne=true"`
Remove bool `json:"remove"`
Key string `json:"key" binding:"required"`
Value string `json:"value"`
Private bool `json:"private" binding:"ne=true"`
Remove bool `json:"remove"`
UpdateModifiedAt bool `json:"-"`
}
// ListFileResult result of listing files.
@ -237,38 +240,40 @@ type (
// UploadCredential for uploading files in client side.
UploadCredential struct {
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"` // 分块大小0 为部分快
Expires int64 `json:"expires"` // 上传凭证过期时间, Unix 时间戳
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
UploadID string `json:"uploadID,omitempty"`
Callback string `json:"callback,omitempty"` // 回调地址
Uri string `json:"uri,omitempty"` // 存储路径
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // COS用有效期
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *ent.StoragePolicy
CallbackSecret string `json:"callback_secret,omitempty"`
MimeType string `json:"mime_type,omitempty"` // Expected mimetype
UploadPolicy string `json:"upload_policy,omitempty"` // Upyun upload policy
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"` // 分块大小0 为部分快
Expires int64 `json:"expires"` // 上传凭证过期时间, Unix 时间戳
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
UploadID string `json:"uploadID,omitempty"`
Callback string `json:"callback,omitempty"`
Uri string `json:"uri,omitempty"` // 存储路径
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // COS用有效期
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *ent.StoragePolicy
CallbackSecret string `json:"callback_secret,omitempty"`
MimeType string `json:"mime_type,omitempty"` // Expected mimetype
UploadPolicy string `json:"upload_policy,omitempty"` // Upyun upload policy
EncryptMetadata *types.EncryptMetadata `json:"encrypt_metadata,omitempty"`
}
// UploadSession stores the information of an upload session, used in server side.
UploadSession struct {
UID int // 发起者
Policy *ent.StoragePolicy
FileID int // ID of the placeholder file
EntityID int // ID of the new entity
Callback string // 回调 URL 地址
CallbackSecret string // Callback secret
UploadID string // Multi-part upload ID
UploadURL string
Credential string
ChunkSize int64
SentinelTaskID int
NewFileCreated bool // If new file is created for this session
Importing bool // If the upload is importing from another file
UID int // 发起者
Policy *ent.StoragePolicy
FileID int // ID of the placeholder file
EntityID int // ID of the new entity
Callback string // 回调 URL 地址
CallbackSecret string // Callback secret
UploadID string // Multi-part upload ID
UploadURL string
Credential string
ChunkSize int64
SentinelTaskID int
NewFileCreated bool // If new file is created for this session
Importing bool // If the upload is importing from another file
EncryptMetadata *types.EncryptMetadata
LockToken string // Token of the locked placeholder file
Props *UploadProps
@ -287,8 +292,10 @@ type (
PreviousVersion string
// EntityType is the type of the entity to be created. If not set, a new file will be created
// with a default version entity. This will be set in update request for existing files.
EntityType *types.EntityType
ExpireAt time.Time
EntityType *types.EntityType
ExpireAt time.Time
EncryptionSupported []types.Cipher
ClientSideEncrypted bool // Whether the file stream is already encrypted by client side.
}
// FsOption options for underlying file system.
@ -698,6 +705,8 @@ func LockSessionToContext(ctx context.Context, session LockSession) context.Cont
return context.WithValue(ctx, LockSessionCtxKey{}, session)
}
// FindDesiredEntity finds the desired entity from the file.
// entityType is optional, if it is not nil, it will only return the entity with the given type.
func FindDesiredEntity(file File, version string, hasher hashid.Encoder, entityType *types.EntityType) (bool, Entity) {
if version == "" {
return true, file.PrimaryEntity()
@ -779,6 +788,14 @@ func (e *DbEntity) Model() *ent.Entity {
return e.model
}
// Props returns the raw entity properties stored on the underlying model.
// The result may be nil when the entity carries no extra properties.
func (e *DbEntity) Props() *types.EntityProps {
	return e.model.Props
}
// Encrypted reports whether this entity carries encryption metadata in its
// props, i.e. its stored content is encrypted.
func (e *DbEntity) Encrypted() bool {
	return e.model.Props != nil && e.model.Props.EncryptMetadata != nil
}
func NewEmptyEntity(u *ent.User) Entity {
return &DbEntity{
model: &ent.Entity{

@ -36,5 +36,11 @@ func (d *mimeDetector) TypeByName(p string) string {
return m
}
return mime.TypeByExtension(ext)
m := mime.TypeByExtension(ext)
if m != "" {
return m
}
// Fallback
return "application/octet-stream"
}

@ -25,6 +25,7 @@ const (
QuerySearchNameOpOr = "name_op_or"
QuerySearchUseOr = "use_or"
QuerySearchMetadataPrefix = "meta_"
QuerySearchMetadataExact = "exact_meta_"
QuerySearchCaseFolding = "case_folding"
QuerySearchType = "type"
QuerySearchTypeCategory = "category"
@ -218,7 +219,7 @@ func (u *URI) FileSystem() constants.FileSystemType {
func (u *URI) SearchParameters() *inventory.SearchFileParameters {
q := u.U.Query()
res := &inventory.SearchFileParameters{
Metadata: make(map[string]string),
Metadata: make([]inventory.MetadataFilter, 0),
}
withSearch := false
@ -252,7 +253,18 @@ func (u *URI) SearchParameters() *inventory.SearchFileParameters {
for k, v := range q {
if strings.HasPrefix(k, QuerySearchMetadataPrefix) {
res.Metadata[strings.TrimPrefix(k, QuerySearchMetadataPrefix)] = v[0]
res.Metadata = append(res.Metadata, inventory.MetadataFilter{
Key: strings.TrimPrefix(k, QuerySearchMetadataPrefix),
Value: v[0],
Exact: false,
})
withSearch = true
} else if strings.HasPrefix(k, QuerySearchMetadataExact) {
res.Metadata = append(res.Metadata, inventory.MetadataFilter{
Key: strings.TrimPrefix(k, QuerySearchMetadataExact),
Value: v[0],
Exact: true,
})
withSearch = true
}
}

@ -3,19 +3,153 @@ package manager
import (
"archive/zip"
"context"
"encoding/gob"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"time"
"github.com/bodgit/sevenzip"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/encoding/korean"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/encoding/traditionalchinese"
"golang.org/x/text/encoding/unicode"
"golang.org/x/tools/container/intsets"
)
type (
	// ArchivedFile describes a single entry inside an archive file, as
	// returned by ListArchiveFiles.
	ArchivedFile struct {
		Name        string     `json:"name"`
		Size        int64      `json:"size"`
		UpdatedAt   *time.Time `json:"updated_at"`
		IsDirectory bool       `json:"is_directory"`
	}
)
const (
	// ArchiveListCacheTTL is how long, in seconds, an archive file listing
	// stays in the KV cache before it must be recomputed.
	ArchiveListCacheTTL = 3600 // 1 hour
)
// Register the archive listing result type so cached listings can round-trip
// through gob-encoded KV entries.
func init() {
	gob.Register([]ArchivedFile{})
}
// ZipEncodings maps lowercase encoding identifiers (as accepted in the
// zipEncoding request parameter) to the decoder used for transcoding
// non-UTF8 zip entry names.
var ZipEncodings = map[string]encoding.Encoding{
	"ibm866":            charmap.CodePage866,
	"iso8859_2":         charmap.ISO8859_2,
	"iso8859_3":         charmap.ISO8859_3,
	"iso8859_4":         charmap.ISO8859_4,
	"iso8859_5":         charmap.ISO8859_5,
	"iso8859_6":         charmap.ISO8859_6,
	"iso8859_7":         charmap.ISO8859_7,
	"iso8859_8":         charmap.ISO8859_8,
	"iso8859_8I":        charmap.ISO8859_8I,
	"iso8859_10":        charmap.ISO8859_10,
	"iso8859_13":        charmap.ISO8859_13,
	"iso8859_14":        charmap.ISO8859_14,
	"iso8859_15":        charmap.ISO8859_15,
	"iso8859_16":        charmap.ISO8859_16,
	"koi8r":             charmap.KOI8R,
	"koi8u":             charmap.KOI8U,
	"macintosh":         charmap.Macintosh,
	"windows874":        charmap.Windows874,
	"windows1250":       charmap.Windows1250,
	"windows1251":       charmap.Windows1251,
	"windows1252":       charmap.Windows1252,
	"windows1253":       charmap.Windows1253,
	"windows1254":       charmap.Windows1254,
	"windows1255":       charmap.Windows1255,
	"windows1256":       charmap.Windows1256,
	"windows1257":       charmap.Windows1257,
	"windows1258":       charmap.Windows1258,
	"macintoshcyrillic": charmap.MacintoshCyrillic,
	"gbk":               simplifiedchinese.GBK,
	"gb18030":           simplifiedchinese.GB18030,
	"big5":              traditionalchinese.Big5,
	"eucjp":             japanese.EUCJP,
	"iso2022jp":         japanese.ISO2022JP,
	"shiftjis":          japanese.ShiftJIS,
	"euckr":             korean.EUCKR,
	"utf16be":           unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM),
	"utf16le":           unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM),
}
// ListArchiveFiles returns the entries of a zip or 7z archive without
// extracting it. Listings are cached per (entity, encoding) for
// ArchiveListCacheTTL. zipEncoding optionally names the text encoding used
// for non-UTF8 zip entry names; it must be a key of ZipEncodings.
func (m *manager) ListArchiveFiles(ctx context.Context, uri *fs.URI, entity, zipEncoding string) ([]ArchivedFile, error) {
	file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
	if err != nil {
		return nil, fmt.Errorf("failed to get file: %w", err)
	}

	if file.Type() != types.FileTypeFile {
		return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("path %s is not a file", uri))
	}

	// Validate file size against the group's decompress limit (0 = unlimited).
	if m.user.Edges.Group.Settings.DecompressSize > 0 && file.Size() > m.user.Edges.Group.Settings.DecompressSize {
		return nil, fs.ErrFileSizeTooBig.WithError(fmt.Errorf("file size %d exceeds the limit %d", file.Size(), m.user.Edges.Group.Settings.DecompressSize))
	}

	found, targetEntity := fs.FindDesiredEntity(file, entity, m.hasher, nil)
	if !found {
		return nil, fs.ErrEntityNotExist
	}

	// Resolve the requested entry-name encoding before doing any I/O.
	var enc encoding.Encoding
	if zipEncoding != "" {
		var ok bool
		enc, ok = ZipEncodings[strings.ToLower(zipEncoding)]
		if !ok {
			return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("not supported zip encoding: %s", zipEncoding))
		}
	}

	cacheKey := getArchiveListCacheKey(targetEntity.ID(), zipEncoding)
	kv := m.kv
	if res, ok := kv.Get(cacheKey); ok {
		// Guarded assertion: a stale or foreign cache entry should fall
		// through to recomputation instead of panicking the request.
		if cached, ok := res.([]ArchivedFile); ok {
			return cached, nil
		}
	}

	es, err := m.GetEntitySource(ctx, 0, fs.WithEntity(targetEntity))
	if err != nil {
		return nil, fmt.Errorf("failed to get entity source: %w", err)
	}

	es.Apply(entitysource.WithContext(ctx))
	defer es.Close()

	// Pick the reader implementation by file extension.
	var readerFunc func(ctx context.Context, file io.ReaderAt, size int64, textEncoding encoding.Encoding) ([]ArchivedFile, error)
	switch file.Ext() {
	case "zip":
		readerFunc = getZipFileList
	case "7z":
		readerFunc = get7zFileList
	default:
		return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("not supported archive format: %s", file.Ext()))
	}

	sr := io.NewSectionReader(es, 0, targetEntity.Size())
	fileList, err := readerFunc(ctx, sr, targetEntity.Size(), enc)
	if err != nil {
		return nil, fmt.Errorf("failed to read file list: %w", err)
	}

	kv.Set(cacheKey, fileList, ArchiveListCacheTTL)
	return fileList, nil
}
func (m *manager) CreateArchive(ctx context.Context, uris []*fs.URI, writer io.Writer, opts ...fs.Option) (int, error) {
o := newOption()
for _, opt := range opts {
@ -122,3 +256,62 @@ func (m *manager) compressFileToArchive(ctx context.Context, parent string, file
return err
}
// getZipFileList reads the central directory of a zip archive and returns its
// entries. When textEncoding is non-nil, entry names flagged as non-UTF8 are
// transcoded from that encoding; on decode failure the raw name is kept.
// ctx is unused but kept for signature parity with get7zFileList.
func getZipFileList(ctx context.Context, file io.ReaderAt, size int64, textEncoding encoding.Encoding) ([]ArchivedFile, error) {
	zr, err := zip.NewReader(file, size)
	if err != nil {
		return nil, fmt.Errorf("failed to create zip reader: %w", err)
	}

	fileList := make([]ArchivedFile, 0, len(zr.File))
	for _, f := range zr.File {
		name := f.Name
		if f.NonUTF8 && textEncoding != nil {
			// Best-effort transcoding of legacy (non-UTF8) entry names.
			// Note: the previous version also decoded hdr.Comment, but the
			// comment never reaches the result, so that work is dropped.
			if decoded, err := textEncoding.NewDecoder().String(name); err == nil {
				name = decoded
			}
		}

		info := f.FileInfo()
		modTime := info.ModTime()
		fileList = append(fileList, ArchivedFile{
			Name:        util.FormSlash(name),
			Size:        info.Size(),
			UpdatedAt:   &modTime,
			IsDirectory: info.IsDir(),
		})
	}

	return fileList, nil
}
// get7zFileList reads the entry table of a 7z archive and returns its
// entries. extEncoding is currently unused; it is kept only for signature
// parity with getZipFileList. ctx is likewise unused.
func get7zFileList(ctx context.Context, file io.ReaderAt, size int64, extEncoding encoding.Encoding) ([]ArchivedFile, error) {
	reader, err := sevenzip.NewReader(file, size)
	if err != nil {
		return nil, fmt.Errorf("failed to create 7z reader: %w", err)
	}

	entries := make([]ArchivedFile, 0, len(reader.File))
	for _, entry := range reader.File {
		stat := entry.FileInfo()
		mtime := stat.ModTime()
		entries = append(entries, ArchivedFile{
			Name:        util.FormSlash(entry.Name),
			Size:        stat.Size(),
			UpdatedAt:   &mtime,
			IsDirectory: stat.IsDir(),
		})
	}

	return entries, nil
}
// getArchiveListCacheKey builds the KV cache key for an archive listing,
// scoped by entity ID and the requested entry-name encoding.
func getArchiveListCacheKey(entity int, encoding string) string {
	key := fmt.Sprintf("archive_list_%d_%s", entity, encoding)
	return key
}

@ -120,7 +120,7 @@ func (m *manager) GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectL
}
source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx))
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
sourceUrl, err := source.Url(ctx,
entitysource.WithSpeedLimit(int64(m.user.Edges.Group.SpeedLimit)),
entitysource.WithDisplayName(file.Name()),
@ -168,7 +168,7 @@ func (m *manager) GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.Dir
)
// Try to read from cache.
cacheKey := entityUrlCacheKey(primaryEntity.ID(), int64(dl.Speed), dl.Name, false,
cacheKey := entityUrlCacheKey(primaryEntity.ID(), int64(dl.Speed), dl.Name, o.IsDownload,
m.settings.SiteURL(ctx).String())
if cached, ok := m.kv.Get(cacheKey); ok {
cachedItem := cached.(EntityUrlCache)
@ -182,10 +182,10 @@ func (m *manager) GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.Dir
}
source := entitysource.NewEntitySource(primaryEntity, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx))
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
downloadUrl, err := source.Url(ctx,
entitysource.WithExpire(o.Expire),
entitysource.WithDownload(false),
entitysource.WithDownload(o.IsDownload),
entitysource.WithSpeedLimit(int64(dl.Speed)),
entitysource.WithDisplayName(dl.Name),
)
@ -282,7 +282,7 @@ func (m *manager) GetEntityUrls(ctx context.Context, args []GetEntityUrlArgs, op
// Cache miss, Generate new url
source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx))
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
downloadUrl, err := source.Url(ctx,
entitysource.WithExpire(o.Expire),
entitysource.WithDownload(o.IsDownload),
@ -349,7 +349,7 @@ func (m *manager) GetEntitySource(ctx context.Context, entityID int, opts ...fs.
}
return entitysource.NewEntitySource(entity, handler, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(), m.l,
m.config, m.dep.MimeDetector(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
}
func (l *manager) SetCurrentVersion(ctx context.Context, path *fs.URI, version int) error {

@ -22,6 +22,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@ -83,6 +84,7 @@ type EntitySourceOptions struct {
OneTimeDownloadKey string
Ctx context.Context
IsThumb bool
DisableCryptor bool
}
type EntityUrl struct {
@ -143,26 +145,39 @@ func WithThumb(isThumb bool) EntitySourceOption {
})
}
// WithDisableCryptor disable cryptor for file source, file stream will be
// presented as is.
func WithDisableCryptor() EntitySourceOption {
return EntitySourceOptionFunc(func(option any) {
option.(*EntitySourceOptions).DisableCryptor = true
})
}
func (f EntitySourceOptionFunc) Apply(option any) {
f(option)
}
type (
entitySource struct {
e fs.Entity
handler driver.Handler
policy *ent.StoragePolicy
generalAuth auth.Auth
settings setting.Provider
hasher hashid.Encoder
c request.Client
l logging.Logger
config conf.ConfigProvider
mime mime.MimeDetector
e fs.Entity
handler driver.Handler
policy *ent.StoragePolicy
generalAuth auth.Auth
settings setting.Provider
hasher hashid.Encoder
c request.Client
l logging.Logger
config conf.ConfigProvider
mime mime.MimeDetector
encryptorFactory encrypt.CryptorFactory
rsc io.ReadCloser
pos int64
o *EntitySourceOptions
// Cache for resetRequest URL and expiry
cachedUrl string
cachedExpiry time.Time
}
)
@ -193,20 +208,22 @@ func NewEntitySource(
l logging.Logger,
config conf.ConfigProvider,
mime mime.MimeDetector,
encryptorFactory encrypt.CryptorFactory,
opts ...EntitySourceOption,
) EntitySource {
s := &entitySource{
e: e,
handler: handler,
policy: policy,
generalAuth: generalAuth,
settings: settings,
hasher: hasher,
c: c,
config: config,
l: l,
mime: mime,
o: &EntitySourceOptions{},
e: e,
handler: handler,
policy: policy,
generalAuth: generalAuth,
settings: settings,
hasher: hasher,
c: c,
config: config,
l: l,
mime: mime,
encryptorFactory: encryptorFactory,
o: &EntitySourceOptions{},
}
for _, opt := range opts {
opt.Apply(s.o)
@ -215,6 +232,10 @@ func NewEntitySource(
}
func (f *entitySource) Apply(opts ...EntitySourceOption) {
if len(opts) > 0 {
// Clear cache when options are applied as they might affect URL generation
f.clearUrlCache()
}
for _, opt := range opts {
opt.Apply(f.o)
}
@ -229,7 +250,7 @@ func (f *entitySource) CloneToLocalSrc(t types.EntityType, src string) (EntitySo
policy := &ent.StoragePolicy{Type: types.PolicyTypeLocal}
handler := local.New(policy, f.l, f.config)
newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime).(*entitySource)
newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime, f.encryptorFactory).(*entitySource)
newSrc.o = f.o
return newSrc, nil
}
@ -247,6 +268,10 @@ func (f *entitySource) LocalPath(ctx context.Context) string {
}
func (f *entitySource) Serve(w http.ResponseWriter, r *http.Request, opts ...EntitySourceOption) {
if len(opts) > 0 {
// Clear cache when options are applied as they might affect URL generation
f.clearUrlCache()
}
for _, opt := range opts {
opt.Apply(f.o)
}
@ -316,6 +341,20 @@ func (f *entitySource) Serve(w http.ResponseWriter, r *http.Request, opts ...Ent
response.Header.Del("ETag")
response.Header.Del("Content-Disposition")
response.Header.Del("Cache-Control")
// If the response is successful, decrypt the body if needed
if response.StatusCode >= 200 && response.StatusCode < 300 {
// Parse offset from Content-Range header if present
offset := parseContentRangeOffset(response.Header.Get("Content-Range"))
body, err := f.getDecryptedRsc(response.Body, offset)
if err != nil {
return fmt.Errorf("failed to get decrypted rsc: %w", err)
}
response.Body = body
}
logging.Request(f.l,
false,
response.StatusCode,
@ -478,16 +517,22 @@ func (f *entitySource) Read(p []byte) (n int, err error) {
}
func (f *entitySource) ReadAt(p []byte, off int64) (n int, err error) {
if f.IsLocal() {
if f.rsc == nil {
err = f.resetRequest()
}
if readAt, ok := f.rsc.(io.ReaderAt); ok {
return readAt.ReadAt(p, off)
if f.rsc == nil {
err = f.resetRequest()
if err != nil {
return 0, err
}
}
if readAt, ok := f.rsc.(io.ReaderAt); ok {
return readAt.ReadAt(p, off)
}
return 0, errors.New("source does not support ReadAt")
// For non-local sources, use HTTP range request to read at specific offset
rsc, err := f.getRsc(off)
if err != nil {
return 0, err
}
return io.ReadFull(rsc, p)
}
func (f *entitySource) Seek(offset int64, whence int) (int64, error) {
@ -524,16 +569,26 @@ func (f *entitySource) Close() error {
return nil
}
// clearUrlCache invalidates the memoized source URL and its expiry time so
// the next access regenerates them.
func (f *entitySource) clearUrlCache() {
	f.cachedUrl, f.cachedExpiry = "", time.Time{}
}
func (f *entitySource) ShouldInternalProxy(opts ...EntitySourceOption) bool {
for _, opt := range opts {
opt.Apply(f.o)
}
handlerCapability := f.handler.Capabilities()
return f.e.ID() == 0 || handlerCapability.StaticFeatures.Enabled(int(driver.HandlerCapabilityProxyRequired)) ||
f.policy.Settings.InternalProxy && !f.o.NoInternalProxy
(f.policy.Settings.InternalProxy || f.e.Encrypted()) && !f.o.NoInternalProxy
}
func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*EntityUrl, error) {
if len(opts) > 0 {
// Clear cache when options are applied as they might affect URL generation
f.clearUrlCache()
}
for _, opt := range opts {
opt.Apply(f.o)
}
@ -554,6 +609,7 @@ func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*En
// 1. Internal proxy is required by driver's definition
// 2. Internal proxy is enabled in Policy setting and not disabled by option
// 3. It's an empty entity.
// 4. The entity is encrypted and internal proxy not disabled by option
handlerCapability := f.handler.Capabilities()
if f.ShouldInternalProxy() {
siteUrl := f.settings.SiteURL(ctx)
@ -613,50 +669,105 @@ func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*En
func (f *entitySource) resetRequest() error {
// For inbound files, we can use the handler to open the file directly
if f.IsLocal() && f.rsc != nil {
return nil
}
rsc, err := f.getRsc(f.pos)
if err != nil {
return fmt.Errorf("failed to get rsc: %w", err)
}
f.rsc = rsc
return nil
}
func (f *entitySource) getRsc(pos int64) (io.ReadCloser, error) {
// For inbound files, we can use the handler to open the file directly
var rsc io.ReadCloser
if f.IsLocal() {
if f.rsc == nil {
file, err := f.handler.Open(f.o.Ctx, f.e.Source())
file, err := f.handler.Open(f.o.Ctx, f.e.Source())
if err != nil {
return nil, fmt.Errorf("failed to open inbound file: %w", err)
}
if pos > 0 {
_, err = file.Seek(pos, io.SeekStart)
if err != nil {
return fmt.Errorf("failed to open inbound file: %w", err)
return nil, fmt.Errorf("failed to seek inbound file: %w", err)
}
}
if f.pos > 0 {
_, err = file.Seek(f.pos, io.SeekStart)
if err != nil {
return fmt.Errorf("failed to seek inbound file: %w", err)
}
if f.o.SpeedLimit > 0 {
bucket := ratelimit.NewBucketWithRate(float64(f.o.SpeedLimit), f.o.SpeedLimit)
rsc = lrs{file, ratelimit.Reader(file, bucket)}
} else {
rsc = file
}
} else {
var urlStr string
now := time.Now()
// Check if we have a valid cached URL and expiry
if f.cachedUrl != "" && now.Before(f.cachedExpiry.Add(-time.Minute)) {
// Use cached URL if it's still valid (with 1 minute buffer before expiry)
urlStr = f.cachedUrl
} else {
// Generate new URL and cache it
expire := now.Add(defaultUrlExpire)
u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
if err != nil {
return nil, fmt.Errorf("failed to generate download url: %w", err)
}
f.rsc = file
// Cache the URL and expiry
f.cachedUrl = u.Url
f.cachedExpiry = expire
urlStr = u.Url
}
if f.o.SpeedLimit > 0 {
bucket := ratelimit.NewBucketWithRate(float64(f.o.SpeedLimit), f.o.SpeedLimit)
f.rsc = lrs{f.rsc, ratelimit.Reader(f.rsc, bucket)}
}
h := http.Header{}
h.Set("Range", fmt.Sprintf("bytes=%d-", pos))
resp := f.c.Request(http.MethodGet, urlStr, nil,
request.WithContext(f.o.Ctx),
request.WithLogger(f.l),
request.WithHeader(h),
).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
if resp.Err != nil {
return nil, fmt.Errorf("failed to request download url: %w", resp.Err)
}
return nil
rsc = resp.Response.Body
}
expire := time.Now().Add(defaultUrlExpire)
u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
var err error
rsc, err = f.getDecryptedRsc(rsc, pos)
if err != nil {
return fmt.Errorf("failed to generate download url: %w", err)
return nil, fmt.Errorf("failed to get decrypted rsc: %w", err)
}
h := http.Header{}
h.Set("Range", fmt.Sprintf("bytes=%d-", f.pos))
resp := f.c.Request(http.MethodGet, u.Url, nil,
request.WithContext(f.o.Ctx),
request.WithLogger(f.l),
request.WithHeader(h),
).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
if resp.Err != nil {
return fmt.Errorf("failed to request download url: %w", resp.Err)
return rsc, nil
}
func (f *entitySource) getDecryptedRsc(rsc io.ReadCloser, pos int64) (io.ReadCloser, error) {
props := f.e.Props()
if props != nil && props.EncryptMetadata != nil && !f.o.DisableCryptor {
cryptor, err := f.encryptorFactory(props.EncryptMetadata.Algorithm)
if err != nil {
return nil, fmt.Errorf("failed to create decryptor: %w", err)
}
err = cryptor.LoadMetadata(f.o.Ctx, props.EncryptMetadata)
if err != nil {
return nil, fmt.Errorf("failed to load metadata: %w", err)
}
if err := cryptor.SetSource(rsc, nil, f.e.Size(), pos); err != nil {
return nil, fmt.Errorf("failed to set source: %w", err)
}
return cryptor, nil
}
f.rsc = resp.Response.Body
return nil
return rsc, nil
}
// capExpireTime make sure expire time is not too long or too short (if min or max is set)
@ -949,6 +1060,33 @@ func sumRangesSize(ranges []httpRange) (size int64) {
return
}
// parseContentRangeOffset extracts the starting byte offset from a
// Content-Range header value, which has the form "bytes start-end/total"
// (e.g. "bytes 100-200/1000").
// It returns 0 for an empty, non-byte-range, or otherwise unparsable header.
func parseContentRangeOffset(contentRange string) int64 {
	spec, hasUnit := strings.CutPrefix(contentRange, "bytes ")
	if !hasUnit {
		return 0
	}

	startStr, _, hasDash := strings.Cut(spec, "-")
	if !hasDash || startStr == "" {
		// Covers "bytes */total" (unsatisfied-range responses) and malformed specs.
		return 0
	}

	start, err := strconv.ParseInt(startStr, 10, 64)
	if err != nil {
		return 0
	}
	return start
}
// countingWriter counts how many bytes have been written to it.
type countingWriter int64

@ -8,6 +8,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/cos"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/ks3"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/obs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/onedrive"
@ -73,6 +74,8 @@ func (m *manager) GetStorageDriver(ctx context.Context, policy *ent.StoragePolic
return cos.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
case types.PolicyTypeS3:
return s3.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
case types.PolicyTypeKs3:
return ks3.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
case types.PolicyTypeObs:
return obs.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
case types.PolicyTypeQiniu:

@ -85,7 +85,10 @@ type (
}
Archiver interface {
// CreateArchive creates an archive
CreateArchive(ctx context.Context, uris []*fs.URI, writer io.Writer, opts ...fs.Option) (int, error)
// ListArchiveFiles lists files in an archive
ListArchiveFiles(ctx context.Context, uri *fs.URI, entity, zipEncoding string) ([]ArchivedFile, error)
}
FileManager interface {
@ -144,7 +147,8 @@ func NewFileManager(dep dependency.Dep, u *ent.User) FileManager {
user: u,
settings: dep.SettingProvider(),
fs: dbfs.NewDatabaseFS(u, dep.FileClient(), dep.ShareClient(), dep.Logger(), dep.LockSystem(),
dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV(), dep.DirectLinkClient()),
dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV(),
dep.DirectLinkClient(), dep.EncryptorFactory(context.TODO())),
kv: dep.KV(),
config: config,
auth: dep.GeneralAuth(),

@ -14,6 +14,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/samber/lo"
@ -106,6 +107,11 @@ func (m *manager) ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, enti
return nil
}
language := ""
if file.Owner().Settings != nil {
language = file.Owner().Settings.Language
}
var (
metas []driver.MediaMeta
)
@ -117,7 +123,7 @@ func (m *manager) ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, enti
driverCaps := d.Capabilities()
if util.IsInExtensionList(driverCaps.MediaMetaSupportedExts, file.Name()) {
m.l.Debug("Using native driver to generate media meta.")
metas, err = d.MediaMeta(ctx, targetVersion.Source(), file.Ext())
metas, err = d.MediaMeta(ctx, targetVersion.Source(), file.Ext(), language)
if err != nil {
return fmt.Errorf("failed to get media meta using native driver: %w", err)
}
@ -130,7 +136,7 @@ func (m *manager) ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, enti
return fmt.Errorf("failed to get entity source: %w", err)
}
metas, err = extractor.Extract(ctx, file.Ext(), source)
metas, err = extractor.Extract(ctx, file.Ext(), source, mediameta.WithLanguage(language))
if err != nil {
return fmt.Errorf("failed to extract media meta using local extractor: %w", err)
}

@ -5,14 +5,18 @@ import (
"crypto/sha1"
"encoding/json"
"fmt"
"strconv"
"strings"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/go-playground/validator/v10"
"strings"
"github.com/samber/lo"
)
type (
@ -20,13 +24,14 @@ type (
)
const (
wildcardMetadataKey = "*"
customizeMetadataSuffix = "customize"
tagMetadataSuffix = "tag"
iconColorMetadataKey = customizeMetadataSuffix + ":icon_color"
emojiIconMetadataKey = customizeMetadataSuffix + ":emoji"
shareOwnerMetadataKey = dbfs.MetadataSysPrefix + "shared_owner"
shareRedirectMetadataKey = dbfs.MetadataSysPrefix + "shared_redirect"
wildcardMetadataKey = "*"
customizeMetadataSuffix = "customize"
tagMetadataSuffix = "tag"
customPropsMetadataSuffix = "props"
iconColorMetadataKey = customizeMetadataSuffix + ":icon_color"
emojiIconMetadataKey = customizeMetadataSuffix + ":emoji"
shareOwnerMetadataKey = dbfs.MetadataSysPrefix + "shared_owner"
shareRedirectMetadataKey = dbfs.MetadataSysPrefix + "shared_redirect"
)
var (
@ -38,6 +43,8 @@ var (
// validateColor validates a color value
validateColor = func(optional bool) metadataValidator {
return func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
patch.UpdateModifiedAt = true
if patch.Remove {
return nil
}
@ -62,6 +69,8 @@ var (
return fmt.Errorf("cannot remove system metadata")
}
patch.UpdateModifiedAt = true
dep := dependency.FromContext(ctx)
// Validate share owner is valid hashid
if patch.Key == shareOwnerMetadataKey {
@ -88,9 +97,20 @@ var (
},
},
"dav": {},
// Allow manipulating thumbnail metadata via public PatchMetadata API
"thumb": {
// Only supported thumb metadata currently is thumb:disabled
dbfs.ThumbDisabledKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
// Presence of this key disables thumbnails; value is ignored.
// We allow both setting and removing this key.
return nil
},
},
customizeMetadataSuffix: {
iconColorMetadataKey: validateColor(false),
emojiIconMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
patch.UpdateModifiedAt = true
if patch.Remove {
return nil
}
@ -120,6 +140,8 @@ var (
},
tagMetadataSuffix: {
wildcardMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
patch.UpdateModifiedAt = true
if err := validateColor(true)(ctx, m, patch); err != nil {
return err
}
@ -131,44 +153,170 @@ var (
return nil
},
},
customPropsMetadataSuffix: {
	// Wildcard validator for all "props:*" metadata keys. The key suffix is
	// the custom prop ID; the patched value is validated against the matching
	// prop definition from site settings. Returns an error if no prop with
	// the given ID is configured.
	wildcardMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
		patch.UpdateModifiedAt = true
		if patch.Remove {
			// Removing a custom prop is always allowed; nothing to validate.
			return nil
		}

		customProps := m.settings.CustomProps(ctx)
		propId := strings.TrimPrefix(patch.Key, customPropsMetadataSuffix+":")
		for _, prop := range customProps {
			if prop.ID != propId {
				continue
			}

			switch prop.Type {
			case types.CustomPropsTypeText:
				// NOTE(review): min/max compare byte length, not rune count —
				// confirm this is intended for multi-byte (e.g. CJK) input.
				if prop.Min > 0 && prop.Min > len(patch.Value) {
					return fmt.Errorf("value is too short")
				}
				if prop.Max > 0 && prop.Max < len(patch.Value) {
					return fmt.Errorf("value is too long")
				}
				return nil
			case types.CustomPropsTypeRating:
				if patch.Value == "" {
					return nil
				}
				// Value must be an integer no larger than the configured max.
				rating, err := strconv.Atoi(patch.Value)
				if err != nil {
					return fmt.Errorf("value is not a number")
				}
				if prop.Max < rating {
					return fmt.Errorf("value is too large")
				}
				return nil
			case types.CustomPropsTypeNumber:
				if patch.Value == "" {
					return nil
				}
				value, err := strconv.Atoi(patch.Value)
				if err != nil {
					return fmt.Errorf("value is not a number")
				}
				if prop.Min > value {
					return fmt.Errorf("value is too small")
				}
				if prop.Max > 0 && prop.Max < value {
					return fmt.Errorf("value is too large")
				}
				return nil
			case types.CustomPropsTypeBoolean:
				if patch.Value == "" {
					return nil
				}
				if patch.Value != "true" && patch.Value != "false" {
					return fmt.Errorf("value is not a boolean")
				}
				return nil
			case types.CustomPropsTypeSelect:
				if patch.Value == "" {
					return nil
				}
				// Value must be one of the configured options.
				for _, option := range prop.Options {
					if option == patch.Value {
						return nil
					}
				}
				return fmt.Errorf("invalid option")
			case types.CustomPropsTypeMultiSelect:
				if patch.Value == "" {
					return nil
				}
				// Value is a JSON array of strings; every element must be one
				// of the configured options.
				var values []string
				if err := json.Unmarshal([]byte(patch.Value), &values); err != nil {
					return fmt.Errorf("invalid multi select value: %w", err)
				}
				for _, value := range values {
					if !lo.Contains(prop.Options, value) {
						return fmt.Errorf("invalid option")
					}
				}
				return nil
			case types.CustomPropsTypeLink:
				if patch.Value == "" {
					return nil
				}
				if prop.Min > 0 && len(patch.Value) < prop.Min {
					return fmt.Errorf("value is too small")
				}
				if prop.Max > 0 && len(patch.Value) > prop.Max {
					return fmt.Errorf("value is too large")
				}
				return nil
			default:
				// Unknown prop types are accepted without validation.
				return nil
			}
		}
		// Fixed typo in error message: "unkown" -> "unknown".
		return fmt.Errorf("unknown custom props")
	},
},
}
)
func (m *manager) PatchMedata(ctx context.Context, path []*fs.URI, data ...fs.MetadataPatch) error {
if err := m.validateMetadata(ctx, data...); err != nil {
data, err := m.validateMetadata(ctx, data...)
if err != nil {
return err
}
return m.fs.PatchMetadata(ctx, path, data...)
}
func (m *manager) validateMetadata(ctx context.Context, data ...fs.MetadataPatch) error {
func (m *manager) validateMetadata(ctx context.Context, data ...fs.MetadataPatch) ([]fs.MetadataPatch, error) {
validated := make([]fs.MetadataPatch, 0, len(data))
for _, patch := range data {
category := strings.Split(patch.Key, ":")
if len(category) < 2 {
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata key", nil)
return validated, serializer.NewError(serializer.CodeParamErr, "Invalid metadata key", nil)
}
categoryValidators, ok := validators[category[0]]
if !ok {
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata key",
return validated, serializer.NewError(serializer.CodeParamErr, "Invalid metadata key",
fmt.Errorf("unknown category: %s", category[0]))
}
// Explicit validators
if v, ok := categoryValidators[patch.Key]; ok {
if err := v(ctx, m, &patch); err != nil {
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata patch", err)
return validated, serializer.NewError(serializer.CodeParamErr, "Invalid metadata patch", err)
}
}
// Wildcard validators
if v, ok := categoryValidators[wildcardMetadataKey]; ok {
if err := v(ctx, m, &patch); err != nil {
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata patch", err)
return validated, serializer.NewError(serializer.CodeParamErr, "Invalid metadata patch", err)
}
}
validated = append(validated, patch)
}
return nil
return validated, nil
}

@ -115,7 +115,7 @@ func (m *manager) Create(ctx context.Context, path *fs.URI, fileType types.FileT
isSymbolic := false
if o.Metadata != nil {
if err := m.validateMetadata(ctx, lo.MapToSlice(o.Metadata, func(key string, value string) fs.MetadataPatch {
_, err := m.validateMetadata(ctx, lo.MapToSlice(o.Metadata, func(key string, value string) fs.MetadataPatch {
if key == shareRedirectMetadataKey {
isSymbolic = true
}
@ -124,7 +124,8 @@ func (m *manager) Create(ctx context.Context, path *fs.URI, fileType types.FileT
Key: key,
Value: value,
}
})...); err != nil {
})...)
if err != nil {
return nil, err
}
}

@ -222,7 +222,7 @@ func (m *manager) RecycleEntities(ctx context.Context, force bool, entityIDs ...
toBeDeletedSrc := lo.Map(lo.Filter(chunk, func(item fs.Entity, index int) bool {
// Only delete entities that are not marked as "unlink only"
return item.Model().RecycleOptions == nil || !item.Model().RecycleOptions.UnlinkOnly
return item.Model().Props == nil || !item.Model().Props.UnlinkOnly
}), func(entity fs.Entity, index int) string {
return entity.Source()
})
@ -311,6 +311,7 @@ func CronCollectTrashBin(ctx context.Context) {
res, err := fm.fs.AllFilesInTrashBin(ctx, fs.WithPageSize(pageSize))
if err != nil {
l.Error("Failed to get files in trash bin: %s", err)
return
}
expired := lo.Filter(res.Files, func(file fs.File, index int) bool {

@ -4,8 +4,8 @@ import (
"context"
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/thumb"
"os"
"path"
"runtime"
"time"
@ -18,6 +18,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/thumb"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/samber/lo"
)
@ -64,7 +65,8 @@ func (m *manager) Thumbnail(ctx context.Context, uri *fs.URI) (entitysource.Enti
capabilities := handler.Capabilities()
// Check if file extension and size is supported by native policy generator.
if capabilities.ThumbSupportAllExts || util.IsInExtensionList(capabilities.ThumbSupportedExts, file.DisplayName()) &&
(capabilities.ThumbMaxSize == 0 || latest.Size() <= capabilities.ThumbMaxSize) {
(capabilities.ThumbMaxSize == 0 || latest.Size() <= capabilities.ThumbMaxSize) &&
!latest.Encrypted() {
thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(latest), fs.WithUseThumb(true))
if err != nil {
return nil, fmt.Errorf("failed to get latest entity source: %w", err)
@ -182,14 +184,9 @@ func (m *manager) generateThumb(ctx context.Context, uri *fs.URI, ext string, es
entityType := types.EntityTypeThumbnail
req := &fs.UploadRequest{
Props: &fs.UploadProps{
Uri: uri,
Size: fileInfo.Size(),
SavePath: fmt.Sprintf(
"%s.%s%s",
es.Entity().Source(),
util.RandStringRunes(16),
m.settings.ThumbEntitySuffix(ctx),
),
Uri: uri,
Size: fileInfo.Size(),
SavePath: path.Clean(util.ReplaceMagicVar(m.settings.ThumbEntitySuffix(ctx), fs.Separator, true, true, time.Now(), m.user.ID, uri.Name(), uri.Path(), es.Entity().Source())),
MimeType: m.dep.MimeDetector(ctx).TypeByName("thumb.jpg"),
EntityType: &entityType,
},

@ -29,7 +29,7 @@ type (
// ConfirmUploadSession confirms whether upload session is valid for upload.
ConfirmUploadSession(ctx context.Context, session *fs.UploadSession, chunkIndex int) (fs.File, error)
// Upload uploads file data to storage
Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error
Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy, session *fs.UploadSession) error
// CompleteUpload completes upload session and returns file object
CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error)
// CancelUploadSession cancels upload session
@ -55,7 +55,7 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
// Validate metadata
if req.Props.Metadata != nil {
if err := m.validateMetadata(ctx, lo.MapToSlice(req.Props.Metadata, func(key string, value string) fs.MetadataPatch {
if _, err := m.validateMetadata(ctx, lo.MapToSlice(req.Props.Metadata, func(key string, value string) fs.MetadataPatch {
return fs.MetadataPatch{
Key: key,
Value: value,
@ -93,7 +93,8 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
uploadSession.ChunkSize = uploadSession.Policy.Settings.ChunkSize
// Create upload credential for underlying storage driver
credential := &fs.UploadCredential{}
if !uploadSession.Policy.Settings.Relay || m.stateless {
unrelayed := !uploadSession.Policy.Settings.Relay || m.stateless
if unrelayed {
credential, err = d.Token(ctx, uploadSession, req)
if err != nil {
m.OnUploadFailed(ctx, uploadSession)
@ -103,12 +104,18 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
// For relayed upload, we don't need to create credential
uploadSession.ChunkSize = 0
credential.ChunkSize = 0
credential.EncryptMetadata = nil
uploadSession.Props.ClientSideEncrypted = false
}
credential.SessionID = uploadSession.Props.UploadSessionID
credential.Expires = req.Props.ExpireAt.Unix()
credential.StoragePolicy = uploadSession.Policy
credential.CallbackSecret = uploadSession.CallbackSecret
credential.Uri = uploadSession.Props.Uri.String()
credential.EncryptMetadata = uploadSession.EncryptMetadata
if !unrelayed {
credential.EncryptMetadata = nil
}
// If upload sentinel check is required, queue a check task
if d.Capabilities().StaticFeatures.Enabled(int(driver.HandlerCapabilityUploadSentinelRequired)) {
@ -178,12 +185,34 @@ func (m *manager) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts
return m.fs.PrepareUpload(ctx, req, opts...)
}
func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error {
func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy, session *fs.UploadSession) error {
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, policy))
if err != nil {
return err
}
if session != nil && session.EncryptMetadata != nil && !req.Props.ClientSideEncrypted {
cryptor, err := m.dep.EncryptorFactory(ctx)(session.EncryptMetadata.Algorithm)
if err != nil {
return fmt.Errorf("failed to create cryptor: %w", err)
}
err = cryptor.LoadMetadata(ctx, session.EncryptMetadata)
if err != nil {
return fmt.Errorf("failed to load encrypt metadata: %w", err)
}
if err := cryptor.SetSource(req.File, req.Seeker, req.Props.Size, 0); err != nil {
return fmt.Errorf("failed to set source: %w", err)
}
req.File = cryptor
if req.Seeker != nil {
req.Seeker = cryptor
}
}
if err := d.Put(ctx, req); err != nil {
return serializer.NewError(serializer.CodeIOFailed, "Failed to upload file", err)
}
@ -301,6 +330,8 @@ func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.
}
req.Props.UploadSessionID = uuid.Must(uuid.NewV4()).String()
// Server-side supported encryption algorithms
req.Props.EncryptionSupported = []types.Cipher{types.CipherAES256CTR}
if m.stateless {
return m.updateStateless(ctx, req, o)
@ -312,7 +343,7 @@ func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.
return nil, fmt.Errorf("faield to prepare uplaod: %w", err)
}
if err := m.Upload(ctx, req, uploadSession.Policy); err != nil {
if err := m.Upload(ctx, req, uploadSession.Policy, uploadSession); err != nil {
m.OnUploadFailed(ctx, uploadSession)
return nil, fmt.Errorf("failed to upload new entity: %w", err)
}
@ -368,7 +399,7 @@ func (m *manager) updateStateless(ctx context.Context, req *fs.UploadRequest, o
}
req.Props = res.Req.Props
if err := m.Upload(ctx, req, res.Session.Policy); err != nil {
if err := m.Upload(ctx, req, res.Session.Policy, res.Session); err != nil {
if err := o.Node.OnUploadFailed(ctx, &fs.StatelessOnUploadFailedService{
UploadSession: res.Session,
UserID: o.StatelessUserID,

@ -18,6 +18,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
@ -217,11 +218,18 @@ func (m *CreateArchiveTask) listEntitiesAndSendToSlave(ctx context.Context, dep
user := inventory.UserFromContext(ctx)
fm := manager.NewFileManager(dep, user)
storagePolicyClient := dep.StoragePolicyClient()
masterKey, _ := dep.MasterEncryptKeyVault(ctx).GetMasterKey(ctx)
failed, err := fm.CreateArchive(ctx, uris, io.Discard,
fs.WithDryRun(func(name string, e fs.Entity) {
entityModel, err := decryptEntityKeyIfNeeded(masterKey, e.Model())
if err != nil {
m.l.Warning("Failed to decrypt entity key for %q: %s", name, err)
return
}
payload.Entities = append(payload.Entities, SlaveCreateArchiveEntity{
Entity: e.Model(),
Entity: entityModel,
Path: name,
})
if _, ok := payload.Policies[e.PolicyID()]; !ok {
@ -680,3 +688,18 @@ func (m *SlaveCreateArchiveTask) Progress(ctx context.Context) queue.Progresses
return m.progress
}
// decryptEntityKeyIfNeeded makes sure the entity's encryption key is available
// in plaintext before the entity model is handed to a slave node. Entities
// without encryption metadata, or whose key is already decrypted, are returned
// unchanged. On success the ciphertext key is cleared and only the plaintext
// form is kept (NOTE: this mutates the passed entity in place).
func decryptEntityKeyIfNeeded(masterKey []byte, entity *ent.Entity) (*ent.Entity, error) {
	props := entity.Props
	if props == nil {
		return entity, nil
	}
	meta := props.EncryptMetadata
	if meta == nil || meta.KeyPlainText != nil {
		// Nothing encrypted, or already decrypted earlier.
		return entity, nil
	}

	plain, err := encrypt.DecryptWithMasterKey(masterKey, meta.Key)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt entity key: %w", err)
	}

	meta.KeyPlainText = plain
	meta.Key = nil
	return entity, nil
}

@ -26,7 +26,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gofrs/uuid"
"github.com/mholt/archiver/v4"
"github.com/mholt/archives"
)
type (
@ -40,13 +40,15 @@ type (
}
ExtractArchiveTaskPhase string
ExtractArchiveTaskState struct {
Uri string `json:"uri,omitempty"`
Encoding string `json:"encoding,omitempty"`
Dst string `json:"dst,omitempty"`
TempPath string `json:"temp_path,omitempty"`
TempZipFilePath string `json:"temp_zip_file_path,omitempty"`
ProcessedCursor string `json:"processed_cursor,omitempty"`
SlaveTaskID int `json:"slave_task_id,omitempty"`
Uri string `json:"uri,omitempty"`
Encoding string `json:"encoding,omitempty"`
Dst string `json:"dst,omitempty"`
TempPath string `json:"temp_path,omitempty"`
TempZipFilePath string `json:"temp_zip_file_path,omitempty"`
ProcessedCursor string `json:"processed_cursor,omitempty"`
SlaveTaskID int `json:"slave_task_id,omitempty"`
Password string `json:"password,omitempty"`
FileMask []string `json:"file_mask,omitempty"`
NodeState `json:",inline"`
Phase ExtractArchiveTaskPhase `json:"phase,omitempty"`
}
@ -71,12 +73,14 @@ func init() {
}
// NewExtractArchiveTask creates a new ExtractArchiveTask
func NewExtractArchiveTask(ctx context.Context, src, dst, encoding string) (queue.Task, error) {
func NewExtractArchiveTask(ctx context.Context, src, dst, encoding, password string, mask []string) (queue.Task, error) {
state := &ExtractArchiveTaskState{
Uri: src,
Dst: dst,
Encoding: encoding,
NodeState: NodeState{},
Password: password,
FileMask: mask,
}
stateBytes, err := json.Marshal(state)
if err != nil {
@ -190,13 +194,21 @@ func (m *ExtractArchiveTask) createSlaveExtractTask(ctx context.Context, dep dep
return task.StatusError, fmt.Errorf("failed to get policy: %w", err)
}
masterKey, _ := dep.MasterEncryptKeyVault(ctx).GetMasterKey(ctx)
entityModel, err := decryptEntityKeyIfNeeded(masterKey, archiveFile.PrimaryEntity().Model())
if err != nil {
return task.StatusError, fmt.Errorf("failed to decrypt entity key for archive file %q: %s", archiveFile.DisplayName(), err)
}
payload := &SlaveExtractArchiveTaskState{
FileName: archiveFile.DisplayName(),
Entity: archiveFile.PrimaryEntity().Model(),
Entity: entityModel,
Policy: policy,
Encoding: m.state.Encoding,
Dst: m.state.Dst,
UserID: user.ID,
Password: m.state.Password,
FileMask: m.state.FileMask,
}
payloadStr, err := json.Marshal(payload)
@ -277,20 +289,21 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen
m.l.Info("Extracting archive %q to %q", uri, m.state.Dst)
// Identify file format
format, readStream, err := archiver.Identify(archiveFile.DisplayName(), es)
format, readStream, err := archives.Identify(ctx, archiveFile.DisplayName(), es)
if err != nil {
return task.StatusError, fmt.Errorf("failed to identify archive format: %w", err)
}
m.l.Info("Archive file %q format identified as %q", uri, format.Name())
m.l.Info("Archive file %q format identified as %q", uri, format.Extension())
extractor, ok := format.(archiver.Extractor)
extractor, ok := format.(archives.Extractor)
if !ok {
return task.StatusError, fmt.Errorf("format not an extractor %s")
return task.StatusError, fmt.Errorf("format not an extractor %s", format.Extension())
}
if format.Name() == ".zip" {
// Zip extractor requires a Seeker+ReadAt
formatExt := format.Extension()
if formatExt == ".zip" || formatExt == ".7z" {
// Zip/7Z extractor requires a Seeker+ReadAt
if m.state.TempZipFilePath == "" && !es.IsLocal() {
m.state.Phase = ExtractArchivePhaseDownloadZip
m.ResumeAfter(0)
@ -315,11 +328,25 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen
readStream = es
}
}
if zipExtractor, ok := extractor.(archives.Zip); ok {
if m.state.Encoding != "" {
m.l.Info("Using encoding %q for zip archive", m.state.Encoding)
extractor = archiver.Zip{TextEncoding: m.state.Encoding}
encoding, ok := manager.ZipEncodings[strings.ToLower(m.state.Encoding)]
if !ok {
m.l.Warning("Unknown encoding %q, fallback to default encoding", m.state.Encoding)
} else {
zipExtractor.TextEncoding = encoding
extractor = zipExtractor
}
}
} else if rarExtractor, ok := extractor.(archives.Rar); ok && m.state.Password != "" {
rarExtractor.Password = m.state.Password
extractor = rarExtractor
} else if sevenZipExtractor, ok := extractor.(archives.SevenZip); ok && m.state.Password != "" {
sevenZipExtractor.Password = m.state.Password
extractor = sevenZipExtractor
}
needSkipToCursor := false
@ -332,7 +359,7 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen
m.Unlock()
// extract and upload
err = extractor.Extract(ctx, readStream, nil, func(ctx context.Context, f archiver.File) error {
err = extractor.Extract(ctx, readStream, func(ctx context.Context, f archives.FileInfo) error {
if needSkipToCursor && f.NameInArchive != m.state.ProcessedCursor {
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
@ -351,6 +378,14 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen
rawPath := util.FormSlash(f.NameInArchive)
savePath := dst.JoinRaw(rawPath)
// If file mask is not empty, check if the path is in the mask
if len(m.state.FileMask) > 0 && !isFileInMask(rawPath, m.state.FileMask) {
m.l.Warning("File %q is not in the mask, skipping...", f.NameInArchive)
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
return nil
}
// Check if path is legit
if !strings.HasPrefix(savePath.Path(), util.FillSlash(path.Clean(dst.Path()))) {
m.l.Warning("Path %q is not legit, skipping...", f.NameInArchive)
@ -380,6 +415,10 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen
Props: &fs.UploadProps{
Uri: savePath,
Size: f.Size(),
LastModified: func() *time.Time {
t := f.FileInfo.ModTime().Local()
return &t
}(),
},
ProgressFunc: func(current, diff int64, total int64) {
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, diff)
@ -533,6 +572,8 @@ type (
TempPath string `json:"temp_path,omitempty"`
TempZipFilePath string `json:"temp_zip_file_path,omitempty"`
ProcessedCursor string `json:"processed_cursor,omitempty"`
Password string `json:"password,omitempty"`
FileMask []string `json:"file_mask,omitempty"`
}
)
@ -602,18 +643,19 @@ func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
defer es.Close()
// 2. Identify file format
format, readStream, err := archiver.Identify(m.state.FileName, es)
format, readStream, err := archives.Identify(ctx, m.state.FileName, es)
if err != nil {
return task.StatusError, fmt.Errorf("failed to identify archive format: %w", err)
}
m.l.Info("Archive file %q format identified as %q", m.state.FileName, format.Name())
m.l.Info("Archive file %q format identified as %q", m.state.FileName, format.Extension())
extractor, ok := format.(archiver.Extractor)
extractor, ok := format.(archives.Extractor)
if !ok {
return task.StatusError, fmt.Errorf("format not an extractor %s")
return task.StatusError, fmt.Errorf("format not an extractor %q", format.Extension())
}
if format.Name() == ".zip" {
formatExt := format.Extension()
if formatExt == ".zip" || formatExt == ".7z" {
if _, err = es.Seek(0, 0); err != nil {
return task.StatusError, fmt.Errorf("failed to seek entity source: %w", err)
}
@ -666,11 +708,25 @@ func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
if es.IsLocal() {
readStream = es
}
}
if zipExtractor, ok := extractor.(archives.Zip); ok {
if m.state.Encoding != "" {
m.l.Info("Using encoding %q for zip archive", m.state.Encoding)
extractor = archiver.Zip{TextEncoding: m.state.Encoding}
encoding, ok := manager.ZipEncodings[strings.ToLower(m.state.Encoding)]
if !ok {
m.l.Warning("Unknown encoding %q, fallback to default encoding", m.state.Encoding)
} else {
zipExtractor.TextEncoding = encoding
extractor = zipExtractor
}
}
} else if rarExtractor, ok := extractor.(archives.Rar); ok && m.state.Password != "" {
rarExtractor.Password = m.state.Password
extractor = rarExtractor
} else if sevenZipExtractor, ok := extractor.(archives.SevenZip); ok && m.state.Password != "" {
sevenZipExtractor.Password = m.state.Password
extractor = sevenZipExtractor
}
needSkipToCursor := false
@ -679,7 +735,7 @@ func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
}
// 3. Extract and upload
err = extractor.Extract(ctx, readStream, nil, func(ctx context.Context, f archiver.File) error {
err = extractor.Extract(ctx, readStream, func(ctx context.Context, f archives.FileInfo) error {
if needSkipToCursor && f.NameInArchive != m.state.ProcessedCursor {
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
@ -698,6 +754,12 @@ func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
rawPath := util.FormSlash(f.NameInArchive)
savePath := dst.JoinRaw(rawPath)
// If file mask is not empty, check if the path is in the mask
if len(m.state.FileMask) > 0 && !isFileInMask(rawPath, m.state.FileMask) {
m.l.Debug("File %q is not in the mask, skipping...", f.NameInArchive)
return nil
}
// Check if path is legit
if !strings.HasPrefix(savePath.Path(), util.FillSlash(path.Clean(dst.Path()))) {
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
@ -727,6 +789,10 @@ func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
Props: &fs.UploadProps{
Uri: savePath,
Size: f.Size(),
LastModified: func() *time.Time {
t := f.FileInfo.ModTime().Local()
return &t
}(),
},
ProgressFunc: func(current, diff int64, total int64) {
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, diff)
@ -765,3 +831,17 @@ func (m *SlaveExtractArchiveTask) Progress(ctx context.Context) queue.Progresses
defer m.Unlock()
return m.progress
}
// isFileInMask reports whether path is covered by the extraction mask, either
// as an exact entry or as a descendant of a masked directory. An empty mask
// matches every path.
func isFileInMask(path string, mask []string) bool {
	if len(mask) == 0 {
		return true
	}
	covered := func(entry string) bool {
		return path == entry || strings.HasPrefix(path, entry+"/")
	}
	for _, entry := range mask {
		if covered(entry) {
			return true
		}
	}
	return false
}

@ -8,6 +8,7 @@ import (
"os"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
@ -318,7 +319,7 @@ func (m *RemoteDownloadTask) slaveTransfer(ctx context.Context, dep dependency.D
continue
}
dst := dstUri.JoinRaw(f.Name)
dst := dstUri.JoinRaw(sanitizeFileName(f.Name))
src := path.Join(m.state.Status.SavePath, f.Name)
payload.Files = append(payload.Files, SlaveUploadEntity{
Src: src,
@ -431,25 +432,29 @@ func (m *RemoteDownloadTask) masterTransfer(ctx context.Context, dep dependency.
ae := serializer.NewAggregateError()
transferFunc := func(workerId int, file downloader.TaskFile) {
defer func() {
atomic.AddInt64(&m.progress[ProgressTypeUploadCount].Current, 1)
worker <- workerId
wg.Done()
}()
dst := dstUri.JoinRaw(file.Name)
sanitizedName := sanitizeFileName(file.Name)
dst := dstUri.JoinRaw(sanitizedName)
src := filepath.FromSlash(path.Join(m.state.Status.SavePath, file.Name))
m.l.Info("Uploading file %s to %s...", src, file.Name, dst)
m.l.Info("Uploading file %s to %s...", src, sanitizedName, dst)
progressKey := fmt.Sprintf("%s%d", ProgressTypeUploadSinglePrefix, workerId)
m.Lock()
m.progress[progressKey] = &queue.Progress{Identifier: dst.String(), Total: file.Size}
fileProgress := m.progress[progressKey]
uploadProgress := m.progress[ProgressTypeUpload]
uploadCountProgress := m.progress[ProgressTypeUploadCount]
m.Unlock()
defer func() {
atomic.AddInt64(&uploadCountProgress.Current, 1)
worker <- workerId
wg.Done()
}()
fileStream, err := os.Open(src)
if err != nil {
m.l.Warning("Failed to open file %s: %s", src, err.Error())
atomic.AddInt64(&m.progress[ProgressTypeUpload].Current, file.Size)
atomic.AddInt64(&uploadProgress.Current, file.Size)
atomic.AddInt64(&failed, 1)
ae.Add(file.Name, fmt.Errorf("failed to open file: %w", err))
return
@ -463,8 +468,8 @@ func (m *RemoteDownloadTask) masterTransfer(ctx context.Context, dep dependency.
Size: file.Size,
},
ProgressFunc: func(current, diff int64, total int64) {
atomic.AddInt64(&m.progress[progressKey].Current, diff)
atomic.AddInt64(&m.progress[ProgressTypeUpload].Current, diff)
atomic.AddInt64(&fileProgress.Current, diff)
atomic.AddInt64(&uploadProgress.Current, diff)
},
File: fileStream,
}
@ -473,7 +478,7 @@ func (m *RemoteDownloadTask) masterTransfer(ctx context.Context, dep dependency.
if err != nil {
m.l.Warning("Failed to upload file %s: %s", src, err.Error())
atomic.AddInt64(&failed, 1)
atomic.AddInt64(&m.progress[ProgressTypeUpload].Current, file.Size)
atomic.AddInt64(&uploadProgress.Current, file.Size)
ae.Add(file.Name, fmt.Errorf("failed to upload file: %w", err))
return
}
@ -488,8 +493,10 @@ func (m *RemoteDownloadTask) masterTransfer(ctx context.Context, dep dependency.
// Check if file is already transferred
if _, ok := m.state.Transferred[file.Index]; ok {
m.l.Info("File %s already transferred, skipping...", file.Name)
m.Lock()
atomic.AddInt64(&m.progress[ProgressTypeUpload].Current, file.Size)
atomic.AddInt64(&m.progress[ProgressTypeUploadCount].Current, 1)
m.Unlock()
continue
}
@ -538,7 +545,7 @@ func (m *RemoteDownloadTask) validateFiles(ctx context.Context, dep dependency.D
validateArgs := lo.Map(selectedFiles, func(f downloader.TaskFile, _ int) fs.PreValidateFile {
return fs.PreValidateFile{
Name: f.Name,
Name: sanitizeFileName(f.Name),
Size: f.Size,
OmitName: f.Name == "",
}
@ -623,17 +630,21 @@ func (m *RemoteDownloadTask) Progress(ctx context.Context) queue.Progresses {
m.Lock()
defer m.Unlock()
if m.state.NodeState.progress != nil {
merged := make(queue.Progresses)
for k, v := range m.progress {
merged[k] = v
}
merged := make(queue.Progresses)
for k, v := range m.progress {
merged[k] = v
}
if m.state.NodeState.progress != nil {
for k, v := range m.state.NodeState.progress {
merged[k] = v
}
return merged
}
return m.progress
return merged
}
// sanitizeFileName replaces characters that are invalid in file names on
// common filesystems (\ : * ? " < > |) with underscores. The forward slash
// is intentionally left untouched, as it separates path components.
func sanitizeFileName(name string) string {
	return strings.Map(func(r rune) rune {
		switch r {
		case '\\', ':', '*', '?', '"', '<', '>', '|':
			return '_'
		}
		return r
	}, name)
}

@ -115,23 +115,26 @@ func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
atomic.StoreInt64(&t.progress[ProgressTypeUpload].Total, totalSize)
ae := serializer.NewAggregateError()
transferFunc := func(workerId, fileId int, file SlaveUploadEntity) {
defer func() {
atomic.AddInt64(&t.progress[ProgressTypeUploadCount].Current, 1)
worker <- workerId
wg.Done()
}()
t.l.Info("Uploading file %s to %s...", file.Src, file.Uri.String())
progressKey := fmt.Sprintf("%s%d", ProgressTypeUploadSinglePrefix, workerId)
t.Lock()
t.progress[progressKey] = &queue.Progress{Identifier: file.Uri.String(), Total: file.Size}
fileProgress := t.progress[progressKey]
uploadProgress := t.progress[ProgressTypeUpload]
uploadCountProgress := t.progress[ProgressTypeUploadCount]
t.Unlock()
defer func() {
atomic.AddInt64(&uploadCountProgress.Current, 1)
worker <- workerId
wg.Done()
}()
handle, err := os.Open(filepath.FromSlash(file.Src))
if err != nil {
t.l.Warning("Failed to open file %s: %s", file.Src, err.Error())
atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
atomic.AddInt64(&fileProgress.Current, file.Size)
ae.Add(path.Base(file.Src), fmt.Errorf("failed to open file: %w", err))
return
}
@ -140,7 +143,7 @@ func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
if err != nil {
t.l.Warning("Failed to get file stat for %s: %s", file.Src, err.Error())
handle.Close()
atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
atomic.AddInt64(&fileProgress.Current, file.Size)
ae.Add(path.Base(file.Src), fmt.Errorf("failed to get file stat: %w", err))
return
}
@ -151,9 +154,9 @@ func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
Size: stat.Size(),
},
ProgressFunc: func(current, diff int64, total int64) {
atomic.AddInt64(&t.progress[progressKey].Current, diff)
atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, diff)
atomic.StoreInt64(&t.progress[progressKey].Total, total)
atomic.AddInt64(&fileProgress.Current, diff)
atomic.AddInt64(&uploadCountProgress.Current, 1)
atomic.StoreInt64(&fileProgress.Total, total)
},
File: handle,
Seeker: handle,
@ -163,7 +166,7 @@ func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
if err != nil {
handle.Close()
t.l.Warning("Failed to upload file %s: %s", file.Src, err.Error())
atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
atomic.AddInt64(&uploadProgress.Current, file.Size)
ae.Add(path.Base(file.Src), fmt.Errorf("failed to upload file: %w", err))
return
}
@ -179,8 +182,10 @@ func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
// Check if file is already transferred
if _, ok := t.state.Transferred[fileId]; ok {
t.l.Info("File %s already transferred, skipping...", file.Src)
t.Lock()
atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
atomic.AddInt64(&t.progress[ProgressTypeUploadCount].Current, 1)
t.Unlock()
continue
}
@ -221,5 +226,9 @@ func (m *SlaveUploadTask) Progress(ctx context.Context) queue.Progresses {
m.Lock()
defer m.Unlock()
return m.progress
res := make(queue.Progresses)
for k, v := range m.progress {
res[k] = v
}
return res
}

@ -145,7 +145,12 @@ func (e *exifExtractor) Exts() []string {
}
// Reference: https://github.com/photoprism/photoprism/blob/602097635f1c84d91f2d919f7aedaef7a07fc458/internal/meta/exif.go
func (e *exifExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error) {
func (e *exifExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error) {
option := &option{}
for _, opt := range opts {
opt.apply(option)
}
localLimit, remoteLimit := e.settings.MediaMetaExifSizeLimit(ctx)
if err := checkFileSize(localLimit, remoteLimit, source); err != nil {
return nil, err

@ -4,12 +4,14 @@ import (
"context"
"encoding/gob"
"errors"
"io"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/samber/lo"
"io"
)
type (
@ -17,7 +19,7 @@ type (
// Exts returns the supported file extensions.
Exts() []string
// Extract extracts the media meta from the given source.
Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error)
Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error)
}
)
@ -29,7 +31,7 @@ func init() {
gob.Register([]driver.MediaMeta{})
}
func NewExtractorManager(ctx context.Context, settings setting.Provider, l logging.Logger) Extractor {
func NewExtractorManager(ctx context.Context, settings setting.Provider, l logging.Logger, client request.Client) Extractor {
e := &extractorManager{
settings: settings,
extMap: make(map[string][]Extractor),
@ -52,6 +54,11 @@ func NewExtractorManager(ctx context.Context, settings setting.Provider, l loggi
extractors = append(extractors, ffprobeE)
}
if e.settings.MediaMetaGeocodingEnabled(ctx) {
geocodingE := newGeocodingExtractor(settings, l, client)
extractors = append(extractors, geocodingE)
}
for _, extractor := range extractors {
for _, ext := range extractor.Exts() {
if e.extMap[ext] == nil {
@ -73,12 +80,12 @@ func (e *extractorManager) Exts() []string {
return lo.Keys(e.extMap)
}
func (e *extractorManager) Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error) {
func (e *extractorManager) Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error) {
if extractor, ok := e.extMap[ext]; ok {
res := []driver.MediaMeta{}
for _, e := range extractor {
_, _ = source.Seek(0, io.SeekStart)
data, err := e.Extract(ctx, ext, source)
data, err := e.Extract(ctx, ext, source, append(opts, WithExtracted(res))...)
if err != nil {
return nil, err
}
@ -92,6 +99,29 @@ func (e *extractorManager) Extract(ctx context.Context, ext string, source entit
}
}
// option holds per-call options for Extractor.Extract.
type option struct {
	// extracted carries metadata collected by extractors that ran earlier in
	// the chain, so later extractors (e.g. geocoding) can build on it.
	extracted []driver.MediaMeta
	// language is the preferred language for extracted values.
	language string
}
// optionFunc is a functional option applied to an *option before extraction.
type optionFunc func(*option)

// apply runs the option function against o.
func (f optionFunc) apply(o *option) {
	f(o)
}
// WithExtracted makes metadata collected by previously executed extractors
// available to the next extractor in the chain.
func WithExtracted(extracted []driver.MediaMeta) optionFunc {
	return func(o *option) {
		o.extracted = extracted
	}
}
// WithLanguage sets the preferred language for extracted metadata values
// (used e.g. by the reverse-geocoding extractor).
func WithLanguage(language string) optionFunc {
	return func(o *option) {
		o.language = language
	}
}
// checkFileSize checks if the file size exceeds the limit.
func checkFileSize(localLimit, remoteLimit int64, source entitysource.EntitySource) error {
if source.IsLocal() && localLimit > 0 && source.Entity().Size() > localLimit {

@ -88,14 +88,19 @@ func (f *ffprobeExtractor) Exts() []string {
return ffprobeExts
}
func (f *ffprobeExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error) {
func (f *ffprobeExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error) {
option := &option{}
for _, opt := range opts {
opt.apply(option)
}
localLimit, remoteLimit := f.settings.MediaMetaFFProbeSizeLimit(ctx)
if err := checkFileSize(localLimit, remoteLimit, source); err != nil {
return nil, err
}
var input string
if source.IsLocal() {
if source.IsLocal() && !source.Entity().Encrypted() {
input = source.LocalPath(ctx)
} else {
expire := time.Now().Add(UrlExpire)

@ -0,0 +1,236 @@
package mediameta
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
// mapBoxURL is the endpoint of the Mapbox reverse geocoding API (v6).
const mapBoxURL = "https://api.mapbox.com/search/geocode/v6/reverse"

// Media meta keys for the location components resolved by reverse geocoding,
// ordered from most to least specific.
const (
	Street = "street"
	Locality = "locality"
	Place = "place"
	District = "district"
	Region = "region"
	Country = "country"
)
// geocodingExtractor resolves GPS coordinates found in previously extracted
// media metadata into human-readable place names via the Mapbox reverse
// geocoding API.
type geocodingExtractor struct {
	settings setting.Provider // source of the Mapbox access token
	l        logging.Logger
	client   request.Client // HTTP client used for Mapbox API requests
}
// newGeocodingExtractor constructs a geocoding extractor backed by the given
// settings provider, logger and HTTP client.
func newGeocodingExtractor(settings setting.Provider, l logging.Logger, client request.Client) *geocodingExtractor {
	extractor := &geocodingExtractor{}
	extractor.settings = settings
	extractor.l = l
	extractor.client = client
	return extractor
}
// Exts returns the supported file extensions. The geocoding extractor reuses
// the EXIF extension list, since it operates on GPS coordinates previously
// extracted from those files (passed in via WithExtracted).
func (e *geocodingExtractor) Exts() []string {
	return exifExts
}
// Extract reverse-geocodes the GPS coordinates found in metadata produced by
// earlier extractors (supplied via WithExtracted) into human-readable place
// names. It returns (nil, nil) when no GPS coordinates are available, and a
// wrapped error when coordinates are malformed or the geocoding request fails.
func (e *geocodingExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error) {
	option := &option{}
	for _, opt := range opts {
		opt.apply(option)
	}

	// Find GPS info from previously extracted metadata.
	var latStr, lngStr string
	for _, meta := range option.extracted {
		switch meta.Key {
		case GpsLat:
			latStr = meta.Value
		case GpsLng:
			lngStr = meta.Value
		}
	}

	if latStr == "" || lngStr == "" {
		// No coordinates to resolve; nothing to extract.
		return nil, nil
	}

	lat, err := strconv.ParseFloat(latStr, 64)
	if err != nil {
		return nil, fmt.Errorf("geocoding: failed to parse latitude: %w", err)
	}

	lng, err := strconv.ParseFloat(lngStr, 64)
	if err != nil {
		return nil, fmt.Errorf("geocoding: failed to parse longitude: %w", err)
	}

	metas, err := e.getGeocoding(ctx, lat, lng, option.language)
	if err != nil {
		return nil, fmt.Errorf("geocoding: failed to get geocoding: %w", err)
	}

	// Tag every resolved location entry so it is stored as geocoding meta.
	// (idiomatic `for i := range`, rather than `for i, _ := range`)
	for i := range metas {
		metas[i].Type = driver.MetaTypeGeocoding
	}

	return metas, nil
}
// getGeocoding queries the Mapbox reverse geocoding API for the given
// coordinates and converts the context hierarchy of the first matched feature
// into media meta entries, ordered from most to least specific level.
// It returns (nil, nil) when the API yields no features.
func (e *geocodingExtractor) getGeocoding(ctx context.Context, lat, lng float64, language string) ([]driver.MediaMeta, error) {
	query := url.Values{}
	query.Add("longitude", fmt.Sprintf("%f", lng))
	query.Add("latitude", fmt.Sprintf("%f", lat))
	query.Add("limit", "1")
	query.Add("access_token", e.settings.MediaMetaGeocodingMapboxAK(ctx))
	if language != "" {
		query.Add("language", language)
	}

	resp, err := e.client.Request(
		"GET",
		mapBoxURL+"?"+query.Encode(),
		nil,
		request.WithContext(ctx),
		request.WithLogger(e.l),
	).CheckHTTPResponse(http.StatusOK).GetResponse()
	if err != nil {
		return nil, fmt.Errorf("failed to get geocoding from mapbox: %w", err)
	}

	var geocoding MapboxGeocodingResponse
	if err := json.Unmarshal([]byte(resp), &geocoding); err != nil {
		return nil, fmt.Errorf("failed to unmarshal geocoding from mapbox: %w", err)
	}

	if len(geocoding.Features) == 0 {
		return nil, nil
	}

	// Walk the context hierarchy table-driven, keeping the most-to-least
	// specific ordering of the level constants.
	contexts := geocoding.Features[0].Properties.Context
	levels := []struct {
		key     string
		feature *ContextFeature
	}{
		{Street, contexts.Street},
		{Locality, contexts.Locality},
		{Place, contexts.Place},
		{District, contexts.District},
		{Region, contexts.Region},
		{Country, contexts.Country},
	}

	metas := make([]driver.MediaMeta, 0)
	for _, level := range levels {
		if level.feature != nil {
			metas = append(metas, driver.MediaMeta{
				Key:   level.key,
				Value: level.feature.Name,
			})
		}
	}

	return metas, nil
}
// MapboxGeocodingResponse represents the top-level response from the Mapbox
// Geocoding API (a GeoJSON FeatureCollection).
type MapboxGeocodingResponse struct {
	Type        string    `json:"type"`        // "FeatureCollection"
	Features    []Feature `json:"features"`    // Array of feature objects
	Attribution string    `json:"attribution"` // Attribution to Mapbox
}

// Feature represents a feature object in the geocoding response.
type Feature struct {
	ID         string     `json:"id"`         // Feature ID (same as mapbox_id)
	Type       string     `json:"type"`       // "Feature"
	Geometry   Geometry   `json:"geometry"`   // Spatial geometry of the feature
	Properties Properties `json:"properties"` // Feature details
}

// Geometry represents the spatial geometry of a feature.
type Geometry struct {
	Type        string    `json:"type"`        // "Point"
	Coordinates []float64 `json:"coordinates"` // [longitude, latitude]
}

// Properties contains the feature's detailed information.
type Properties struct {
	MapboxID       string      `json:"mapbox_id"`       // Unique feature identifier
	FeatureType    string      `json:"feature_type"`    // Type of feature (country, region, etc.)
	Name           string      `json:"name"`            // Formatted address string
	NamePreferred  string      `json:"name_preferred"`  // Canonical or common alias
	PlaceFormatted string      `json:"place_formatted"` // Formatted context string
	FullAddress    string      `json:"full_address"`    // Full formatted address
	Context        Context     `json:"context"`         // Hierarchy of parent features
	Coordinates    Coordinates `json:"coordinates"`     // Geographic position and accuracy
	BBox           []float64   `json:"bbox,omitempty"`  // Bounding box [minLon,minLat,maxLon,maxLat]
	MatchCode      MatchCode   `json:"match_code"`      // Metadata about result matching
}

// Context represents the hierarchy of encompassing parent features,
// from street (most specific) up to country (least specific). Absent
// levels are nil.
type Context struct {
	Country      *ContextFeature `json:"country,omitempty"`
	Region       *ContextFeature `json:"region,omitempty"`
	Postcode     *ContextFeature `json:"postcode,omitempty"`
	District     *ContextFeature `json:"district,omitempty"`
	Place        *ContextFeature `json:"place,omitempty"`
	Locality     *ContextFeature `json:"locality,omitempty"`
	Neighborhood *ContextFeature `json:"neighborhood,omitempty"`
	Street       *ContextFeature `json:"street,omitempty"`
}

// ContextFeature represents a single feature in the context hierarchy.
type ContextFeature struct {
	ID            string `json:"id"`
	Name          string `json:"name"`
	NamePreferred string `json:"name_preferred,omitempty"`
	MapboxID      string `json:"mapbox_id"`
}

// Coordinates represents geographical position and accuracy information.
type Coordinates struct {
	Longitude      float64         `json:"longitude"`                 // Longitude of result
	Latitude       float64         `json:"latitude"`                  // Latitude of result
	Accuracy       string          `json:"accuracy,omitempty"`        // Accuracy metric for address results
	RoutablePoints []RoutablePoint `json:"routable_points,omitempty"` // Array of routable points
}

// RoutablePoint represents a routable point for an address feature.
type RoutablePoint struct {
	Name      string  `json:"name"`      // Name of the routable point
	Longitude float64 `json:"longitude"` // Longitude coordinate
	Latitude  float64 `json:"latitude"`  // Latitude coordinate
}

// MatchCode contains metadata about how result components match the input query.
// Intentionally empty for now: the upstream match_code object is not yet consumed.
// Add specific match code fields as needed based on Mapbox documentation;
// this structure may vary depending on the specific match codes returned.
type MatchCode struct {
}

@ -48,7 +48,12 @@ func (a *musicExtractor) Exts() []string {
return audioExts
}
func (a *musicExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error) {
func (a *musicExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error) {
option := &option{}
for _, opt := range opts {
opt.apply(option)
}
localLimit, remoteLimit := a.settings.MediaMetaMusicSizeLimit(ctx)
if err := checkFileSize(localLimit, remoteLimit, source); err != nil {
return nil, err

@ -253,6 +253,8 @@ const (
CodeNodeUsedByStoragePolicy = 40086
// CodeDomainNotLicensed domain not licensed
CodeDomainNotLicensed = 40087
// CodeAnonymouseAccessDenied 匿名用户无法访问分享
CodeAnonymouseAccessDenied = 40088
// CodeDBError 数据库操作失败
CodeDBError = 50001
// CodeEncryptError 加密失败

@ -2,13 +2,14 @@ package setting
import (
"context"
"os"
"strings"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/samber/lo"
"os"
"strings"
)
const (

@ -2,6 +2,7 @@ package setting
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/url"
@ -10,7 +11,6 @@ import (
"time"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
@ -102,6 +102,10 @@ type (
MediaMetaFFProbeSizeLimit(ctx context.Context) (int64, int64)
// MediaMetaFFProbePath returns the path of ffprobe executable.
MediaMetaFFProbePath(ctx context.Context) string
// MediaMetaGeocodingEnabled returns true if media meta geocoding is enabled.
MediaMetaGeocodingEnabled(ctx context.Context) bool
// MediaMetaGeocodingMapboxAK returns the Mapbox access token.
MediaMetaGeocodingMapboxAK(ctx context.Context) string
// ThumbSize returns the size limit of thumbnails.
ThumbSize(ctx context.Context) (int, int)
// ThumbEncode returns the thumbnail encoding settings.
@ -196,6 +200,22 @@ type (
LibRawThumbExts(ctx context.Context) []string
// LibRawThumbPath returns the path of libraw executable.
LibRawThumbPath(ctx context.Context) string
// CustomProps returns the custom props settings.
CustomProps(ctx context.Context) []types.CustomProps
// CustomNavItems returns the custom nav items settings.
CustomNavItems(ctx context.Context) []CustomNavItem
// CustomHTML returns the custom HTML settings.
CustomHTML(ctx context.Context) *CustomHTML
// FFMpegExtraArgs returns the extra arguments of ffmpeg thumb generator.
FFMpegExtraArgs(ctx context.Context) string
// MasterEncryptKey returns the master encrypt key.
MasterEncryptKey(ctx context.Context) []byte
// MasterEncryptKeyVault returns the master encrypt key vault type.
MasterEncryptKeyVault(ctx context.Context) MasterEncryptKeyVaultType
// MasterEncryptKeyFile returns the master encrypt key file.
MasterEncryptKeyFile(ctx context.Context) string
// ShowEncryptionStatus returns true if encryption status is shown.
ShowEncryptionStatus(ctx context.Context) bool
}
UseFirstSiteUrlCtxKey = struct{}
)
@ -223,6 +243,51 @@ type (
}
)
// ShowEncryptionStatus returns whether file encryption status should be
// shown. Defaults to true when the setting is absent.
func (s *settingProvider) ShowEncryptionStatus(ctx context.Context) bool {
	return s.getBoolean(ctx, "show_encryption_status", true)
}
// MasterEncryptKeyFile returns the path of the master encrypt key file,
// or an empty string if not configured.
func (s *settingProvider) MasterEncryptKeyFile(ctx context.Context) string {
	return s.getString(ctx, "encrypt_master_key_file", "")
}
// MasterEncryptKeyVault returns the master encrypt key vault type.
// Defaults to "setting" when the setting is absent.
func (s *settingProvider) MasterEncryptKeyVault(ctx context.Context) MasterEncryptKeyVaultType {
	return MasterEncryptKeyVaultType(s.getString(ctx, "encrypt_master_key_vault", "setting"))
}
// MasterEncryptKey returns the base64-decoded master encryption key from
// settings. An unset or undecodable value is treated as no key (nil).
func (s *settingProvider) MasterEncryptKey(ctx context.Context) []byte {
	decoded, decodeErr := base64.StdEncoding.DecodeString(s.getString(ctx, "encrypt_master_key", ""))
	if decodeErr != nil {
		return nil
	}
	return decoded
}
// CustomHTML returns the administrator-defined HTML snippets injected into
// different parts of the page; missing settings yield empty strings.
func (s *settingProvider) CustomHTML(ctx context.Context) *CustomHTML {
	res := &CustomHTML{}
	res.HeadlessFooter = s.getString(ctx, "headless_footer_html", "")
	res.HeadlessBody = s.getString(ctx, "headless_bottom_html", "")
	res.SidebarBottom = s.getString(ctx, "sidebar_bottom_html", "")
	return res
}
// CustomNavItems returns the custom navigation items stored as a JSON array
// in settings. Malformed JSON yields an empty list rather than an error.
func (s *settingProvider) CustomNavItems(ctx context.Context) []CustomNavItem {
	var items []CustomNavItem
	raw := s.getString(ctx, "custom_nav_items", "[]")
	if json.Unmarshal([]byte(raw), &items) != nil {
		return []CustomNavItem{}
	}
	return items
}
// CustomProps returns the custom file props stored as a JSON array in
// settings. Malformed JSON yields an empty list rather than an error.
func (s *settingProvider) CustomProps(ctx context.Context) []types.CustomProps {
	var props []types.CustomProps
	raw := s.getString(ctx, "custom_props", "[]")
	if json.Unmarshal([]byte(raw), &props) != nil {
		return []types.CustomProps{}
	}
	return props
}
// License returns the raw license string stored in settings, or an empty
// string if none is set.
func (s *settingProvider) License(ctx context.Context) string {
	return s.getString(ctx, "license", "")
}
@ -274,6 +339,7 @@ func (s *settingProvider) MapSetting(ctx context.Context) *MapSetting {
return &MapSetting{
Provider: MapProvider(s.getString(ctx, "map_provider", "openstreetmap")),
GoogleTileType: MapGoogleTileType(s.getString(ctx, "map_google_tile_type", "roadmap")),
MapboxAK: s.getString(ctx, "map_mapbox_ak", ""),
}
}
@ -376,6 +442,10 @@ func (s *settingProvider) FFMpegThumbSeek(ctx context.Context) string {
return s.getString(ctx, "thumb_ffmpeg_seek", "00:00:01.00")
}
// FFMpegExtraArgs returns the extra arguments configured for the ffmpeg
// thumb generator; empty by default.
func (s *settingProvider) FFMpegExtraArgs(ctx context.Context) string {
	return s.getString(ctx, "thumb_ffmpeg_extra_args", "")
}
// FFMpegThumbMaxSize returns the maximum source size for ffmpeg thumbnail
// generation. Defaults to 10737418240 (10 GiB).
func (s *settingProvider) FFMpegThumbMaxSize(ctx context.Context) int64 {
	return s.getInt64(ctx, "thumb_ffmpeg_max_size", 10737418240)
}
@ -440,7 +510,7 @@ func (s *settingProvider) ThumbEncode(ctx context.Context) *ThumbEncode {
}
func (s *settingProvider) ThumbEntitySuffix(ctx context.Context) string {
return s.getString(ctx, "thumb_entity_suffix", "._thumb")
return s.getString(ctx, "thumb_entity_suffix", "{blob_path}/{blob_name}._thumb")
}
func (s *settingProvider) ThumbSlaveSidecarSuffix(ctx context.Context) string {
@ -491,6 +561,14 @@ func (s *settingProvider) MediaMetaEnabled(ctx context.Context) bool {
return s.getBoolean(ctx, "media_meta", true)
}
// MediaMetaGeocodingEnabled returns true if reverse geocoding of media GPS
// metadata is enabled. Disabled by default.
func (s *settingProvider) MediaMetaGeocodingEnabled(ctx context.Context) bool {
	return s.getBoolean(ctx, "media_meta_geocoding", false)
}
// MediaMetaGeocodingMapboxAK returns the Mapbox access token used for
// reverse geocoding requests; empty when not configured.
func (s *settingProvider) MediaMetaGeocodingMapboxAK(ctx context.Context) string {
	return s.getString(ctx, "media_meta_geocoding_mapbox_ak", "")
}
// PublicResourceMaxAge returns the max-age value used for public resources
// (presumably seconds for cache headers — confirm at call sites). Defaults to 0.
func (s *settingProvider) PublicResourceMaxAge(ctx context.Context) int {
	return s.getInt(ctx, "public_resource_maxage", 0)
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save