Merge branch 'master' into feature-p2p-quic-server

pull/2507/head
eason.ran 6 months ago
commit 1202b50bcb

.gitignore

@ -31,4 +31,6 @@ conf/conf.ini
dist/
data/
tmp/
tmp/
.devcontainer/
cloudreve

@ -38,7 +38,7 @@
## :sparkles: Features
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu, Aliyun OSS, Tencent COS, Upyun, OneDrive.
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu, Aliyun OSS, Tencent COS, Upyun.
- :outbox_tray: Upload/Download via direct transmission from client to storage providers.
- 💾 Integrate with Aria2/qBittorrent to download files in background, use multiple download nodes to share the load.
- 📚 Compress/Extract files, download files in batch.

@ -3,7 +3,7 @@ package constants
// These values will be injected at build time, DO NOT EDIT.
// BackendVersion is the current backend version number.
var BackendVersion = "4.0.0-alpha.1"
var BackendVersion = "4.1.0"
// IsPro indicates whether this is the Pro edition.
var IsPro = "false"

@ -329,11 +329,7 @@ func (d *dependency) KV() cache.Driver {
d.kv = cache.NewRedisStore(
d.Logger(),
10,
config.Network,
config.Server,
config.User,
config.Password,
config.DB,
config,
)
} else {
d.kv = cache.NewMemoStore(util.DataPath(cache.DefaultCacheFile), d.Logger())

@ -28,6 +28,11 @@ func Init() error {
if confDBType == "sqlite3" {
confDBType = "sqlite"
}
// Compatibility with the "mariadb" database type
if confDBType == "mariadb" {
confDBType = "mysql"
}
switch confDBType {
case "UNSET", "sqlite":

@ -1 +1 @@
Subproject commit ededea6c45922365d92c2bf576fb1c25e632594d
Subproject commit e9b91c4e03654d5968f8a676a13fc4badf530b5d

@ -4,6 +4,7 @@ go 1.23.0
require (
entgo.io/ent v0.13.0
github.com/Masterminds/semver/v3 v3.3.1
github.com/abslant/gzip v0.0.9
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/aws/aws-sdk-go v1.31.5
@ -15,7 +16,7 @@ require (
github.com/dsoprea/go-png-image-structure v0.0.0-20210512210324-29b889a6093d
github.com/dsoprea/go-tiff-image-structure v0.0.0-20221003165014-8ecc4f52edca
github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf
github.com/fatih/color v1.9.0
github.com/fatih/color v1.18.0
github.com/gin-contrib/cors v1.3.0
github.com/gin-contrib/sessions v1.0.2
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2
@ -115,7 +116,7 @@ require (
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect

@ -76,6 +76,8 @@ github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@ -274,6 +276,8 @@ github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@ -666,6 +670,8 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@ -675,6 +681,7 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@ -1275,6 +1282,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@ -5,20 +5,12 @@ import (
rawsql "database/sql"
"database/sql/driver"
"fmt"
"os"
"time"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/node"
_ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
"github.com/cloudreve/Cloudreve/v4/ent/setting"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/inventory/debug"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
@ -60,51 +52,65 @@ func NewRawEntClient(l logging.Logger, config conf.ConfigProvider) (*ent.Client,
if confDBType == conf.SQLite3DB || confDBType == "" {
confDBType = conf.SQLiteDB
}
if confDBType == conf.MariaDB {
confDBType = conf.MySqlDB
}
var (
err error
client *sql.Driver
)
switch confDBType {
case conf.SQLiteDB:
dbFile := util.RelativePath(dbConfig.DBFile)
l.Info("Connect to SQLite database %q.", dbFile)
client, err = sql.Open("sqlite3", util.RelativePath(dbConfig.DBFile))
case conf.PostgresDB:
l.Info("Connect to Postgres database %q.", dbConfig.Host)
client, err = sql.Open("postgres", fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=disable",
dbConfig.Host,
dbConfig.User,
dbConfig.Password,
dbConfig.Name,
dbConfig.Port))
case conf.MySqlDB, conf.MsSqlDB:
l.Info("Connect to MySQL/SQLServer database %q.", dbConfig.Host)
var host string
if dbConfig.UnixSocket {
host = fmt.Sprintf("unix(%s)",
dbConfig.Host)
} else {
host = fmt.Sprintf("(%s:%d)",
// Check if the database type is supported.
if confDBType != conf.SQLiteDB && confDBType != conf.MySqlDB && confDBType != conf.PostgresDB {
return nil, fmt.Errorf("unsupported database type: %s", confDBType)
}
// If Database connection string provided, use it directly.
if dbConfig.DatabaseURL != "" {
l.Info("Connect to database with connection string %q.", dbConfig.DatabaseURL)
client, err = sql.Open(string(confDBType), dbConfig.DatabaseURL)
} else {
switch confDBType {
case conf.SQLiteDB:
dbFile := util.RelativePath(dbConfig.DBFile)
l.Info("Connect to SQLite database %q.", dbFile)
client, err = sql.Open("sqlite3", util.RelativePath(dbConfig.DBFile))
case conf.PostgresDB:
l.Info("Connect to Postgres database %q.", dbConfig.Host)
client, err = sql.Open("postgres", fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=disable",
dbConfig.Host,
dbConfig.Port)
dbConfig.User,
dbConfig.Password,
dbConfig.Name,
dbConfig.Port))
case conf.MySqlDB, conf.MsSqlDB:
l.Info("Connect to MySQL/SQLServer database %q.", dbConfig.Host)
var host string
if dbConfig.UnixSocket {
host = fmt.Sprintf("unix(%s)",
dbConfig.Host)
} else {
host = fmt.Sprintf("(%s:%d)",
dbConfig.Host,
dbConfig.Port)
}
client, err = sql.Open(string(confDBType), fmt.Sprintf("%s:%s@%s/%s?charset=%s&parseTime=True&loc=Local",
dbConfig.User,
dbConfig.Password,
host,
dbConfig.Name,
dbConfig.Charset))
default:
return nil, fmt.Errorf("unsupported database type %q", confDBType)
}
client, err = sql.Open(string(confDBType), fmt.Sprintf("%s:%s@%s/%s?charset=%s&parseTime=True&loc=Local",
dbConfig.User,
dbConfig.Password,
host,
dbConfig.Name,
dbConfig.Charset))
default:
return nil, fmt.Errorf("unsupported database type %q", confDBType)
}
if err != nil {
return nil, fmt.Errorf("failed to open database: %w", err)
}
if err != nil {
return nil, fmt.Errorf("failed to open database: %w", err)
}
// Set connection pool
db := client.DB()
db.SetMaxIdleConns(50)
@ -153,302 +159,3 @@ func (d sqlite3Driver) Open(name string) (conn driver.Conn, err error) {
func init() {
rawsql.Register("sqlite3", sqlite3Driver{Driver: &sqlite.Driver{}})
}
// needMigration exams if required schema version is satisfied.
func needMigration(client *ent.Client, ctx context.Context, requiredDbVersion string) bool {
c, _ := client.Setting.Query().Where(setting.NameEQ(DBVersionPrefix + requiredDbVersion)).Count(ctx)
return c == 0
}
func migrate(l logging.Logger, client *ent.Client, ctx context.Context, kv cache.Driver, requiredDbVersion string) error {
l.Info("Start initializing database schema...")
l.Info("Creating basic table schema...")
if err := client.Schema.Create(ctx); err != nil {
return fmt.Errorf("Failed creating schema resources: %w", err)
}
migrateDefaultSettings(l, client, ctx, kv)
if err := migrateDefaultStoragePolicy(l, client, ctx); err != nil {
return fmt.Errorf("failed migrating default storage policy: %w", err)
}
if err := migrateSysGroups(l, client, ctx); err != nil {
return fmt.Errorf("failed migrating default storage policy: %w", err)
}
client.Setting.Create().SetName(DBVersionPrefix + requiredDbVersion).SetValue("installed").Save(ctx)
return nil
}
func migrateDefaultSettings(l logging.Logger, client *ent.Client, ctx context.Context, kv cache.Driver) {
// clean kv cache
if err := kv.DeleteAll(); err != nil {
l.Warning("Failed to remove all KV entries while schema migration: %s", err)
}
// List existing settings into a map
existingSettings := make(map[string]struct{})
settings, err := client.Setting.Query().All(ctx)
if err != nil {
l.Warning("Failed to query existing settings: %s", err)
}
for _, s := range settings {
existingSettings[s.Name] = struct{}{}
}
l.Info("Insert default settings...")
for k, v := range DefaultSettings {
if _, ok := existingSettings[k]; ok {
l.Debug("Skip inserting setting %s, already exists.", k)
continue
}
if override, ok := os.LookupEnv(EnvDefaultOverwritePrefix + k); ok {
l.Info("Override default setting %q with env value %q", k, override)
v = override
}
client.Setting.Create().SetName(k).SetValue(v).SaveX(ctx)
}
}
func migrateDefaultStoragePolicy(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.StoragePolicy.Query().Where(storagepolicy.ID(1)).First(ctx); err == nil {
l.Info("Default storage policy (ID=1) already exists, skip migrating.")
return nil
}
l.Info("Insert default storage policy...")
if _, err := client.StoragePolicy.Create().
SetName("Default storage policy").
SetType(types.PolicyTypeLocal).
SetDirNameRule(util.DataPath("uploads/{uid}/{path}")).
SetFileNameRule("{uid}_{randomkey8}_{originname}").
SetSettings(&types.PolicySetting{
ChunkSize: 25 << 20, // 25MB
PreAllocate: true,
}).
Save(ctx); err != nil {
return fmt.Errorf("failed to create default storage policy: %w", err)
}
return nil
}
func migrateSysGroups(l logging.Logger, client *ent.Client, ctx context.Context) error {
if err := migrateAdminGroup(l, client, ctx); err != nil {
return err
}
if err := migrateUserGroup(l, client, ctx); err != nil {
return err
}
if err := migrateAnonymousGroup(l, client, ctx); err != nil {
return err
}
if err := migrateMasterNode(l, client, ctx); err != nil {
return err
}
return nil
}
func migrateAdminGroup(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.Group.Query().Where(group.ID(1)).First(ctx); err == nil {
l.Info("Default admin group (ID=1) already exists, skip migrating.")
return nil
}
l.Info("Insert default admin group...")
permissions := &boolset.BooleanSet{}
boolset.Sets(map[types.GroupPermission]bool{
types.GroupPermissionIsAdmin: true,
types.GroupPermissionShare: true,
types.GroupPermissionWebDAV: true,
types.GroupPermissionWebDAVProxy: true,
types.GroupPermissionArchiveDownload: true,
types.GroupPermissionArchiveTask: true,
types.GroupPermissionShareDownload: true,
types.GroupPermissionRemoteDownload: true,
types.GroupPermissionRedirectedSource: true,
types.GroupPermissionAdvanceDelete: true,
types.GroupPermissionIgnoreFileOwnership: true,
// TODO: review default permission
}, permissions)
if _, err := client.Group.Create().
SetName("Admin").
SetStoragePoliciesID(1).
SetMaxStorage(1 * constants.TB). // 1 TB default storage
SetPermissions(permissions).
SetSettings(&types.GroupSetting{
SourceBatchSize: 1000,
Aria2BatchSize: 50,
MaxWalkedFiles: 100000,
TrashRetention: 7 * 24 * 3600,
RedirectedSource: true,
}).
Save(ctx); err != nil {
return fmt.Errorf("failed to create default admin group: %w", err)
}
return nil
}
func migrateUserGroup(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.Group.Query().Where(group.ID(2)).First(ctx); err == nil {
l.Info("Default user group (ID=2) already exists, skip migrating.")
return nil
}
l.Info("Insert default user group...")
permissions := &boolset.BooleanSet{}
boolset.Sets(map[types.GroupPermission]bool{
types.GroupPermissionShare: true,
types.GroupPermissionShareDownload: true,
types.GroupPermissionRedirectedSource: true,
}, permissions)
if _, err := client.Group.Create().
SetName("User").
SetStoragePoliciesID(1).
SetMaxStorage(1 * constants.GB). // 1 GB default storage
SetPermissions(permissions).
SetSettings(&types.GroupSetting{
SourceBatchSize: 10,
Aria2BatchSize: 1,
MaxWalkedFiles: 100000,
TrashRetention: 7 * 24 * 3600,
RedirectedSource: true,
}).
Save(ctx); err != nil {
return fmt.Errorf("failed to create default user group: %w", err)
}
return nil
}
func migrateAnonymousGroup(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.Group.Query().Where(group.ID(AnonymousGroupID)).First(ctx); err == nil {
l.Info("Default anonymous group (ID=3) already exists, skip migrating.")
return nil
}
l.Info("Insert default anonymous group...")
permissions := &boolset.BooleanSet{}
boolset.Sets(map[types.GroupPermission]bool{
types.GroupPermissionIsAnonymous: true,
types.GroupPermissionShareDownload: true,
}, permissions)
if _, err := client.Group.Create().
SetName("Anonymous").
SetPermissions(permissions).
SetSettings(&types.GroupSetting{
MaxWalkedFiles: 100000,
RedirectedSource: true,
}).
Save(ctx); err != nil {
return fmt.Errorf("failed to create default anonymous group: %w", err)
}
return nil
}
func migrateMasterNode(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.Node.Query().Where(node.TypeEQ(node.TypeMaster)).First(ctx); err == nil {
l.Info("Default master node already exists, skip migrating.")
return nil
}
capabilities := &boolset.BooleanSet{}
boolset.Sets(map[types.NodeCapability]bool{
types.NodeCapabilityCreateArchive: true,
types.NodeCapabilityExtractArchive: true,
types.NodeCapabilityRemoteDownload: true,
}, capabilities)
stm := client.Node.Create().
SetType(node.TypeMaster).
SetCapabilities(capabilities).
SetName("Master").
SetSettings(&types.NodeSetting{
Provider: types.DownloaderProviderAria2,
}).
SetStatus(node.StatusActive)
_, enableAria2 := os.LookupEnv(EnvEnableAria2)
if enableAria2 {
l.Info("Aria2 is override as enabled.")
stm.SetSettings(&types.NodeSetting{
Provider: types.DownloaderProviderAria2,
Aria2Setting: &types.Aria2Setting{
Server: "http://127.0.0.1:6800/jsonrpc",
},
})
}
l.Info("Insert default master node...")
if _, err := stm.Save(ctx); err != nil {
return fmt.Errorf("failed to create default master node: %w", err)
}
return nil
}
func createMockData(client *ent.Client, ctx context.Context) {
//userCount := 100
//folderCount := 10000
//fileCount := 25000
//
//// create users
//pwdDigest, _ := digestPassword("52121225")
//userCreates := make([]*ent.UserCreate, userCount)
//for i := 0; i < userCount; i++ {
// nick := uuid.Must(uuid.NewV4()).String()
// userCreates[i] = client.User.Create().
// SetEmail(nick + "@cloudreve.org").
// SetNick(nick).
// SetPassword(pwdDigest).
// SetStatus(user.StatusActive).
// SetGroupID(1)
//}
//users, err := client.User.CreateBulk(userCreates...).Save(ctx)
//if err != nil {
// panic(err)
//}
//
//// Create root folder
//rootFolderCreates := make([]*ent.FileCreate, userCount)
//folderIds := make([][]int, 0, folderCount*userCount+userCount)
//for i, user := range users {
// rootFolderCreates[i] = client.File.Create().
// SetName(RootFolderName).
// SetOwnerID(user.ID).
// SetType(int(FileTypeFolder))
//}
//rootFolders, err := client.File.CreateBulk(rootFolderCreates...).Save(ctx)
//for _, rootFolders := range rootFolders {
// folderIds = append(folderIds, []int{rootFolders.ID, rootFolders.OwnerID})
//}
//if err != nil {
// panic(err)
//}
//
//// create random folder
//for i := 0; i < folderCount*userCount; i++ {
// parent := lo.Sample(folderIds)
// res := client.File.Create().
// SetName(uuid.Must(uuid.NewV4()).String()).
// SetType(int(FileTypeFolder)).
// SetOwnerID(parent[1]).
// SetFileChildren(parent[0]).
// SaveX(ctx)
// folderIds = append(folderIds, []int{res.ID, res.OwnerID})
//}
for i := 0; i < 255; i++ {
fmt.Printf("%d/", i)
}
}

@ -5,6 +5,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/schema"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
)
@ -16,6 +17,8 @@ type (
GetByNameID(ctx context.Context, id int, name string) (*ent.DirectLink, error)
// GetByID get direct link by id
GetByID(ctx context.Context, id int) (*ent.DirectLink, error)
// Delete delete direct link by id
Delete(ctx context.Context, id int) error
}
LoadDirectLinkFile struct{}
)
@ -60,6 +63,12 @@ func (d *directLinkClient) GetByNameID(ctx context.Context, id int, name string)
return res, nil
}
func (d *directLinkClient) Delete(ctx context.Context, id int) error {
ctx = schema.SkipSoftDelete(ctx)
_, err := d.client.DirectLink.Delete().Where(directlink.ID(id)).Exec(ctx)
return err
}
func withDirectLinkEagerLoading(ctx context.Context, q *ent.DirectLinkQuery) *ent.DirectLinkQuery {
if v, ok := ctx.Value(LoadDirectLinkFile{}).(bool); ok && v {
q.WithFile(func(m *ent.FileQuery) {

@ -192,7 +192,7 @@ type FileClient interface {
// UnlinkEntity unlinks an entity from a file
UnlinkEntity(ctx context.Context, entity *ent.Entity, file *ent.File, owner *ent.User) (StorageDiff, error)
// CreateDirectLink creates a direct link for a file
CreateDirectLink(ctx context.Context, fileID int, name string, speed int) (*ent.DirectLink, error)
CreateDirectLink(ctx context.Context, fileID int, name string, speed int, reuse bool) (*ent.DirectLink, error)
// CountByTimeRange counts files created in a given time range
CountByTimeRange(ctx context.Context, start, end *time.Time) (int, error)
// CountEntityByTimeRange counts entities created in a given time range
@ -322,13 +322,15 @@ func (f *fileClient) CountEntityByStoragePolicyID(ctx context.Context, storagePo
return v[0].Count, v[0].Sum, nil
}
func (f *fileClient) CreateDirectLink(ctx context.Context, file int, name string, speed int) (*ent.DirectLink, error) {
// Find existed
existed, err := f.client.DirectLink.
Query().
Where(directlink.FileID(file), directlink.Name(name), directlink.Speed(speed)).First(ctx)
if err == nil {
return existed, nil
func (f *fileClient) CreateDirectLink(ctx context.Context, file int, name string, speed int, reuse bool) (*ent.DirectLink, error) {
if reuse {
// Find existed
existed, err := f.client.DirectLink.
Query().
Where(directlink.FileID(file), directlink.Name(name), directlink.Speed(speed)).First(ctx)
if err == nil {
return existed, nil
}
}
return f.client.DirectLink.

@ -77,27 +77,34 @@ func (c *groupClient) ListAll(ctx context.Context) ([]*ent.Group, error) {
}
func (c *groupClient) Upsert(ctx context.Context, group *ent.Group) (*ent.Group, error) {
if group.ID == 0 {
return c.client.Group.Create().
stm := c.client.Group.Create().
SetName(group.Name).
SetMaxStorage(group.MaxStorage).
SetSpeedLimit(group.SpeedLimit).
SetPermissions(group.Permissions).
SetSettings(group.Settings).
SetStoragePoliciesID(group.Edges.StoragePolicies.ID).
Save(ctx)
SetSettings(group.Settings)
if group.Edges.StoragePolicies != nil && group.Edges.StoragePolicies.ID > 0 {
stm.SetStoragePolicyID(group.Edges.StoragePolicies.ID)
}
return stm.Save(ctx)
}
res, err := c.client.Group.UpdateOne(group).
stm := c.client.Group.UpdateOne(group).
SetName(group.Name).
SetMaxStorage(group.MaxStorage).
SetSpeedLimit(group.SpeedLimit).
SetPermissions(group.Permissions).
SetSettings(group.Settings).
ClearStoragePolicies().
SetStoragePoliciesID(group.Edges.StoragePolicies.ID).
Save(ctx)
ClearStoragePolicies()
if group.Edges.StoragePolicies != nil && group.Edges.StoragePolicies.ID > 0 {
stm.SetStoragePolicyID(group.Edges.StoragePolicies.ID)
}
res, err := stm.Save(ctx)
if err != nil {
return nil, err
}

@ -0,0 +1,416 @@
package inventory
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/Masterminds/semver/v3"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/node"
"github.com/cloudreve/Cloudreve/v4/ent/setting"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/samber/lo"
)
// needMigration checks if the required schema version is already satisfied.
func needMigration(client *ent.Client, ctx context.Context, requiredDbVersion string) bool {
c, _ := client.Setting.Query().Where(setting.NameEQ(DBVersionPrefix + requiredDbVersion)).Count(ctx)
return c == 0
}
func migrate(l logging.Logger, client *ent.Client, ctx context.Context, kv cache.Driver, requiredDbVersion string) error {
l.Info("Start initializing database schema...")
l.Info("Creating basic table schema...")
if err := client.Schema.Create(ctx); err != nil {
return fmt.Errorf("Failed creating schema resources: %w", err)
}
migrateDefaultSettings(l, client, ctx, kv)
if err := migrateDefaultStoragePolicy(l, client, ctx); err != nil {
return fmt.Errorf("failed migrating default storage policy: %w", err)
}
if err := migrateSysGroups(l, client, ctx); err != nil {
return fmt.Errorf("failed migrating default storage policy: %w", err)
}
if err := applyPatches(l, client, ctx, requiredDbVersion); err != nil {
return fmt.Errorf("failed applying schema patches: %w", err)
}
client.Setting.Create().SetName(DBVersionPrefix + requiredDbVersion).SetValue("installed").Save(ctx)
return nil
}
func migrateDefaultSettings(l logging.Logger, client *ent.Client, ctx context.Context, kv cache.Driver) {
// clean kv cache
if err := kv.DeleteAll(); err != nil {
l.Warning("Failed to remove all KV entries while schema migration: %s", err)
}
// List existing settings into a map
existingSettings := make(map[string]struct{})
settings, err := client.Setting.Query().All(ctx)
if err != nil {
l.Warning("Failed to query existing settings: %s", err)
}
for _, s := range settings {
existingSettings[s.Name] = struct{}{}
}
l.Info("Insert default settings...")
for k, v := range DefaultSettings {
if _, ok := existingSettings[k]; ok {
l.Debug("Skip inserting setting %s, already exists.", k)
continue
}
if override, ok := os.LookupEnv(EnvDefaultOverwritePrefix + k); ok {
l.Info("Override default setting %q with env value %q", k, override)
v = override
}
client.Setting.Create().SetName(k).SetValue(v).SaveX(ctx)
}
}
func migrateDefaultStoragePolicy(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.StoragePolicy.Query().Where(storagepolicy.ID(1)).First(ctx); err == nil {
l.Info("Default storage policy (ID=1) already exists, skip migrating.")
return nil
}
l.Info("Insert default storage policy...")
if _, err := client.StoragePolicy.Create().
SetName("Default storage policy").
SetType(types.PolicyTypeLocal).
SetDirNameRule(util.DataPath("uploads/{uid}/{path}")).
SetFileNameRule("{uid}_{randomkey8}_{originname}").
SetSettings(&types.PolicySetting{
ChunkSize: 25 << 20, // 25MB
PreAllocate: true,
}).
Save(ctx); err != nil {
return fmt.Errorf("failed to create default storage policy: %w", err)
}
return nil
}
func migrateSysGroups(l logging.Logger, client *ent.Client, ctx context.Context) error {
if err := migrateAdminGroup(l, client, ctx); err != nil {
return err
}
if err := migrateUserGroup(l, client, ctx); err != nil {
return err
}
if err := migrateAnonymousGroup(l, client, ctx); err != nil {
return err
}
if err := migrateMasterNode(l, client, ctx); err != nil {
return err
}
return nil
}
func migrateAdminGroup(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.Group.Query().Where(group.ID(1)).First(ctx); err == nil {
l.Info("Default admin group (ID=1) already exists, skip migrating.")
return nil
}
l.Info("Insert default admin group...")
permissions := &boolset.BooleanSet{}
boolset.Sets(map[types.GroupPermission]bool{
types.GroupPermissionIsAdmin: true,
types.GroupPermissionShare: true,
types.GroupPermissionWebDAV: true,
types.GroupPermissionWebDAVProxy: true,
types.GroupPermissionArchiveDownload: true,
types.GroupPermissionArchiveTask: true,
types.GroupPermissionShareDownload: true,
types.GroupPermissionRemoteDownload: true,
types.GroupPermissionRedirectedSource: true,
types.GroupPermissionAdvanceDelete: true,
types.GroupPermissionIgnoreFileOwnership: true,
// TODO: review default permission
}, permissions)
if _, err := client.Group.Create().
SetName("Admin").
SetStoragePoliciesID(1).
SetMaxStorage(1 * constants.TB). // 1 TB default storage
SetPermissions(permissions).
SetSettings(&types.GroupSetting{
SourceBatchSize: 1000,
Aria2BatchSize: 50,
MaxWalkedFiles: 100000,
TrashRetention: 7 * 24 * 3600,
RedirectedSource: true,
}).
Save(ctx); err != nil {
return fmt.Errorf("failed to create default admin group: %w", err)
}
return nil
}
func migrateUserGroup(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.Group.Query().Where(group.ID(2)).First(ctx); err == nil {
l.Info("Default user group (ID=2) already exists, skip migrating.")
return nil
}
l.Info("Insert default user group...")
permissions := &boolset.BooleanSet{}
boolset.Sets(map[types.GroupPermission]bool{
types.GroupPermissionShare: true,
types.GroupPermissionShareDownload: true,
types.GroupPermissionRedirectedSource: true,
}, permissions)
if _, err := client.Group.Create().
SetName("User").
SetStoragePoliciesID(1).
SetMaxStorage(1 * constants.GB). // 1 GB default storage
SetPermissions(permissions).
SetSettings(&types.GroupSetting{
SourceBatchSize: 10,
Aria2BatchSize: 1,
MaxWalkedFiles: 100000,
TrashRetention: 7 * 24 * 3600,
RedirectedSource: true,
}).
Save(ctx); err != nil {
return fmt.Errorf("failed to create default user group: %w", err)
}
return nil
}
func migrateAnonymousGroup(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.Group.Query().Where(group.ID(AnonymousGroupID)).First(ctx); err == nil {
l.Info("Default anonymous group (ID=3) already exists, skip migrating.")
return nil
}
l.Info("Insert default anonymous group...")
permissions := &boolset.BooleanSet{}
boolset.Sets(map[types.GroupPermission]bool{
types.GroupPermissionIsAnonymous: true,
types.GroupPermissionShareDownload: true,
}, permissions)
if _, err := client.Group.Create().
SetName("Anonymous").
SetPermissions(permissions).
SetSettings(&types.GroupSetting{
MaxWalkedFiles: 100000,
RedirectedSource: true,
}).
Save(ctx); err != nil {
return fmt.Errorf("failed to create default anonymous group: %w", err)
}
return nil
}
func migrateMasterNode(l logging.Logger, client *ent.Client, ctx context.Context) error {
if _, err := client.Node.Query().Where(node.TypeEQ(node.TypeMaster)).First(ctx); err == nil {
l.Info("Default master node already exists, skip migrating.")
return nil
}
capabilities := &boolset.BooleanSet{}
boolset.Sets(map[types.NodeCapability]bool{
types.NodeCapabilityCreateArchive: true,
types.NodeCapabilityExtractArchive: true,
types.NodeCapabilityRemoteDownload: true,
}, capabilities)
stm := client.Node.Create().
SetType(node.TypeMaster).
SetCapabilities(capabilities).
SetName("Master").
SetSettings(&types.NodeSetting{
Provider: types.DownloaderProviderAria2,
}).
SetStatus(node.StatusActive)
_, enableAria2 := os.LookupEnv(EnvEnableAria2)
if enableAria2 {
l.Info("Aria2 is override as enabled.")
stm.SetSettings(&types.NodeSetting{
Provider: types.DownloaderProviderAria2,
Aria2Setting: &types.Aria2Setting{
Server: "http://127.0.0.1:6800/jsonrpc",
},
})
}
l.Info("Insert default master node...")
if _, err := stm.Save(ctx); err != nil {
return fmt.Errorf("failed to create default master node: %w", err)
}
return nil
}
type (
PatchFunc func(l logging.Logger, client *ent.Client, ctx context.Context) error
Patch struct {
Name string
EndVersion string
Func PatchFunc
}
)
var patches = []Patch{
{
Name: "apply_default_excalidraw_viewer",
EndVersion: "4.1.0",
Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
// 1. Apply excalidraw file icons
// 1.1 Check if it's already applied
iconSetting, err := client.Setting.Query().Where(setting.Name("explorer_icons")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query explorer_icons setting: %w", err)
}
var icons []types.FileTypeIconSetting
if err := json.Unmarshal([]byte(iconSetting.Value), &icons); err != nil {
return fmt.Errorf("failed to unmarshal explorer_icons setting: %w", err)
}
iconExisted := false
for _, icon := range icons {
if lo.Contains(icon.Exts, "excalidraw") {
iconExisted = true
break
}
}
// 1.2 If not existed, add it
if !iconExisted {
// Find the default excalidraw icon setting
var defaultExcalidrawIcon types.FileTypeIconSetting
for _, icon := range defaultIcons {
if lo.Contains(icon.Exts, "excalidraw") {
defaultExcalidrawIcon = icon
break
}
}
icons = append(icons, defaultExcalidrawIcon)
newIconSetting, err := json.Marshal(icons)
if err != nil {
return fmt.Errorf("failed to marshal explorer_icons setting: %w", err)
}
if _, err := client.Setting.UpdateOne(iconSetting).SetValue(string(newIconSetting)).Save(ctx); err != nil {
return fmt.Errorf("failed to update explorer_icons setting: %w", err)
}
}
// 2. Apply default file viewers
// 2.1 Check if it's already applied
fileViewersSetting, err := client.Setting.Query().Where(setting.Name("file_viewers")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query file_viewers setting: %w", err)
}
var fileViewers []types.ViewerGroup
if err := json.Unmarshal([]byte(fileViewersSetting.Value), &fileViewers); err != nil {
return fmt.Errorf("failed to unmarshal file_viewers setting: %w", err)
}
fileViewerExisted := false
for _, viewer := range fileViewers[0].Viewers {
if viewer.ID == "excalidraw" {
fileViewerExisted = true
break
}
}
// 2.2 If not existed, add it
if !fileViewerExisted {
// Find the default excalidraw viewer setting
var defaultExcalidrawViewer types.Viewer
for _, viewer := range defaultFileViewers[0].Viewers {
if viewer.ID == "excalidraw" {
defaultExcalidrawViewer = viewer
break
}
}
fileViewers[0].Viewers = append(fileViewers[0].Viewers, defaultExcalidrawViewer)
newFileViewersSetting, err := json.Marshal(fileViewers)
if err != nil {
return fmt.Errorf("failed to marshal file_viewers setting: %w", err)
}
if _, err := client.Setting.UpdateOne(fileViewersSetting).SetValue(string(newFileViewersSetting)).Save(ctx); err != nil {
return fmt.Errorf("failed to update file_viewers setting: %w", err)
}
}
return nil
},
},
}
func applyPatches(l logging.Logger, client *ent.Client, ctx context.Context, requiredDbVersion string) error {
allVersionMarks, err := client.Setting.Query().Where(setting.NameHasPrefix(DBVersionPrefix)).All(ctx)
if err != nil {
return err
}
requiredDbVersion = strings.TrimSuffix(requiredDbVersion, "-pro")
// Find the latest applied version
var latestAppliedVersion *semver.Version
for _, v := range allVersionMarks {
v.Name = strings.TrimSuffix(v.Name, "-pro")
version, err := semver.NewVersion(strings.TrimPrefix(v.Name, DBVersionPrefix))
if err != nil {
l.Warning("Failed to parse past version %s: %s", v.Name, err)
continue
}
if latestAppliedVersion == nil || version.Compare(latestAppliedVersion) > 0 {
latestAppliedVersion = version
}
}
requiredVersion, err := semver.NewVersion(requiredDbVersion)
if err != nil {
return fmt.Errorf("failed to parse required version %s: %w", requiredDbVersion, err)
}
if latestAppliedVersion == nil {
latestAppliedVersion = requiredVersion
}
for _, patch := range patches {
if latestAppliedVersion.Compare(semver.MustParse(patch.EndVersion)) < 0 {
l.Info("Applying schema patch %s...", patch.Name)
if err := patch.Func(l, client, ctx); err != nil {
return err
}
}
}
return nil
}
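Schema patches are plain data: each entry carries a name, the version it targets (EndVersion), and the function applyPatches runs when the recorded database version is older than that target. A hypothetical additional entry in the same file would take the following shape (the name, version, and body are placeholders, not part of this commit):

```go
// Hypothetical example only: a future entry appended to the patches slice
// above; the name and EndVersion are placeholders, not part of this commit.
var examplePatch = Patch{
	Name:       "example_backfill_setting",
	EndVersion: "4.2.0",
	Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
		// Real patches mutate settings or entities through the ent client here.
		l.Info("Applying example patch...")
		return nil
	},
}
```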

@ -27,6 +27,7 @@ type (
SkipStoragePolicyCache struct{}
StoragePolicyClient interface {
TxOperator
// GetByGroup returns the storage policies of the group.
GetByGroup(ctx context.Context, group *ent.Group) (*ent.StoragePolicy, error)
// GetPolicyByID returns the storage policy by id.
@ -64,6 +65,14 @@ type storagePolicyClient struct {
cache cache.Driver
}
func (c *storagePolicyClient) SetClient(newClient *ent.Client) TxOperator {
return &storagePolicyClient{client: newClient, cache: c.cache}
}
func (c *storagePolicyClient) GetClient() *ent.Client {
return c.client
}
func (c *storagePolicyClient) Delete(ctx context.Context, policy *ent.StoragePolicy) error {
if err := c.client.StoragePolicy.DeleteOne(policy).Exec(ctx); err != nil {
return fmt.Errorf("failed to delete storage policy: %w", err)

File diff suppressed because one or more lines are too long

@ -3,6 +3,7 @@ package inventory
import (
"context"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent"
@ -44,6 +45,8 @@ type TaskClient interface {
List(ctx context.Context, args *ListTaskArgs) (*ListTaskResult, error)
// DeleteByIDs deletes the tasks with the given IDs.
DeleteByIDs(ctx context.Context, ids ...int) error
// DeleteBy deletes the tasks with the given args.
DeleteBy(ctx context.Context, args *DeleteTaskArgs) error
}
type (
@ -59,6 +62,12 @@ type (
*PaginationResults
Tasks []*ent.Task
}
DeleteTaskArgs struct {
NotAfter time.Time
Types []string
Status []task.Status
}
)
func NewTaskClient(client *ent.Client, dbType conf.DBType, hasher hashid.Encoder) TaskClient {
@ -113,6 +122,23 @@ func (c *taskClient) DeleteByIDs(ctx context.Context, ids ...int) error {
return err
}
func (c *taskClient) DeleteBy(ctx context.Context, args *DeleteTaskArgs) error {
query := c.client.Task.
Delete().
Where(task.CreatedAtLTE(args.NotAfter))
if len(args.Status) > 0 {
query.Where(task.StatusIn(args.Status...))
}
if len(args.Types) > 0 {
query.Where(task.TypeIn(args.Types...))
}
_, err := query.Exec(ctx)
return err
}
func (c *taskClient) Update(ctx context.Context, task *ent.Task, args *TaskArgs) (*ent.Task, error) {
stm := c.client.Task.UpdateOne(task).
SetPublicState(args.PublicState)

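DeleteBy combines a creation-time cutoff with optional status and type filters. A minimal usage sketch follows; the 30-day cutoff and the status/type literals are assumptions for illustration, not values taken from this commit:

```go
package example

import (
	"context"
	"time"

	"github.com/cloudreve/Cloudreve/v4/ent/task"
	"github.com/cloudreve/Cloudreve/v4/inventory"
)

// Sketch only: prune finished tasks older than 30 days with the new DeleteBy
// filter. The status and type literals are placeholders.
func pruneOldTasks(ctx context.Context, c inventory.TaskClient) error {
	return c.DeleteBy(ctx, &inventory.DeleteTaskArgs{
		NotAfter: time.Now().AddDate(0, 0, -30),
		Status:   []task.Status{task.Status("completed")},
		Types:    []string{"media_meta"},
	})
}
```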
@ -90,6 +90,8 @@ type (
UseCname bool `json:"use_cname,omitempty"`
// CDN domain does not need to be signed.
SourceAuth bool `json:"source_auth,omitempty"`
// QiniuUploadCdn whether to use CDN for Qiniu upload.
QiniuUploadCdn bool `json:"qiniu_upload_cdn,omitempty"`
}
FileType int
@ -177,6 +179,16 @@ type (
ShareProps struct {
// Whether to share view setting from owner
ShareView bool `json:"share_view,omitempty"`
// Whether to automatically show readme file in share view
ShowReadMe bool `json:"show_read_me,omitempty"`
}
FileTypeIconSetting struct {
Exts []string `json:"exts"`
Icon string `json:"icon,omitempty"`
Color string `json:"color,omitempty"`
ColorDark string `json:"color_dark,omitempty"`
Img string `json:"img,omitempty"`
}
)
@ -198,6 +210,7 @@ const (
GroupPermission_CommunityPlaceholder4
GroupPermissionSetExplicitUser_placeholder
GroupPermissionIgnoreFileOwnership // not used
GroupPermissionUniqueRedirectDirectLink
)
const (
@ -250,3 +263,41 @@ const (
DownloaderProviderAria2 = DownloaderProvider("aria2")
DownloaderProviderQBittorrent = DownloaderProvider("qbittorrent")
)
type (
ViewerAction string
ViewerType string
)
const (
ViewerActionView = "view"
ViewerActionEdit = "edit"
ViewerTypeBuiltin = "builtin"
ViewerTypeWopi = "wopi"
ViewerTypeCustom = "custom"
)
type Viewer struct {
ID string `json:"id"`
Type ViewerType `json:"type"`
DisplayName string `json:"display_name"`
Exts []string `json:"exts"`
Url string `json:"url,omitempty"`
Icon string `json:"icon,omitempty"`
WopiActions map[string]map[ViewerAction]string `json:"wopi_actions,omitempty"`
Props map[string]string `json:"props,omitempty"`
MaxSize int64 `json:"max_size,omitempty"`
Disabled bool `json:"disabled,omitempty"`
Templates []NewFileTemplate `json:"templates,omitempty"`
Platform string `json:"platform,omitempty"`
}
type ViewerGroup struct {
Viewers []Viewer `json:"viewers"`
}
type NewFileTemplate struct {
Ext string `json:"ext"`
DisplayName string `json:"display_name"`
}
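The viewer model now lives in the types package (its previous copy in pkg/setting is removed further below). An illustrative viewer group built from the relocated types, mirroring the excalidraw viewer referenced by the schema patch above; the size cap is a placeholder:

```go
package example

import "github.com/cloudreve/Cloudreve/v4/inventory/types"

// Illustrative only: a builtin viewer group expressed with the relocated types.
var exampleViewers = []types.ViewerGroup{{
	Viewers: []types.Viewer{{
		ID:          "excalidraw",
		Type:        types.ViewerTypeBuiltin,
		DisplayName: "Excalidraw",
		Exts:        []string{"excalidraw"},
		MaxSize:     50 << 20, // placeholder 50 MB cap
	}},
}}
```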

@ -3,6 +3,12 @@ package middleware
import (
"bytes"
"encoding/json"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/recaptcha"
@ -11,11 +17,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/gin-gonic/gin"
"github.com/mojocn/base64Captcha"
"io"
"net/http"
"net/url"
"strings"
"time"
)
type req struct {
@ -38,6 +39,9 @@ type (
turnstileResponse struct {
Success bool `json:"success"`
}
capResponse struct {
Success bool `json:"success"`
}
)
// CaptchaRequired validates the captcha in the request
@ -127,6 +131,62 @@ func CaptchaRequired(enabled func(c *gin.Context) bool) gin.HandlerFunc {
return
}
break
case setting.CaptchaCap:
captchaSetting := settings.CapCaptcha(c)
if captchaSetting.InstanceURL == "" || captchaSetting.SiteKey == "" || captchaSetting.SecretKey == "" {
l.Warning("Cap verification failed: missing configuration")
c.JSON(200, serializer.ErrWithDetails(c, serializer.CodeCaptchaError, "Captcha configuration error", nil))
c.Abort()
return
}
r := dep.RequestClient(
request2.WithContext(c),
request2.WithLogger(logging.FromContext(c)),
request2.WithHeader(http.Header{"Content-Type": []string{"application/json"}}),
)
// Cap 2.0 API format: /{siteKey}/siteverify
capEndpoint := strings.TrimSuffix(captchaSetting.InstanceURL, "/") + "/" + captchaSetting.SiteKey + "/siteverify"
requestBody := map[string]string{
"secret": captchaSetting.SecretKey,
"response": service.Ticket,
}
requestData, err := json.Marshal(requestBody)
if err != nil {
l.Warning("Cap verification failed: %s", err)
c.JSON(200, serializer.ErrWithDetails(c, serializer.CodeCaptchaError, "Captcha validation failed", err))
c.Abort()
return
}
res, err := r.Request("POST", capEndpoint, strings.NewReader(string(requestData))).
CheckHTTPResponse(http.StatusOK).
GetResponse()
if err != nil {
l.Warning("Cap verification failed: %s", err)
c.JSON(200, serializer.ErrWithDetails(c, serializer.CodeCaptchaError, "Captcha validation failed", err))
c.Abort()
return
}
var capRes capResponse
err = json.Unmarshal([]byte(res), &capRes)
if err != nil {
l.Warning("Cap verification failed: %s", err)
c.JSON(200, serializer.ErrWithDetails(c, serializer.CodeCaptchaError, "Captcha validation failed", err))
c.Abort()
return
}
if !capRes.Success {
l.Warning("Cap verification failed: validation returned false")
c.JSON(200, serializer.ErrWithDetails(c, serializer.CodeCaptchaError, "Captcha validation failed", nil))
c.Abort()
return
}
break
}
}
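The Cap branch posts the secret key and the client's ticket to the instance's 2.0-style siteverify endpoint and checks only the boolean result. A standalone sketch of the same exchange, with the endpoint shape taken from the code above and all concrete values as placeholders:

```go
package example

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Standalone sketch of the verification call performed above. The instance
// URL, site key, secret, and ticket are placeholders.
func verifyCap(instanceURL, siteKey, secret, ticket string) (bool, error) {
	body, err := json.Marshal(map[string]string{
		"secret":   secret,
		"response": ticket,
	})
	if err != nil {
		return false, err
	}
	// Cap 2.0 API format: {instance}/{siteKey}/siteverify
	resp, err := http.Post(
		fmt.Sprintf("%s/%s/siteverify", instanceURL, siteKey),
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	var out struct {
		Success bool `json:"success"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return false, err
	}
	return out.Success, nil
}
```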

@ -2,9 +2,9 @@ package middleware
import (
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/cloudreve/Cloudreve/v4/pkg/wopi"
"github.com/gin-gonic/gin"
@ -67,7 +67,7 @@ func ViewerSessionValidation() gin.HandlerFunc {
// Check if the viewer is still available
viewers := settings.FileViewers(c)
var v *setting.Viewer
var v *types.Viewer
for _, group := range viewers {
for _, viewer := range group.Viewers {
if viewer.ID == session.ViewerID && !viewer.Disabled {

@ -220,5 +220,8 @@ func getUrlSignContent(ctx context.Context, url *url.URL) string {
// host = strings.TrimSuffix(host, "/")
// // remove port if it exists
// host = strings.Split(host, ":")[0]
if url.Path == "" {
return "/"
}
return url.Path
}

pkg/cache/redis.go

@ -3,10 +3,12 @@ package cache
import (
"bytes"
"encoding/gob"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"strconv"
"time"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/gomodule/redigo/redis"
)
@ -44,7 +46,7 @@ func deserializer(value []byte) (any, error) {
}
// NewRedisStore creates a new Redis store
func NewRedisStore(l logging.Logger, size int, network, address, user, password, database string) *RedisStore {
func NewRedisStore(l logging.Logger, size int, redisConfig *conf.Redis) *RedisStore {
return &RedisStore{
pool: &redis.Pool{
MaxIdle: size,
@ -54,17 +56,19 @@ func NewRedisStore(l logging.Logger, size int, network, address, user, password,
return err
},
Dial: func() (redis.Conn, error) {
db, err := strconv.Atoi(database)
db, err := strconv.Atoi(redisConfig.DB)
if err != nil {
return nil, err
}
c, err := redis.Dial(
network,
address,
redisConfig.Network,
redisConfig.Server,
redis.DialDatabase(db),
redis.DialPassword(password),
redis.DialUsername(user),
redis.DialPassword(redisConfig.Password),
redis.DialUsername(redisConfig.User),
redis.DialUseTLS(redisConfig.UseTLS),
redis.DialTLSSkipVerify(redisConfig.TLSSkipVerify),
)
if err != nil {
l.Panic("Failed to create Redis connection: %s", err)

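NewRedisStore now receives the whole Redis config block instead of individual fields, which is what lets the dialer pick up the new TLS options. A minimal call sketch; the server address and credentials are placeholders:

```go
package example

import (
	"github.com/cloudreve/Cloudreve/v4/pkg/cache"
	"github.com/cloudreve/Cloudreve/v4/pkg/conf"
	"github.com/cloudreve/Cloudreve/v4/pkg/logging"
)

// Sketch only: build a Redis-backed store from a conf.Redis value so the new
// TLS options reach the dialer; the address and password are placeholders.
func newTLSRedisStore(l logging.Logger) *cache.RedisStore {
	return cache.NewRedisStore(l, 10, &conf.Redis{
		Network:       "tcp",
		Server:        "redis.example.com:6380",
		Password:      "secret",
		DB:            "0",
		UseTLS:        true,
		TLSSkipVerify: false,
	})
}
```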
@ -2,12 +2,13 @@ package conf
import (
"fmt"
"os"
"strings"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/go-ini/ini"
"github.com/go-playground/validator/v10"
"os"
"strings"
)
const (

@ -10,6 +10,7 @@ var (
MySqlDB DBType = "mysql"
MsSqlDB DBType = "mssql"
PostgresDB DBType = "postgres"
MariaDB DBType = "mariadb"
)
// Database is the database configuration
@ -24,6 +25,10 @@ type Database struct {
Port int
Charset string
UnixSocket bool
// DatabaseURL allows configuring the database connection directly with a connection string
DatabaseURL string
// SSLMode enables SSL for the database connection; certificates and other options can be appended to the sslmode string
SSLMode string
}
type SysMode string
@ -65,11 +70,13 @@ type Slave struct {
// Redis configuration
type Redis struct {
Network string
Server string
User string
Password string
DB string
Network string
Server string
User string
Password string
DB string
UseTLS bool
TLSSkipVerify bool
}
// Cross-origin (CORS) configuration
@ -85,18 +92,21 @@ type Cors struct {
// RedisConfig is the Redis server configuration
var RedisConfig = &Redis{
Network: "tcp",
Server: "",
Password: "",
DB: "0",
Network: "tcp",
Server: "",
Password: "",
DB: "0",
UseTLS: false,
TLSSkipVerify: true,
}
// DatabaseConfig is the database configuration
var DatabaseConfig = &Database{
Charset: "utf8mb4",
DBFile: util.DataPath("cloudreve.db"),
Port: 3306,
UnixSocket: false,
Charset: "utf8mb4",
DBFile: util.DataPath("cloudreve.db"),
Port: 3306,
UnixSocket: false,
DatabaseURL: "",
}
// SystemConfig is the shared system configuration

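Taken together, the conf changes expose a direct connection string for the database and TLS settings for Redis. A sketch of the new knobs expressed as Go values (when read from conf.ini they are filled from the matching sections as before); the connection string and Redis address are placeholders:

```go
package example

import "github.com/cloudreve/Cloudreve/v4/pkg/conf"

// Sketch only: the new configuration knobs introduced by this change.
func exampleOverrides() {
	conf.DatabaseConfig.DatabaseURL = "host=127.0.0.1 port=5432 user=cloudreve dbname=cloudreve sslmode=disable"
	conf.RedisConfig.Server = "redis.example.com:6380"
	conf.RedisConfig.UseTLS = true
	conf.RedisConfig.TLSSkipVerify = false
}
```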
@ -67,7 +67,10 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
}
mac := qbox.NewMac(policy.AccessKey, policy.SecretKey)
cfg := &storage.Config{UseHTTPS: true}
cfg := &storage.Config{
UseHTTPS: true,
UseCdnDomains: policy.Settings.QiniuUploadCdn,
}
driver := &Driver{
policy: policy,

@ -384,6 +384,10 @@ func (f *DBFS) Get(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.Fil
ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
}
if o.extendedInfo {
ctx = context.WithValue(ctx, inventory.LoadFileDirectLink{}, true)
}
if o.loadFileShareIfOwned {
ctx = context.WithValue(ctx, inventory.LoadFileShare{}, true)
}
@ -407,6 +411,11 @@ func (f *DBFS) Get(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.Fil
StorageUsed: target.SizeUsed(),
EntityStoragePolicies: make(map[int]*ent.StoragePolicy),
}
if f.user.ID == target.OwnerID() {
extendedInfo.DirectLinks = target.Model.Edges.DirectLinks
}
policyID := target.PolicyID()
if policyID > 0 {
policy, err := f.storagePolicyClient.GetPolicyByID(ctx, policyID)

@ -191,6 +191,7 @@ type (
Shares []*ent.Share
EntityStoragePolicies map[int]*ent.StoragePolicy
View *types.ExplorerView
DirectLinks []*ent.DirectLink
}
FolderSummary struct {

@ -98,8 +98,9 @@ func (m *manager) GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectL
}
if useRedirect {
reuseExisting := !m.user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionUniqueRedirectDirectLink))
// Use redirect source
link, err := fileClient.CreateDirectLink(ctx, file.ID(), file.Name(), m.user.Edges.Group.SpeedLimit)
link, err := fileClient.CreateDirectLink(ctx, file.ID(), file.Name(), m.user.Edges.Group.SpeedLimit, reuseExisting)
if err != nil {
ae.Add(url.String(), err)
continue

@ -54,7 +54,7 @@ type (
// UpsertMedata update or insert metadata of given file
PatchMedata(ctx context.Context, path []*fs.URI, data ...fs.MetadataPatch) error
// CreateViewerSession creates a viewer session for given file
CreateViewerSession(ctx context.Context, uri *fs.URI, version string, viewer *setting.Viewer) (*ViewerSession, error)
CreateViewerSession(ctx context.Context, uri *fs.URI, version string, viewer *types.Viewer) (*ViewerSession, error)
// TraverseFile traverses a file to its root file, return the file with linked root.
TraverseFile(ctx context.Context, fileID int) (fs.File, error)
}
@ -115,6 +115,7 @@ type (
RemainDownloads int
Expire *time.Time
ShareView bool
ShowReadMe bool
}
)

@ -267,7 +267,8 @@ func (l *manager) CreateOrUpdateShare(ctx context.Context, path *fs.URI, args *C
}
props := &types.ShareProps{
ShareView: args.ShareView,
ShareView: args.ShareView,
ShowReadMe: args.ShowReadMe,
}
share, err := shareClient.Upsert(ctx, &inventory.CreateShareParams{

@ -9,7 +9,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gofrs/uuid"
)
@ -44,7 +43,7 @@ func init() {
gob.Register(ViewerSessionCache{})
}
func (m *manager) CreateViewerSession(ctx context.Context, uri *fs.URI, version string, viewer *setting.Viewer) (*ViewerSession, error) {
func (m *manager) CreateViewerSession(ctx context.Context, uri *fs.URI, version string, viewer *types.Viewer) (*ViewerSession, error) {
file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithNotRoot())
if err != nil {
return nil, err
@ -88,6 +87,6 @@ func ViewerSessionFromContext(ctx context.Context) *ViewerSessionCache {
return ctx.Value(ViewerSessionCacheCtx{}).(*ViewerSessionCache)
}
func ViewerFromContext(ctx context.Context) *setting.Viewer {
return ctx.Value(ViewerCtx{}).(*setting.Viewer)
func ViewerFromContext(ctx context.Context) *types.Viewer {
return ctx.Value(ViewerCtx{}).(*types.Viewer)
}

@ -319,7 +319,7 @@ func (m *RemoteDownloadTask) slaveTransfer(ctx context.Context, dep dependency.D
}
dst := dstUri.JoinRaw(f.Name)
src := filepath.FromSlash(path.Join(m.state.Status.SavePath, f.Name))
src := path.Join(m.state.Status.SavePath, f.Name)
payload.Files = append(payload.Files, SlaveUploadEntity{
Src: src,
Uri: dst,

@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
"sync"
"sync/atomic"
@ -127,11 +128,11 @@ func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
t.progress[progressKey] = &queue.Progress{Identifier: file.Uri.String(), Total: file.Size}
t.Unlock()
handle, err := os.Open(file.Src)
handle, err := os.Open(filepath.FromSlash(file.Src))
if err != nil {
t.l.Warning("Failed to open file %s: %s", file.Src, err.Error())
atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
ae.Add(filepath.Base(file.Src), fmt.Errorf("failed to open file: %w", err))
ae.Add(path.Base(file.Src), fmt.Errorf("failed to open file: %w", err))
return
}
@ -140,7 +141,7 @@ func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
t.l.Warning("Failed to get file stat for %s: %s", file.Src, err.Error())
handle.Close()
atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
ae.Add(filepath.Base(file.Src), fmt.Errorf("failed to get file stat: %w", err))
ae.Add(path.Base(file.Src), fmt.Errorf("failed to get file stat: %w", err))
return
}
@ -163,7 +164,7 @@ func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
handle.Close()
t.l.Warning("Failed to upload file %s: %s", file.Src, err.Error())
atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
ae.Add(filepath.Base(file.Src), fmt.Errorf("failed to upload file: %w", err))
ae.Add(path.Base(file.Src), fmt.Errorf("failed to upload file: %w", err))
return
}

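The two hunks above settle on one convention: paths exchanged between master and slave stay slash-separated (package path), and are converted to the OS-native form (package path/filepath) only when the slave actually opens the local file. A minimal sketch of that convention; the directory and file name are placeholders:

```go
package example

import (
	"os"
	"path"
	"path/filepath"
)

// Sketch only: slash-separated "wire" paths built with package path, converted
// with filepath.FromSlash only at the point of local file access.
func openTransferred(savePath, name string) (*os.File, error) {
	src := path.Join(savePath, name)        // e.g. "/downloads/task-1/movie.mkv"
	return os.Open(filepath.FromSlash(src)) // "\" separators on Windows slaves
}
```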
@ -9,6 +9,8 @@ import (
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
@ -38,6 +40,8 @@ type (
TcCaptcha(ctx context.Context) *TcCaptcha
// TurnstileCaptcha returns the Cloudflare Turnstile settings.
TurnstileCaptcha(ctx context.Context) *Turnstile
// CapCaptcha returns the Cap settings.
CapCaptcha(ctx context.Context) *Cap
// EmailActivationEnabled returns true if email activation is required.
EmailActivationEnabled(ctx context.Context) bool
// DefaultGroup returns the default group ID for new users.
@ -167,7 +171,7 @@ type (
// FolderPropsCacheTTL returns the cache TTL of folder summary.
FolderPropsCacheTTL(ctx context.Context) int
// FileViewers returns the file viewers settings.
FileViewers(ctx context.Context) []ViewerGroup
FileViewers(ctx context.Context) []types.ViewerGroup
// ViewerSessionTTL returns the TTL of viewer session.
ViewerSessionTTL(ctx context.Context) int
// MimeMapping returns the extension to MIME mapping settings.
@ -184,6 +188,14 @@ type (
AvatarProcess(ctx context.Context) *AvatarProcess
// UseFirstSiteUrl returns the first site URL.
AllSiteURLs(ctx context.Context) []*url.URL
// LibRawThumbGeneratorEnabled returns true if libraw thumb generator is enabled.
LibRawThumbGeneratorEnabled(ctx context.Context) bool
// LibRawThumbMaxSize returns the maximum size of libraw thumb generator.
LibRawThumbMaxSize(ctx context.Context) int64
// LibRawThumbExts returns the supported extensions of libraw thumb generator.
LibRawThumbExts(ctx context.Context) []string
// LibRawThumbPath returns the path of libraw executable.
LibRawThumbPath(ctx context.Context) string
}
UseFirstSiteUrlCtxKey = struct{}
)
@ -230,11 +242,11 @@ func (s *settingProvider) Avatar(ctx context.Context) *Avatar {
}
}
func (s *settingProvider) FileViewers(ctx context.Context) []ViewerGroup {
func (s *settingProvider) FileViewers(ctx context.Context) []types.ViewerGroup {
raw := s.getString(ctx, "file_viewers", "[]")
var viewers []ViewerGroup
var viewers []types.ViewerGroup
if err := json.Unmarshal([]byte(raw), &viewers); err != nil {
return []ViewerGroup{}
return []types.ViewerGroup{}
}
return viewers
@ -384,6 +396,22 @@ func (s *settingProvider) VipsPath(ctx context.Context) string {
return s.getString(ctx, "thumb_vips_path", "vips")
}
func (s *settingProvider) LibRawThumbGeneratorEnabled(ctx context.Context) bool {
return s.getBoolean(ctx, "thumb_libraw_enabled", false)
}
func (s *settingProvider) LibRawThumbMaxSize(ctx context.Context) int64 {
return s.getInt64(ctx, "thumb_libraw_max_size", 78643200)
}
func (s *settingProvider) LibRawThumbExts(ctx context.Context) []string {
return s.getStringList(ctx, "thumb_libraw_exts", []string{})
}
func (s *settingProvider) LibRawThumbPath(ctx context.Context) string {
return s.getString(ctx, "thumb_libraw_path", "simple_dcraw")
}
func (s *settingProvider) LibreOfficeThumbGeneratorEnabled(ctx context.Context) bool {
return s.getBoolean(ctx, "thumb_libreoffice_enabled", false)
}
@ -638,6 +666,15 @@ func (s *settingProvider) TurnstileCaptcha(ctx context.Context) *Turnstile {
}
}
func (s *settingProvider) CapCaptcha(ctx context.Context) *Cap {
return &Cap{
InstanceURL: s.getString(ctx, "captcha_cap_instance_url", ""),
SiteKey: s.getString(ctx, "captcha_cap_site_key", ""),
SecretKey: s.getString(ctx, "captcha_cap_secret_key", ""),
AssetServer: s.getString(ctx, "captcha_cap_asset_server", "jsdelivr"),
}
}
func (s *settingProvider) ReCaptcha(ctx context.Context) *ReCaptcha {
return &ReCaptcha{
Secret: s.getString(ctx, "captcha_ReCaptchaSecret", ""),

@ -28,6 +28,7 @@ const (
CaptchaReCaptcha = CaptchaType("recaptcha")
CaptchaTcaptcha = CaptchaType("tcaptcha")
CaptchaTurnstile = CaptchaType("turnstile")
CaptchaCap = CaptchaType("cap")
)
type ReCaptcha struct {
@ -47,6 +48,13 @@ type Turnstile struct {
Secret string
}
type Cap struct {
InstanceURL string
SiteKey string
SecretKey string
AssetServer string
}
type SMTP struct {
FromName string
From string
@ -169,42 +177,6 @@ type MapSetting struct {
// Viewer related
type (
ViewerAction string
ViewerType string
)
const (
ViewerActionView = "view"
ViewerActionEdit = "edit"
ViewerTypeBuiltin = "builtin"
ViewerTypeWopi = "wopi"
)
type Viewer struct {
ID string `json:"id"`
Type ViewerType `json:"type"`
DisplayName string `json:"display_name"`
Exts []string `json:"exts"`
Url string `json:"url,omitempty"`
Icon string `json:"icon,omitempty"`
WopiActions map[string]map[ViewerAction]string `json:"wopi_actions,omitempty"`
Props map[string]string `json:"props,omitempty"`
MaxSize int64 `json:"max_size,omitempty"`
Disabled bool `json:"disabled,omitempty"`
Templates []NewFileTemplate `json:"templates,omitempty"`
}
type ViewerGroup struct {
Viewers []Viewer `json:"viewers"`
}
type NewFileTemplate struct {
Ext string `json:"ext"`
DisplayName string `json:"display_name"`
}
type (
SearchCategory string
)

@ -0,0 +1,261 @@
package thumb
import (
"bytes"
"context"
"errors"
"fmt"
"image"
"image/jpeg"
"image/png"
"io"
"os"
"os/exec"
"path/filepath"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gofrs/uuid"
)
func NewLibRawGenerator(l logging.Logger, settings setting.Provider) *LibRawGenerator {
return &LibRawGenerator{l: l, settings: settings}
}
type LibRawGenerator struct {
l logging.Logger
settings setting.Provider
}
func (l *LibRawGenerator) Generate(ctx context.Context, es entitysource.EntitySource, ext string, previous *Result) (*Result, error) {
if !util.IsInExtensionListExt(l.settings.LibRawThumbExts(ctx), ext) {
return nil, fmt.Errorf("unsupported video format: %w", ErrPassThrough)
}
if es.Entity().Size() > l.settings.LibRawThumbMaxSize(ctx) {
return nil, fmt.Errorf("file is too big: %w", ErrPassThrough)
}
// Download/copy the source file into a temp folder
tempFolder := filepath.Join(
util.DataPath(l.settings.TempPath(ctx)),
"thumb",
fmt.Sprintf("libraw_%s", uuid.Must(uuid.NewV4()).String()),
)
tempInputFileName := fmt.Sprintf("libraw_%s.%s", uuid.Must(uuid.NewV4()).String(), ext)
tempPath := filepath.Join(tempFolder, tempInputFileName)
tempInputFile, err := util.CreatNestedFile(tempPath)
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %w", err)
}
defer os.Remove(tempPath)
defer tempInputFile.Close()
if _, err = io.Copy(tempInputFile, es); err != nil {
return &Result{Path: tempPath}, fmt.Errorf("failed to write input file: %w", err)
}
tempInputFile.Close()
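// Run the LibRaw executable with "-e" to extract the embedded preview next to the input file.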
cmd := exec.CommandContext(ctx,
l.settings.LibRawThumbPath(ctx), "-e", tempPath)
// Redirect IO
var dcrawErr bytes.Buffer
cmd.Stderr = &dcrawErr
if err := cmd.Run(); err != nil {
l.l.Warning("Failed to invoke dcraw: %s", dcrawErr.String())
return &Result{Path: tempPath}, fmt.Errorf("failed to invoke dcraw: %w, raw output: %s", err, dcrawErr.String())
}
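// The extracted preview is written as "<input>.thumb.jpg"; return it and let downstream generators continue processing.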
return &Result{
Path: filepath.Join(
tempFolder,
tempInputFileName+".thumb.jpg",
),
Continue: true,
Cleanup: []func(){func() { _ = os.RemoveAll(tempFolder) }},
}, nil
}
func (l *LibRawGenerator) Priority() int {
return 50
}
func (l *LibRawGenerator) Enabled(ctx context.Context) bool {
return l.settings.LibRawThumbGeneratorEnabled(ctx)
}
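// rotateImg rewrites the JPEG/PNG file in place, rotating/mirroring it according to the given EXIF orientation value.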
func rotateImg(filePath string, orientation int) error {
resultImg, err := os.OpenFile(filePath, os.O_RDWR, 0777)
if err != nil {
return err
}
defer func() { _ = resultImg.Close() }()
imgFlag := make([]byte, 3)
if _, err = io.ReadFull(resultImg, imgFlag); err != nil {
return err
}
if _, err = resultImg.Seek(0, 0); err != nil {
return err
}
var img image.Image
if bytes.Equal(imgFlag, []byte{0xFF, 0xD8, 0xFF}) {
img, err = jpeg.Decode(resultImg)
} else {
img, err = png.Decode(resultImg)
}
if err != nil {
return err
}
switch orientation {
case 8:
img = rotate90(img)
case 3:
img = rotate90(rotate90(img))
case 6:
img = rotate90(rotate90(rotate90(img)))
case 2:
img = mirrorImg(img)
case 7:
img = rotate90(mirrorImg(img))
case 4:
img = rotate90(rotate90(mirrorImg(img)))
case 5:
img = rotate90(rotate90(rotate90(mirrorImg(img))))
}
if err = resultImg.Truncate(0); err != nil {
return err
}
if _, err = resultImg.Seek(0, 0); err != nil {
return err
}
if bytes.Equal(imgFlag, []byte{0xFF, 0xD8, 0xFF}) {
return jpeg.Encode(resultImg, img, nil)
}
return png.Encode(resultImg, img)
}
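// getJpegOrientation reads the EXIF APP1 segment of a JPEG file and returns the value of its orientation tag (0x0112).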
func getJpegOrientation(fileName string) (int, error) {
f, err := os.Open(fileName)
if err != nil {
return 0, err
}
defer func() { _ = f.Close() }()
header := make([]byte, 6)
defer func() { header = nil }()
if _, err = io.ReadFull(f, header); err != nil {
return 0, err
}
// jpeg format header
if !bytes.Equal(header[:3], []byte{0xFF, 0xD8, 0xFF}) {
return 0, errors.New("not a jpeg")
}
// not an APP1 marker
if header[3] != 0xE1 {
return 1, nil
}
// exif data total length
totalLen := int(header[4])<<8 + int(header[5]) - 2
buf := make([]byte, totalLen)
defer func() { buf = nil }()
if _, err = io.ReadFull(f, buf); err != nil {
return 0, err
}
// remove Exif identifier code
buf = buf[6:]
// byte order
parse16, parse32, err := initParseMethod(buf[:2])
if err != nil {
return 0, err
}
// version
_ = buf[2:4]
// first IFD offset
offset := parse32(buf[4:8])
// first directory entry (DE) offset
offset += 2
buf = buf[offset:]
const (
orientationTag = 0x112
deEntryLength = 12
)
for len(buf) > deEntryLength {
tag := parse16(buf[:2])
if tag == orientationTag {
return int(parse32(buf[8:12])), nil
}
buf = buf[deEntryLength:]
}
return 0, errors.New("orientation not found")
}
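// initParseMethod selects 16-bit and 32-bit integer parsers based on the TIFF byte-order mark ("II" = little-endian, "MM" = big-endian).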
func initParseMethod(buf []byte) (func([]byte) int16, func([]byte) int32, error) {
if bytes.Equal(buf, []byte{0x49, 0x49}) {
return littleEndian16, littleEndian32, nil
}
if bytes.Equal(buf, []byte{0x4D, 0x4D}) {
return bigEndian16, bigEndian32, nil
}
return nil, nil, errors.New("invalid byte order")
}
func littleEndian16(buf []byte) int16 {
return int16(buf[0]) | int16(buf[1])<<8
}
func bigEndian16(buf []byte) int16 {
return int16(buf[1]) | int16(buf[0])<<8
}
func littleEndian32(buf []byte) int32 {
return int32(buf[0]) | int32(buf[1])<<8 | int32(buf[2])<<16 | int32(buf[3])<<24
}
func bigEndian32(buf []byte) int32 {
return int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24
}
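// rotate90 returns img rotated 90 degrees counter-clockwise.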
func rotate90(img image.Image) image.Image {
bounds := img.Bounds()
width, height := bounds.Dx(), bounds.Dy()
newImg := image.NewRGBA(image.Rect(0, 0, height, width))
for x := 0; x < width; x++ {
for y := 0; y < height; y++ {
newImg.Set(y, width-x-1, img.At(x, y))
}
}
return newImg
}
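// mirrorImg returns img flipped horizontally.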
func mirrorImg(img image.Image) image.Image {
bounds := img.Bounds()
width, height := bounds.Dx(), bounds.Dy()
newImg := image.NewRGBA(image.Rect(0, 0, width, height))
for x := 0; x < width; x++ {
for y := 0; y < height; y++ {
newImg.Set(width-x-1, y, img.At(x, y))
}
}
return newImg
}

@ -4,14 +4,15 @@ import (
"context"
"errors"
"fmt"
"io"
"reflect"
"sort"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"io"
"reflect"
"sort"
)
type (
@ -71,6 +72,7 @@ func NewPipeline(settings setting.Provider, l logging.Logger) Generator {
NewVipsGenerator(l, settings),
NewLibreOfficeGenerator(l, settings),
NewMusicCoverGenerator(l, settings),
NewLibRawGenerator(l, settings),
)
sort.Sort(generators)

@ -25,6 +25,8 @@ func TestGenerator(ctx context.Context, name, executable string) (string, error)
return testLibreOfficeGenerator(ctx, executable)
case "ffprobe":
return testFFProbeGenerator(ctx, executable)
case "libraw":
return testLibRawGenerator(ctx, executable)
default:
return "", ErrUnknownGenerator
}
@ -89,3 +91,20 @@ func testLibreOfficeGenerator(ctx context.Context, executable string) (string, e
return output.String(), nil
}
func testLibRawGenerator(ctx context.Context, executable string) (string, error) {
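// "-L" is expected to print the list of supported cameras; any LibRaw build should include Sony models.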
cmd := exec.CommandContext(ctx, executable, "-L")
var output bytes.Buffer
cmd.Stdout = &output
if err := cmd.Run(); err != nil {
return "", fmt.Errorf("failed to invoke libraw executable: %w", err)
}
if !strings.Contains(output.String(), "Sony") {
return "", ErrUnknownOutput
}
cameraList := strings.Split(output.String(), "\n")
return fmt.Sprintf("N/A, %d cameras supported", len(cameraList)), nil
}

@ -3,7 +3,7 @@ package wopi
import (
"encoding/xml"
"fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/gofrs/uuid"
"github.com/samber/lo"
)
@ -16,23 +16,23 @@ var (
ActionEdit = ActonType("edit")
)
func DiscoveryXmlToViewerGroup(xmlStr string) (*setting.ViewerGroup, error) {
func DiscoveryXmlToViewerGroup(xmlStr string) (*types.ViewerGroup, error) {
var discovery WopiDiscovery
if err := xml.Unmarshal([]byte(xmlStr), &discovery); err != nil {
return nil, fmt.Errorf("failed to parse WOPI discovery XML: %w", err)
}
group := &setting.ViewerGroup{
Viewers: make([]setting.Viewer, 0, len(discovery.NetZone.App)),
group := &types.ViewerGroup{
Viewers: make([]types.Viewer, 0, len(discovery.NetZone.App)),
}
for _, app := range discovery.NetZone.App {
viewer := setting.Viewer{
viewer := types.Viewer{
ID: uuid.Must(uuid.NewV4()).String(),
DisplayName: app.Name,
Type: setting.ViewerTypeWopi,
Type: types.ViewerTypeWopi,
Icon: app.FavIconUrl,
WopiActions: make(map[string]map[setting.ViewerAction]string),
WopiActions: make(map[string]map[types.ViewerAction]string),
}
for _, action := range app.Action {
@ -41,21 +41,21 @@ func DiscoveryXmlToViewerGroup(xmlStr string) (*setting.ViewerGroup, error) {
}
if _, ok := viewer.WopiActions[action.Ext]; !ok {
viewer.WopiActions[action.Ext] = make(map[setting.ViewerAction]string)
viewer.WopiActions[action.Ext] = make(map[types.ViewerAction]string)
}
if action.Name == string(ActionPreview) {
viewer.WopiActions[action.Ext][setting.ViewerActionView] = action.Urlsrc
viewer.WopiActions[action.Ext][types.ViewerActionView] = action.Urlsrc
} else if action.Name == string(ActionPreviewFallback) {
viewer.WopiActions[action.Ext][setting.ViewerActionView] = action.Urlsrc
viewer.WopiActions[action.Ext][types.ViewerActionView] = action.Urlsrc
} else if action.Name == string(ActionEdit) {
viewer.WopiActions[action.Ext][setting.ViewerActionEdit] = action.Urlsrc
viewer.WopiActions[action.Ext][types.ViewerActionEdit] = action.Urlsrc
} else if len(viewer.WopiActions[action.Ext]) == 0 {
delete(viewer.WopiActions, action.Ext)
}
}
viewer.Exts = lo.MapToSlice(viewer.WopiActions, func(key string, value map[setting.ViewerAction]string) string {
viewer.Exts = lo.MapToSlice(viewer.WopiActions, func(key string, value map[types.ViewerAction]string) string {
return key
})

@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"net/url"
"strings"
"time"
@ -56,7 +57,7 @@ const (
LockDuration = time.Duration(30) * time.Minute
)
func GenerateWopiSrc(ctx context.Context, action setting.ViewerAction, viewer *setting.Viewer, viewerSession *manager.ViewerSession) (*url.URL, error) {
func GenerateWopiSrc(ctx context.Context, action types.ViewerAction, viewer *types.Viewer, viewerSession *manager.ViewerSession) (*url.URL, error) {
dep := dependency.FromContext(ctx)
base := dep.SettingProvider().SiteURL(setting.UseFirstSiteUrl(ctx))
hasher := dep.HashIDEncoder()
@ -69,7 +70,7 @@ func GenerateWopiSrc(ctx context.Context, action setting.ViewerAction, viewer *s
var (
src string
)
fallbackOrder := []setting.ViewerAction{action, setting.ViewerActionView, setting.ViewerActionEdit}
fallbackOrder := []types.ViewerAction{action, types.ViewerActionView, types.ViewerActionEdit}
for _, a := range fallbackOrder {
if src, ok = availableActions[a]; ok {
break

@ -518,6 +518,17 @@ func AdminBatchDeleteEntity(c *gin.Context) {
}
}
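// AdminCleanupTask deletes finished tasks matching the submitted filters.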
func AdminCleanupTask(c *gin.Context) {
service := ParametersFromContext[*admin.CleanupTaskService](c, admin.CleanupTaskParameterCtx{})
err := service.CleanupTask(c)
if err != nil {
c.JSON(200, serializer.Err(c, err))
return
}
c.JSON(200, serializer.Response{})
}
func AdminListTasks(c *gin.Context) {
service := ParametersFromContext[*admin.AdminListService](c, admin.AdminListServiceParamsCtx{})
res, err := service.Tasks(c)

@ -116,6 +116,16 @@ func GetSource(c *gin.Context) {
c.JSON(200, serializer.Response{Data: res})
}
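// DeleteDirectLink deletes a direct link of a file.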
func DeleteDirectLink(c *gin.Context) {
err := explorer.DeleteDirectLink(c)
if err != nil {
c.JSON(200, serializer.Err(c, err))
return
}
c.JSON(200, serializer.Response{})
}
// Thumb returns the thumbnail of a file
func Thumb(c *gin.Context) {
service := ParametersFromContext[*explorer.FileThumbService](c, explorer.FileThumbParameterCtx{})

@ -697,10 +697,18 @@ func initMasterRouter(dep dependency.Dep) *gin.Engine {
)
// Get file direct links
file.PUT("source",
controllers.FromJSON[explorer.GetDirectLinkService](explorer.GetDirectLinkParamCtx{}),
middleware.ValidateBatchFileCount(dep, explorer.GetDirectLinkParamCtx{}),
controllers.GetSource)
source := file.Group("source")
{
source.PUT("",
controllers.FromJSON[explorer.GetDirectLinkService](explorer.GetDirectLinkParamCtx{}),
middleware.ValidateBatchFileCount(dep, explorer.GetDirectLinkParamCtx{}),
controllers.GetSource,
)
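// Delete a direct link by its hashed ID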
source.DELETE(":id",
middleware.HashID(hashid.SourceLinkID),
controllers.DeleteDirectLink,
)
}
// Patch view
file.PATCH("view",
controllers.FromJSON[explorer.PatchViewService](explorer.PatchViewParameterCtx{}),
@ -846,6 +854,11 @@ func initMasterRouter(dep dependency.Dep) *gin.Engine {
controllers.FromJSON[adminsvc.BatchTaskService](adminsvc.BatchTaskParamCtx{}),
controllers.AdminBatchDeleteTask,
)
// Cleanup tasks
queue.POST("cleanup",
controllers.FromJSON[adminsvc.CleanupTaskService](adminsvc.CleanupTaskParameterCtx{}),
controllers.AdminCleanupTask,
)
// // List tasks
// queue.POST("list", controllers.AdminListTask)
// // Create file import task

@ -294,11 +294,22 @@ func (service *UpdateStoragePolicyService) Update(c *gin.Context) (*GetStoragePo
}
service.Policy.ID = idInt
_, err = storagePolicyClient.Upsert(c, service.Policy)
sc, tx, ctx, err := inventory.WithTx(c, storagePolicyClient)
if err != nil {
return nil, serializer.NewError(serializer.CodeDBError, "Failed to create transaction", err)
}
_, err = sc.Upsert(ctx, service.Policy)
if err != nil {
_ = inventory.Rollback(tx)
return nil, serializer.NewError(serializer.CodeDBError, "Failed to update policy", err)
}
if err := inventory.Commit(tx); err != nil {
return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit transaction", err)
}
_ = dep.KV().Delete(manager.EntityUrlCacheKeyPrefix)
s := SingleStoragePolicyService{ID: idInt}

@ -3,6 +3,7 @@ package admin
import (
"context"
"strconv"
"time"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent"
@ -251,3 +252,31 @@ func (s *BatchTaskService) Delete(c *gin.Context) error {
return nil
}
type (
CleanupTaskService struct {
NotAfter time.Time `json:"not_after" binding:"required"`
Types []string `json:"types"`
Status []task.Status `json:"status"`
}
CleanupTaskParameterCtx struct{}
)
func (s *CleanupTaskService) CleanupTask(c *gin.Context) error {
dep := dependency.FromContext(c)
taskClient := dep.TaskClient()
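// Default to tasks that are already finished (canceled, completed, or errored).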
if len(s.Status) == 0 {
s.Status = []task.Status{task.StatusCanceled, task.StatusCompleted, task.StatusError}
}
if err := taskClient.DeleteBy(c, &inventory.DeleteTaskArgs{
NotAfter: s.NotAfter,
Types: s.Types,
Status: s.Status,
}); err != nil {
return serializer.NewError(serializer.CodeDBError, "Failed to cleanup tasks", err)
}
return nil
}

@ -12,6 +12,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/wopi"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/gin-gonic/gin"
"github.com/go-mail/mail"
)
@ -107,7 +108,7 @@ type (
FetchWOPIDiscoveryParamCtx struct{}
)
func (s *FetchWOPIDiscoveryService) Fetch(c *gin.Context) (*setting.ViewerGroup, error) {
func (s *FetchWOPIDiscoveryService) Fetch(c *gin.Context) (*types.ViewerGroup, error) {
dep := dependency.FromContext(c)
requestClient := dep.RequestClient(request2.WithContext(c), request2.WithLogger(dep.Logger()))
content, err := requestClient.Request("GET", s.Endpoint, nil).CheckHTTPResponse(http.StatusOK).GetResponse()

@ -3,6 +3,7 @@ package basic
import (
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/service/user"
"github.com/gin-gonic/gin"
@ -28,6 +29,9 @@ type SiteConfig struct {
ReCaptchaKey string `json:"captcha_ReCaptchaKey,omitempty"`
CaptchaType setting.CaptchaType `json:"captcha_type,omitempty"`
TurnstileSiteID string `json:"turnstile_site_id,omitempty"`
CapInstanceURL string `json:"captcha_cap_instance_url,omitempty"`
CapSiteKey string `json:"captcha_cap_site_key,omitempty"`
CapAssetServer string `json:"captcha_cap_asset_server,omitempty"`
RegisterEnabled bool `json:"register_enabled,omitempty"`
TosUrl string `json:"tos_url,omitempty"`
PrivacyPolicyUrl string `json:"privacy_policy_url,omitempty"`
@ -37,7 +41,7 @@ type SiteConfig struct {
EmojiPreset string `json:"emoji_preset,omitempty"`
MapProvider setting.MapProvider `json:"map_provider,omitempty"`
GoogleMapTileType setting.MapGoogleTileType `json:"google_map_tile_type,omitempty"`
FileViewers []setting.ViewerGroup `json:"file_viewers,omitempty"`
FileViewers []types.ViewerGroup `json:"file_viewers,omitempty"`
MaxBatchSize int `json:"max_batch_size,omitempty"`
ThumbnailWidth int `json:"thumbnail_width,omitempty"`
ThumbnailHeight int `json:"thumbnail_height,omitempty"`
@ -119,6 +123,7 @@ func (s *GetSettingService) GetSiteConfig(c *gin.Context) (*SiteConfig, error) {
userRes := user.BuildUser(u, dep.HashIDEncoder())
logo := settings.Logo(c)
reCaptcha := settings.ReCaptcha(c)
capCaptcha := settings.CapCaptcha(c)
appSetting := settings.AppSetting(c)
return &SiteConfig{
@ -132,6 +137,9 @@ func (s *GetSettingService) GetSiteConfig(c *gin.Context) (*SiteConfig, error) {
CaptchaType: settings.CaptchaType(c),
TurnstileSiteID: settings.TurnstileCaptcha(c).Key,
ReCaptchaKey: reCaptcha.Key,
CapInstanceURL: capCaptcha.InstanceURL,
CapSiteKey: capCaptcha.SiteKey,
CapAssetServer: capCaptcha.AssetServer,
AppPromotion: appSetting.Promotion,
}, nil
}

@ -120,6 +120,31 @@ func (s *GetDirectLinkService) Get(c *gin.Context) ([]DirectLinkResponse, error)
return BuildDirectLinkResponse(res), err
}
func DeleteDirectLink(c *gin.Context) error {
dep := dependency.FromContext(c)
user := inventory.UserFromContext(c)
m := manager.NewFileManager(dep, user)
defer m.Recycle()
linkId := hashid.FromContext(c)
linkClient := dep.DirectLinkClient()
ctx := context.WithValue(c, inventory.LoadDirectLinkFile{}, true)
link, err := linkClient.GetByID(ctx, linkId)
if err != nil || link.Edges.File == nil {
return serializer.NewError(serializer.CodeNotFound, "Direct link not found", err)
}
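// Links owned by other users are treated as not found.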
if link.Edges.File.OwnerID != user.ID {
return serializer.NewError(serializer.CodeNotFound, "Direct link not found", err)
}
if err := linkClient.Delete(c, link.ID); err != nil {
return serializer.NewError(serializer.CodeDBError, "Failed to delete direct link", err)
}
return nil
}
type (
// ListFileParameterCtx define key fore ListFileService
ListFileParameterCtx struct{}

@ -239,6 +239,14 @@ type ExtendedInfo struct {
Shares []Share `json:"shares,omitempty"`
Entities []Entity `json:"entities,omitempty"`
View *types.ExplorerView `json:"view,omitempty"`
DirectLinks []DirectLink `json:"direct_links,omitempty"`
}
type DirectLink struct {
ID string `json:"id"`
URL string `json:"url"`
Downloaded int `json:"downloaded"`
CreatedAt time.Time `json:"created_at"`
}
type StoragePolicy struct {
@ -272,6 +280,7 @@ type Share struct {
CreatedAt time.Time `json:"created_at,omitempty"`
Expired bool `json:"expired"`
Url string `json:"url"`
ShowReadMe bool `json:"show_readme,omitempty"`
// Only viewable by owner
IsPrivate bool `json:"is_private,omitempty"`
@ -305,6 +314,7 @@ func BuildShare(s *ent.Share, base *url.URL, hasher hashid.Encoder, requester *e
res.Downloaded = s.Downloads
res.Expires = s.Expires
res.Password = s.Password
res.ShowReadMe = s.Props != nil && s.Props.ShowReadMe
}
if requester.ID == owner.ID {
@ -372,16 +382,20 @@ func BuildExtendedInfo(ctx context.Context, u *ent.User, f fs.File, hasher hashi
return nil
}
dep := dependency.FromContext(ctx)
base := dep.SettingProvider().SiteURL(ctx)
ext := &ExtendedInfo{
StoragePolicy: BuildStoragePolicy(extendedInfo.StoragePolicy, hasher),
StorageUsed: extendedInfo.StorageUsed,
Entities: lo.Map(f.Entities(), func(e fs.Entity, index int) Entity {
return BuildEntity(extendedInfo, e, hasher)
}),
DirectLinks: lo.Map(extendedInfo.DirectLinks, func(d *ent.DirectLink, index int) DirectLink {
return BuildDirectLink(d, hasher, base)
}),
}
dep := dependency.FromContext(ctx)
base := dep.SettingProvider().SiteURL(ctx)
if u.ID == f.OwnerID() {
// Only owner can see the shares settings.
ext.Shares = lo.Map(extendedInfo.Shares, func(s *ent.Share, index int) Share {
@ -393,6 +407,15 @@ func BuildExtendedInfo(ctx context.Context, u *ent.User, f fs.File, hasher hashi
return ext
}
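// BuildDirectLink converts an ent.DirectLink into its API representation, with a hashed ID and absolute URL.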
func BuildDirectLink(d *ent.DirectLink, hasher hashid.Encoder, base *url.URL) DirectLink {
return DirectLink{
ID: hashid.EncodeSourceLinkID(hasher, d.ID),
URL: routes.MasterDirectLink(base, hashid.EncodeSourceLinkID(hasher, d.ID), d.Name).String(),
Downloaded: d.Downloads,
CreatedAt: d.CreatedAt,
}
}
func BuildEntity(extendedInfo *fs.FileExtendedInfo, e fs.Entity, hasher hashid.Encoder) Entity {
var u *user.User
createdBy := e.CreatedBy()

@ -21,7 +21,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/wopi"
"github.com/gin-gonic/gin"
)
@ -371,7 +370,7 @@ type (
Uri string `json:"uri" form:"uri" binding:"required"`
Version string `json:"version" form:"version"`
ViewerID string `json:"viewer_id" form:"viewer_id" binding:"required"`
PreferredAction setting.ViewerAction `json:"preferred_action" form:"preferred_action" binding:"required"`
PreferredAction types.ViewerAction `json:"preferred_action" form:"preferred_action" binding:"required"`
}
CreateViewerSessionParamCtx struct{}
)
@ -389,7 +388,7 @@ func (s *CreateViewerSessionService) Create(c *gin.Context) (*ViewerSessionRespo
// Find the given viewer
viewers := dep.SettingProvider().FileViewers(c)
var targetViewer *setting.Viewer
var targetViewer *types.Viewer
for _, group := range viewers {
for _, viewer := range group.Viewers {
if viewer.ID == s.ViewerID && !viewer.Disabled {
@ -413,7 +412,7 @@ func (s *CreateViewerSessionService) Create(c *gin.Context) (*ViewerSessionRespo
}
res := &ViewerSessionResponse{Session: viewerSession}
if targetViewer.Type == setting.ViewerTypeWopi {
if targetViewer.Type == types.ViewerTypeWopi {
// For WOPI viewer, generate WOPI src
wopiSrc, err := wopi.GenerateWopiSrc(c, s.PreferredAction, targetViewer, viewerSession)
if err != nil {

@ -24,6 +24,7 @@ type (
RemainDownloads int `json:"downloads"`
Expire int `json:"expire"`
ShareView bool `json:"share_view"`
ShowReadMe bool `json:"show_readme"`
}
ShareCreateParamCtx struct{}
)
@ -58,6 +59,7 @@ func (service *ShareCreateService) Upsert(c *gin.Context, existed int) (string,
Expire: expires,
ExistedShareID: existed,
ShareView: service.ShareView,
ShowReadMe: service.ShowReadMe,
})
if err != nil {
return "", err
