feat(workflow): import files from external storage

pull/2224/merge
Aaron Liu 4 months ago
parent 5d72faf688
commit a10a008ed7

@ -562,7 +562,7 @@ func (d *dependency) IoIntenseQueue(ctx context.Context) queue.Queue {
queue.WithWorkerCount(queueSetting.WorkerNum), queue.WithWorkerCount(queueSetting.WorkerNum),
queue.WithName("IoIntenseQueue"), queue.WithName("IoIntenseQueue"),
queue.WithMaxTaskExecution(queueSetting.MaxExecution), queue.WithMaxTaskExecution(queueSetting.MaxExecution),
queue.WithResumeTaskType(queue.CreateArchiveTaskType, queue.ExtractArchiveTaskType, queue.RelocateTaskType), queue.WithResumeTaskType(queue.CreateArchiveTaskType, queue.ExtractArchiveTaskType, queue.RelocateTaskType, queue.ImportTaskType),
queue.WithTaskPullInterval(10*time.Second), queue.WithTaskPullInterval(10*time.Second),
) )
return d.ioIntenseQueue return d.ioIntenseQueue

@ -1 +1 @@
Subproject commit c4d4d3aa6f28e04a5828f3b4b4453d239746bed0 Subproject commit 815f5857f0c673b81d4d39663b39278badaa626c

@ -120,6 +120,7 @@ type (
Source string Source string
Size int64 Size int64
UploadSessionID uuid.UUID UploadSessionID uuid.UUID
Importing bool
} }
RelocateEntityParameter struct { RelocateEntityParameter struct {
@ -716,6 +717,11 @@ func (f *fileClient) CreateFile(ctx context.Context, root *ent.File, args *Creat
SetParent(root). SetParent(root).
SetIsSymbolic(args.IsSymbolic). SetIsSymbolic(args.IsSymbolic).
SetStoragePoliciesID(args.StoragePolicyID) SetStoragePoliciesID(args.StoragePolicyID)
if args.EntityParameters != nil && args.EntityParameters.Importing {
stm.SetSize(args.EntityParameters.Size)
}
newFile, err := stm.Save(ctx) newFile, err := stm.Save(ctx)
if err != nil { if err != nil {
return nil, nil, nil, fmt.Errorf("failed to create file: %v", err) return nil, nil, nil, fmt.Errorf("failed to create file: %v", err)
@ -730,6 +736,12 @@ func (f *fileClient) CreateFile(ctx context.Context, root *ent.File, args *Creat
if err != nil { if err != nil {
return nil, nil, storageDiff, fmt.Errorf("failed to create default entity: %v", err) return nil, nil, storageDiff, fmt.Errorf("failed to create default entity: %v", err)
} }
if args.EntityParameters.Importing {
if err := f.client.File.UpdateOne(newFile).SetPrimaryEntity(defaultEntity.ID).Exec(ctx); err != nil {
return nil, nil, storageDiff, fmt.Errorf("failed to set primary entity: %v", err)
}
}
} }
// Create metadata if needed // Create metadata if needed
@ -848,7 +860,7 @@ func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *Ent
stm.SetUpdatedAt(*args.ModifiedAt) stm.SetUpdatedAt(*args.ModifiedAt)
} }
if args.UploadSessionID != uuid.Nil { if args.UploadSessionID != uuid.Nil && !args.Importing {
stm.SetUploadSessionID(args.UploadSessionID) stm.SetUploadSessionID(args.UploadSessionID)
} }

@ -185,6 +185,15 @@ func SlaveMediaMetaRoute(src, ext string) string {
return fmt.Sprintf("file/meta/%s/%s", src, url.PathEscape(ext)) return fmt.Sprintf("file/meta/%s/%s", src, url.PathEscape(ext))
} }
// SlaveFileListRoute generates the slave-node API route used to list physical
// objects under srcPath. recursive controls whether the listing descends into
// subdirectories. Both values are carried as URL query parameters so that
// arbitrary characters in srcPath are safely percent-encoded.
func SlaveFileListRoute(srcPath string, recursive bool) string {
	// url.Values.Encode percent-encodes the path, so no manual escaping
	// is needed here.
	query := url.Values{}
	query.Set("recursive", strconv.FormatBool(recursive))
	query.Set("path", srcPath)
	// Concatenate directly instead of round-tripping through url.Parse:
	// the previous implementation discarded the Parse error, and a failed
	// parse would have left a nil *url.URL whose String() call panics.
	return constants.APIPrefixSlave + "file/list?" + query.Encode()
}
func SlaveThumbUrl(base *url.URL, srcPath, ext string) *url.URL { func SlaveThumbUrl(base *url.URL, srcPath, ext string) *url.URL {
srcPath = url.PathEscape(base64.URLEncoding.EncodeToString([]byte(srcPath))) srcPath = url.PathEscape(base64.URLEncoding.EncodeToString([]byte(srcPath)))
ext = url.PathEscape(ext) ext = url.PathEscape(ext)

@ -8,6 +8,9 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
"path"
"path/filepath"
"strings"
"time" "time"
"github.com/cloudreve/Cloudreve/v4/ent" "github.com/cloudreve/Cloudreve/v4/ent"
@ -126,80 +129,88 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
return driver, nil return driver, nil
} }
// func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
//// List 列出COS文件 // 初始化列目录参数
//func (handler Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { opt := &cossdk.BucketGetOptions{
// // 初始化列目录参数 Prefix: strings.TrimPrefix(base, "/"),
// opt := &cossdk.BucketGetOptions{ EncodingType: "",
// Prefix: strings.TrimPrefix(base, "/"), MaxKeys: 1000,
// EncodingType: "", }
// MaxKeys: 1000,
// } // 是否为递归列出
// // 是否为递归列出
// if !recursive { if !recursive {
// opt.Delimiter = "/" opt.Delimiter = "/"
// } }
// // 手动补齐结尾的slash
// if opt.Prefix != "" { // 手动补齐结尾的slash
// opt.Prefix += "/" if opt.Prefix != "" {
// } opt.Prefix += "/"
// }
// var (
// marker string var (
// objects []cossdk.Object marker string
// commons []string objects []cossdk.Object
// ) commons []string
// )
// for {
// res, _, err := handler.client.Bucket.Get(ctx, opt) for {
// if err != nil { res, _, err := handler.client.Bucket.Get(ctx, opt)
// return nil, err if err != nil {
// } handler.l.Warning("Failed to list objects: %s", err)
// objects = append(objects, res.Contents...) return nil, err
// commons = append(commons, res.CommonPrefixes...) }
// // 如果本次未列取完则继续使用marker获取结果 objects = append(objects, res.Contents...)
// marker = res.NextMarker commons = append(commons, res.CommonPrefixes...)
// // marker 为空时结果列取完毕,跳出 // 如果本次未列取完则继续使用marker获取结果
// if marker == "" { marker = res.NextMarker
// break // marker 为空时结果列取完毕,跳出
// } if marker == "" {
// } break
// }
// // 处理列取结果 }
// res := make([]response.Object, 0, len(objects)+len(commons))
// // 处理目录 // 处理列取结果
// for _, object := range commons { res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))
// rel, err := filepath.Rel(opt.Prefix, object) // 处理目录
// if err != nil {
// continue for _, object := range commons {
// } rel, err := filepath.Rel(opt.Prefix, object)
// res = append(res, response.Object{ if err != nil {
// Name: path.Base(object), handler.l.Warning("Failed to get relative path: %s", err)
// RelativePath: filepath.ToSlash(rel), continue
// Size: 0, }
// IsDir: true, res = append(res, fs.PhysicalObject{
// LastModify: time.Now(), Name: path.Base(object),
// }) RelativePath: filepath.ToSlash(rel),
// } Size: 0,
// // 处理文件 IsDir: true,
// for _, object := range objects { LastModify: time.Now(),
// rel, err := filepath.Rel(opt.Prefix, object.Key) })
// if err != nil { }
// continue onProgress(len(commons))
// }
// res = append(res, response.Object{ // 处理文件
// Name: path.Base(object.Key),
// Source: object.Key, for _, object := range objects {
// RelativePath: filepath.ToSlash(rel), rel, err := filepath.Rel(opt.Prefix, object.Key)
// Size: uint64(object.Size), if err != nil {
// IsDir: false, handler.l.Warning("Failed to get relative path: %s", err)
// LastModify: time.Now(), continue
// }) }
// } res = append(res, fs.PhysicalObject{
// Name: path.Base(object.Key),
// return res, nil Source: object.Key,
// RelativePath: filepath.ToSlash(rel),
//} Size: object.Size,
IsDir: false,
LastModify: time.Now(),
})
}
onProgress(len(res))
return res, nil
}
// CORS 创建跨域策略 // CORS 创建跨域策略
func (handler Driver) CORS() error { func (handler Driver) CORS() error {

@ -2,6 +2,7 @@ package driver
import ( import (
"context" "context"
"encoding/gob"
"os" "os"
"time" "time"
@ -76,7 +77,7 @@ type (
// List 递归列取远程端path路径下文件、目录不包含path本身 // List 递归列取远程端path路径下文件、目录不包含path本身
// 返回的对象路径以path作为起始根目录. // 返回的对象路径以path作为起始根目录.
// recursive - 是否递归列出 // recursive - 是否递归列出
// List(ctx context.Context, path string, recursive bool) ([]response.Object, error) List(ctx context.Context, base string, onProgress ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error)
// Capabilities returns the capabilities of this handler // Capabilities returns the capabilities of this handler
Capabilities() *Capabilities Capabilities() *Capabilities
@ -108,6 +109,8 @@ type (
// BrowserRelayedDownload indicates whether to relay download via stream-saver. // BrowserRelayedDownload indicates whether to relay download via stream-saver.
BrowserRelayedDownload bool BrowserRelayedDownload bool
} }
ListProgressFunc func(int)
) )
const ( const (
@ -122,3 +125,7 @@ type ForceUsePublicEndpointCtx struct{}
func WithForcePublicEndpoint(ctx context.Context, value bool) context.Context { func WithForcePublicEndpoint(ctx context.Context, value bool) context.Context {
return context.WithValue(ctx, ForceUsePublicEndpointCtx{}, value) return context.WithValue(ctx, ForceUsePublicEndpointCtx{}, value)
} }
// init registers fs.PhysicalObject with encoding/gob so values of this type
// can appear inside gob-encoded payloads. Registration must happen before any
// encode/decode call; package init guarantees that ordering.
// NOTE(review): presumably needed to serialize listing results exchanged
// between master and slave nodes — confirm against the callers that
// gob-decode these objects.
func init() {
	gob.Register(fs.PhysicalObject{})
}

@ -58,51 +58,53 @@ func New(p *ent.StoragePolicy, l logging.Logger, config conf.ConfigProvider) *Dr
} }
} }
//// List 递归列取给定物理路径下所有文件 func (handler *Driver) List(ctx context.Context, path string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
//func (handler *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) { var res []fs.PhysicalObject
// var res []response.Object root := handler.LocalPath(ctx, path)
//
// // 取得起始路径 err := filepath.Walk(root,
// root := util.RelativePath(filepath.FromSlash(path)) func(path string, info os.FileInfo, err error) error {
// select {
// // 开始遍历路径下的文件、目录 case <-ctx.Done():
// err := filepath.Walk(root, return ctx.Err()
// func(path string, info os.FileInfo, err error) error { default:
// // 跳过根目录 }
// if path == root {
// return nil // Skip root directory
// } if path == root {
// return nil
// if err != nil { }
// util.Log().Warning("Failed to walk folder %q: %s", path, err)
// return filepath.SkipDir if err != nil {
// } handler.l.Warning("Failed to walk folder %q: %s", path, err)
// return filepath.SkipDir
// // 将遍历对象的绝对路径转换为相对路径 }
// rel, err := filepath.Rel(root, path)
// if err != nil { // Transform absolute path to relative path
// return err rel, err := filepath.Rel(root, path)
// } if err != nil {
// return err
// res = append(res, response.Object{ }
// Name: info.Name(),
// RelativePath: filepath.ToSlash(rel), res = append(res, fs.PhysicalObject{
// Source: path, Name: info.Name(),
// Size: uint64(info.Size()), RelativePath: filepath.ToSlash(rel),
// IsDir: info.IsDir(), Source: path,
// LastModify: info.ModTime(), Size: info.Size(),
// }) IsDir: info.IsDir(),
// LastModify: info.ModTime(),
// // 如果非递归,则不步入目录 })
// if !recursive && info.IsDir() { onProgress(1)
// return filepath.SkipDir // If not recursive, do not enter directory
// } if !recursive && info.IsDir() {
// return filepath.SkipDir
// return nil }
// })
// return nil
// return res, err })
//}
return res, err
}
// Get 获取文件内容 // Get 获取文件内容
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) { func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {

@ -9,7 +9,10 @@ import (
"io" "io"
"net/url" "net/url"
"os" "os"
"path"
"path/filepath"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/cloudreve/Cloudreve/v4/ent" "github.com/cloudreve/Cloudreve/v4/ent"
@ -104,6 +107,89 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
return driver, nil return driver, nil
} }
// List enumerates objects stored under base in the OBS bucket. onProgress is
// invoked with the number of newly discovered entries: once for directories,
// once for files. When recursive is false a "/" delimiter limits the result
// to immediate children; otherwise the whole subtree is listed. Returned
// RelativePath values are relative to base and use forward slashes.
func (d *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
	opt := &obs.ListObjectsInput{
		ListObjsInput: obs.ListObjsInput{
			Prefix:       strings.TrimPrefix(base, "/"),
			EncodingType: "",
			MaxKeys:      1000,
		},
		Bucket: d.policy.BucketName,
	}

	// Non-recursive listing stops at the first "/" past the prefix.
	if !recursive {
		opt.Delimiter = "/"
	}

	// Address the prefix as a "directory" by ensuring a trailing slash.
	if opt.Prefix != "" {
		opt.Prefix += "/"
	}

	var (
		marker  string
		objects []obs.Content
		commons []string
	)

	for {
		res, err := d.obs.ListObjects(opt, obs.WithRequestContext(ctx))
		if err != nil {
			d.l.Warning("Failed to list objects: %s", err)
			return nil, err
		}
		objects = append(objects, res.Contents...)
		commons = append(commons, res.CommonPrefixes...)

		// Keep paginating with the returned marker until exhausted.
		marker = res.NextMarker
		if marker == "" {
			break
		}
		// BUG FIX: feed the marker back into the request options.
		// Without this, every iteration re-requests the first page and
		// the loop never terminates on buckets with more than MaxKeys
		// objects under the prefix.
		opt.Marker = marker
	}

	res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))

	// Common prefixes represent directories.
	for _, object := range commons {
		rel, err := filepath.Rel(opt.Prefix, object)
		if err != nil {
			d.l.Warning("Failed to get relative path: %s", err)
			continue
		}
		res = append(res, fs.PhysicalObject{
			Name:         path.Base(object),
			RelativePath: filepath.ToSlash(rel),
			Size:         0,
			IsDir:        true,
			LastModify:   time.Now(),
		})
	}
	onProgress(len(commons))

	// Regular objects represent files.
	for _, object := range objects {
		rel, err := filepath.Rel(opt.Prefix, object.Key)
		if err != nil {
			d.l.Warning("Failed to get relative path: %s", err)
			continue
		}
		res = append(res, fs.PhysicalObject{
			Name:         path.Base(object.Key),
			Source:       object.Key,
			RelativePath: filepath.ToSlash(rel),
			Size:         object.Size,
			IsDir:        false,
			LastModify:   time.Now(),
		})
	}
	// BUG FIX: report only the file count here; len(res) would
	// double-count the directories already reported above. Sibling
	// drivers (Qiniu, S3) report len(objects) at this point.
	onProgress(len(objects))

	return res, nil
}
func (d *Driver) Put(ctx context.Context, file *fs.UploadRequest) error { func (d *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
defer file.Close() defer file.Close()

@ -5,6 +5,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"os" "os"
"path"
"path/filepath"
"strings" "strings"
"time" "time"
@ -23,14 +25,17 @@ import (
) )
// Driver OneDrive 适配器 // Driver OneDrive 适配器
type Driver struct { type (
policy *ent.StoragePolicy Driver struct {
client Client policy *ent.StoragePolicy
settings setting.Provider client Client
config conf.ConfigProvider settings setting.Provider
l logging.Logger config conf.ConfigProvider
chunkSize int64 l logging.Logger
} chunkSize int64
}
ListPathRealRootCtx struct{}
)
var ( var (
features = &boolset.BooleanSet{} features = &boolset.BooleanSet{}
@ -66,50 +71,52 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
}, nil }, nil
} }
//// List 列取项目 // List 列取项目
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
// base = strings.TrimPrefix(base, "/") base = strings.TrimPrefix(base, "/")
// // 列取子项目 // 列取子项目
// objects, _ := handler.client.ListChildren(ctx, base) objects, _ := handler.client.ListChildren(ctx, base)
//
// // 获取真实的列取起始根目录 // 获取真实的列取起始根目录
// rootPath := base rootPath := base
// if realBase, ok := ctx.Value(fsctx.PathCtx).(string); ok { if realBase, ok := ctx.Value(ListPathRealRootCtx{}).(string); ok {
// rootPath = realBase rootPath = realBase
// } else { } else {
// ctx = context.WithValue(ctx, fsctx.PathCtx, base) ctx = context.WithValue(ctx, ListPathRealRootCtx{}, base)
// } }
//
// // 整理结果 // 整理结果
// res := make([]response.Object, 0, len(objects)) res := make([]fs.PhysicalObject, 0, len(objects))
// for _, object := range objects { for _, object := range objects {
// source := path.Join(base, object.Name) source := path.Join(base, object.Name)
// rel, err := filepath.Rel(rootPath, source) rel, err := filepath.Rel(rootPath, source)
// if err != nil { if err != nil {
// continue continue
// } }
// res = append(res, response.Object{ res = append(res, fs.PhysicalObject{
// Name: object.Name, Name: object.Name,
// RelativePath: filepath.ToSlash(rel), RelativePath: filepath.ToSlash(rel),
// Source: source, Source: source,
// Size: uint64(object.Size), Size: object.Size,
// IsDir: object.Folder != nil, IsDir: object.Folder != nil,
// LastModify: time.Now(), LastModify: time.Now(),
// }) })
// } }
//
// // 递归列取子目录 onProgress(len(objects))
// if recursive {
// for _, object := range objects { // 递归列取子目录
// if object.Folder != nil { if recursive {
// sub, _ := handler.List(ctx, path.Join(base, object.Name), recursive) for _, object := range objects {
// res = append(res, sub...) if object.Folder != nil {
// } sub, _ := handler.List(ctx, path.Join(base, object.Name), onProgress, recursive)
// } res = append(res, sub...)
// } }
// }
// return res, nil }
//}
return res, nil
}
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) { func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
return nil, errors.New("not implemented") return nil, errors.New("not implemented")

@ -9,6 +9,8 @@ import (
"io" "io"
"net/url" "net/url"
"os" "os"
"path"
"path/filepath"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -154,72 +156,75 @@ func (handler *Driver) InitOSSClient(forceUsePublicEndpoint bool) error {
return nil return nil
} }
//// List 列出OSS上的文件 // List 列出OSS上的文件
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
// // 列取文件 // 列取文件
// base = strings.TrimPrefix(base, "/") base = strings.TrimPrefix(base, "/")
// if base != "" { if base != "" {
// base += "/" base += "/"
// } }
//
// var ( var (
// delimiter string delimiter string
// marker string marker string
// objects []oss.ObjectProperties objects []oss.ObjectProperties
// commons []string commons []string
// ) )
// if !recursive { if !recursive {
// delimiter = "/" delimiter = "/"
// } }
//
// for { for {
// subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base), subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base),
// oss.MaxKeys(1000), oss.Delimiter(delimiter)) oss.MaxKeys(1000), oss.Delimiter(delimiter))
// if err != nil { if err != nil {
// return nil, err return nil, err
// } }
// objects = append(objects, subRes.Objects...) objects = append(objects, subRes.Objects...)
// commons = append(commons, subRes.CommonPrefixes...) commons = append(commons, subRes.CommonPrefixes...)
// marker = subRes.NextMarker marker = subRes.NextMarker
// if marker == "" { if marker == "" {
// break break
// } }
// } }
//
// // 处理列取结果 // 处理列取结果
// res := make([]response.Object, 0, len(objects)+len(commons)) res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))
// // 处理目录 // 处理目录
// for _, object := range commons { for _, object := range commons {
// rel, err := filepath.Rel(base, object) rel, err := filepath.Rel(base, object)
// if err != nil { if err != nil {
// continue continue
// } }
// res = append(res, response.Object{ res = append(res, fs.PhysicalObject{
// Name: path.Base(object), Name: path.Base(object),
// RelativePath: filepath.ToSlash(rel), RelativePath: filepath.ToSlash(rel),
// Size: 0, Size: 0,
// IsDir: true, IsDir: true,
// LastModify: time.Now(), LastModify: time.Now(),
// }) })
// } }
// // 处理文件 onProgress(len(commons))
// for _, object := range objects {
// rel, err := filepath.Rel(base, object.Key) // 处理文件
// if err != nil { for _, object := range objects {
// continue rel, err := filepath.Rel(base, object.Key)
// } if err != nil {
// res = append(res, response.Object{ continue
// Name: path.Base(object.Key), }
// Source: object.Key, res = append(res, fs.PhysicalObject{
// RelativePath: filepath.ToSlash(rel), Name: path.Base(object.Key),
// Size: uint64(object.Size), Source: object.Key,
// IsDir: false, RelativePath: filepath.ToSlash(rel),
// LastModify: object.LastModified, Size: object.Size,
// }) IsDir: false,
// } LastModify: object.LastModified,
// })
// return res, nil }
//} onProgress(len(res))
return res, nil
}
// Get 获取文件 // Get 获取文件
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) { func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {

@ -5,6 +5,15 @@ import (
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/ent" "github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types" "github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset" "github.com/cloudreve/Cloudreve/v4/pkg/boolset"
@ -22,11 +31,6 @@ import (
"github.com/qiniu/go-sdk/v7/auth/qbox" "github.com/qiniu/go-sdk/v7/auth/qbox"
"github.com/qiniu/go-sdk/v7/storage" "github.com/qiniu/go-sdk/v7/storage"
"github.com/samber/lo" "github.com/samber/lo"
"io"
"net/http"
"net/url"
"os"
"time"
) )
const ( const (
@ -81,73 +85,75 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
return driver, nil return driver, nil
} }
// // List 列出给定路径下的文件
//// List 列出给定路径下的文件 func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { base = strings.TrimPrefix(base, "/")
// base = strings.TrimPrefix(base, "/") if base != "" {
// if base != "" { base += "/"
// base += "/" }
// }
// var (
// var ( delimiter string
// delimiter string marker string
// marker string objects []storage.ListItem
// objects []storage.ListItem commons []string
// commons []string )
// ) if !recursive {
// if !recursive { delimiter = "/"
// delimiter = "/" }
// }
// for {
// for { entries, folders, nextMarker, hashNext, err := handler.bucket.ListFiles(
// entries, folders, nextMarker, hashNext, err := handler.bucket.ListFiles( handler.policy.BucketName,
// handler.policy.BucketName, base, delimiter, marker, 1000)
// base, delimiter, marker, 1000) if err != nil {
// if err != nil { return nil, err
// return nil, err }
// } objects = append(objects, entries...)
// objects = append(objects, entries...) commons = append(commons, folders...)
// commons = append(commons, folders...) if !hashNext {
// if !hashNext { break
// break }
// } marker = nextMarker
// marker = nextMarker }
// }
// // 处理列取结果
// // 处理列取结果 res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))
// res := make([]response.Object, 0, len(objects)+len(commons)) // 处理目录
// // 处理目录 for _, object := range commons {
// for _, object := range commons { rel, err := filepath.Rel(base, object)
// rel, err := filepath.Rel(base, object) if err != nil {
// if err != nil { continue
// continue }
// } res = append(res, fs.PhysicalObject{
// res = append(res, response.Object{ Name: path.Base(object),
// Name: path.Base(object), RelativePath: filepath.ToSlash(rel),
// RelativePath: filepath.ToSlash(rel), Size: 0,
// Size: 0, IsDir: true,
// IsDir: true, LastModify: time.Now(),
// LastModify: time.Now(), })
// }) }
// } onProgress(len(commons))
// // 处理文件
// for _, object := range objects { // 处理文件
// rel, err := filepath.Rel(base, object.Key) for _, object := range objects {
// if err != nil { rel, err := filepath.Rel(base, object.Key)
// continue if err != nil {
// } continue
// res = append(res, response.Object{ }
// Name: path.Base(object.Key), res = append(res, fs.PhysicalObject{
// Source: object.Key, Name: path.Base(object.Key),
// RelativePath: filepath.ToSlash(rel), Source: object.Key,
// Size: uint64(object.Fsize), RelativePath: filepath.ToSlash(rel),
// IsDir: false, Size: int64(object.Fsize),
// LastModify: time.Unix(object.PutTime/10000000, 0), IsDir: false,
// }) LastModify: time.Unix(object.PutTime/10000000, 0),
// } })
// }
// return res, nil onProgress(len(objects))
//}
return res, nil
}
// Put 将文件流保存到指定目录 // Put 将文件流保存到指定目录
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error { func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {

@ -5,6 +5,12 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/application/constants" "github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/ent" "github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/pkg/auth" "github.com/cloudreve/Cloudreve/v4/pkg/auth"
@ -19,11 +25,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/serializer" "github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting" "github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/gofrs/uuid" "github.com/gofrs/uuid"
"io"
"net/http"
"net/url"
"strings"
"time"
) )
const ( const (
@ -45,6 +46,8 @@ type Client interface {
MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error) MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error)
// DeleteFiles deletes files from remote server // DeleteFiles deletes files from remote server
DeleteFiles(ctx context.Context, files ...string) ([]string, error) DeleteFiles(ctx context.Context, files ...string) ([]string, error)
// List lists files from remote server
List(ctx context.Context, path string, recursive bool) ([]fs.PhysicalObject, error)
} }
type DeleteFileRequest struct { type DeleteFileRequest struct {
@ -229,6 +232,28 @@ func (c *remoteClient) CreateUploadSession(ctx context.Context, session *fs.Uplo
return nil return nil
} }
// List queries the slave node for the physical objects under path. When
// recursive is true the slave walks the entire subtree; otherwise only direct
// children are returned. The slave returns the objects gob-encoded inside the
// standard response envelope.
func (c *remoteClient) List(ctx context.Context, path string, recursive bool) ([]fs.PhysicalObject, error) {
	resp, err := c.httpClient.Request(
		http.MethodGet,
		routes.SlaveFileListRoute(path, recursive),
		nil,
		request.WithContext(ctx),
		request.WithLogger(c.l),
	).CheckHTTPResponse(200).DecodeResponse()
	if err != nil {
		return nil, err
	}

	if resp.Code != 0 {
		// Use a constant format string: fmt.Errorf(resp.Error) would
		// misinterpret any "%" verbs embedded in the slave's message
		// (and is flagged by go vet's printf check).
		return nil, fmt.Errorf("%s", resp.Error)
	}

	var objects []fs.PhysicalObject
	// Previously this decode error was silently discarded, turning a
	// corrupted payload into an empty (nil) listing.
	if err := resp.GobDecode(&objects); err != nil {
		return nil, fmt.Errorf("failed to decode list response: %w", err)
	}

	return objects, nil
}
func (c *remoteClient) GetUploadURL(ctx context.Context, expires time.Time, sessionID string) (string, string, error) { func (c *remoteClient) GetUploadURL(ctx context.Context, expires time.Time, sessionID string) (string, string, error) {
base, err := url.Parse(c.policy.Edges.Node.Server) base, err := url.Parse(c.policy.Edges.Node.Server)
if err != nil { if err != nil {

@ -4,6 +4,10 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"net/url"
"os"
"time"
"github.com/cloudreve/Cloudreve/v4/ent" "github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types" "github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/auth" "github.com/cloudreve/Cloudreve/v4/pkg/auth"
@ -15,10 +19,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/logging" "github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/request" "github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/cloudreve/Cloudreve/v4/pkg/setting" "github.com/cloudreve/Cloudreve/v4/pkg/setting"
"net/url"
"os"
"path"
"time"
) )
var ( var (
@ -54,108 +54,19 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
}, nil }, nil
} }
//// List 列取文件 // List 列取文件
//func (handler *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) { func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
// var res []response.Object res, err := handler.uploadClient.List(ctx, base, recursive)
//
// reqBody := serializer.ListRequest{
// Path: path,
// Recursive: recursive,
// }
// reqBodyEncoded, err := json.Marshal(reqBody)
// if err != nil {
// return res, err
// }
//
// // 发送列表请求
// bodyReader := strings.NewReader(string(reqBodyEncoded))
// signTTL := model.GetIntSetting("slave_api_timeout", 60)
// resp, err := handler.Client.Request(
// "POST",
// handler.getAPIUrl("list"),
// bodyReader,
// request.WithCredential(handler.AuthInstance, int64(signTTL)),
// request.WithMasterMeta(handler.settings.SiteBasic(ctx).ID, handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx)).String()),
// ).CheckHTTPResponse(200).DecodeResponse()
// if err != nil {
// return res, err
// }
//
// // 处理列取结果
// if resp.Code != 0 {
// return res, errors.New(resp.Error)
// }
//
// if resStr, ok := resp.Data.(string); ok {
// err = json.Unmarshal([]byte(resStr), &res)
// if err != nil {
// return res, err
// }
// }
//
// return res, nil
//}
// getAPIUrl 获取接口请求地址
func (handler *Driver) getAPIUrl(scope string, routes ...string) string {
serverURL, err := url.Parse(handler.Policy.Edges.Node.Server)
if err != nil { if err != nil {
return "" return nil, err
}
var controller *url.URL
switch scope {
case "delete":
controller, _ = url.Parse("/api/v3/slave/delete")
case "thumb":
controller, _ = url.Parse("/api/v3/slave/thumb")
case "list":
controller, _ = url.Parse("/api/v3/slave/list")
default:
controller = serverURL
}
for _, r := range routes {
controller.Path = path.Join(controller.Path, r)
} }
return serverURL.ResolveReference(controller).String() onProgress(len(res))
return res, nil
} }
// Open 获取文件内容 // Open 获取文件内容
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) { func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
//// 尝试获取速度限制
//speedLimit := 0
//if user, ok := ctx.Value(fsctx.UserCtx).(model.User); ok {
// speedLimit = user.Group.SpeedLimit
//}
//
//// 获取文件源地址
//downloadURL, err := handler.Source(ctx, path, nil, true, int64(speedLimit))
//if err != nil {
// return nil, err
//}
//
//// 获取文件数据流
//resp, err := handler.Client.Request(
// "GET",
// downloadURL,
// nil,
// request.WithContext(ctx),
// request.WithTimeout(time.Duration(0)),
// request.WithMasterMeta(handler.settings.SiteBasic(ctx).ID, handler.settings.SiteURL(ctx).String()),
//).CheckHTTPResponse(200).GetRSCloser()
//if err != nil {
// return nil, err
//}
//
//resp.SetFirstFakeChunk()
//
//// 尝试获取文件大小
//if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok {
// resp.SetContentLength(int64(file.Size))
//}
return nil, errors.New("not implemented") return nil, errors.New("not implemented")
} }

@ -7,6 +7,9 @@ import (
"io" "io"
"net/url" "net/url"
"os" "os"
"path"
"path/filepath"
"strings"
"time" "time"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
@ -100,82 +103,85 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
return driver, nil return driver, nil
} }
//// List 列出给定路径下的文件 // List 列出给定路径下的文件
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
// // 初始化列目录参数 // 初始化列目录参数
// base = strings.TrimPrefix(base, "/") base = strings.TrimPrefix(base, "/")
// if base != "" { if base != "" {
// base += "/" base += "/"
// } }
//
// opt := &s3.ListObjectsInput{ opt := &s3.ListObjectsInput{
// Bucket: &handler.policy.BucketName, Bucket: &handler.policy.BucketName,
// Prefix: &base, Prefix: &base,
// MaxKeys: aws.Int64(1000), MaxKeys: aws.Int64(1000),
// } }
//
// // 是否为递归列出 // 是否为递归列出
// if !recursive { if !recursive {
// opt.Delimiter = aws.String("/") opt.Delimiter = aws.String("/")
// } }
//
// var ( var (
// objects []*s3.Object objects []*s3.Object
// commons []*s3.CommonPrefix commons []*s3.CommonPrefix
// ) )
//
// for { for {
// res, err := handler.svc.ListObjectsWithContext(ctx, opt) res, err := handler.svc.ListObjectsWithContext(ctx, opt)
// if err != nil { if err != nil {
// return nil, err return nil, err
// } }
// objects = append(objects, res.Contents...) objects = append(objects, res.Contents...)
// commons = append(commons, res.CommonPrefixes...) commons = append(commons, res.CommonPrefixes...)
//
// // 如果本次未列取完则继续使用marker获取结果 // 如果本次未列取完则继续使用marker获取结果
// if *res.IsTruncated { if *res.IsTruncated {
// opt.Marker = res.NextMarker opt.Marker = res.NextMarker
// } else { } else {
// break break
// } }
// } }
//
// // 处理列取结果 // 处理列取结果
// res := make([]response.Object, 0, len(objects)+len(commons)) res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))
//
// // 处理目录 // 处理目录
// for _, object := range commons { for _, object := range commons {
// rel, err := filepath.Rel(*opt.Prefix, *object.Prefix) rel, err := filepath.Rel(*opt.Prefix, *object.Prefix)
// if err != nil { if err != nil {
// continue continue
// } }
// res = append(res, response.Object{ res = append(res, fs.PhysicalObject{
// Name: path.Base(*object.Prefix), Name: path.Base(*object.Prefix),
// RelativePath: filepath.ToSlash(rel), RelativePath: filepath.ToSlash(rel),
// Size: 0, Size: 0,
// IsDir: true, IsDir: true,
// LastModify: time.Now(), LastModify: time.Now(),
// }) })
// } }
// // 处理文件 onProgress(len(commons))
// for _, object := range objects {
// rel, err := filepath.Rel(*opt.Prefix, *object.Key) // 处理文件
// if err != nil { for _, object := range objects {
// continue rel, err := filepath.Rel(*opt.Prefix, *object.Key)
// } if err != nil {
// res = append(res, response.Object{ continue
// Name: path.Base(*object.Key), }
// Source: *object.Key, res = append(res, fs.PhysicalObject{
// RelativePath: filepath.ToSlash(rel), Name: path.Base(*object.Key),
// Size: uint64(*object.Size), Source: *object.Key,
// IsDir: false, RelativePath: filepath.ToSlash(rel),
// LastModify: time.Now(), Size: int64(*object.Size),
// }) IsDir: false,
// } LastModify: time.Now(),
// })
// return res, nil }
// onProgress(len(objects))
//}
return res, nil
}
// Open 打开文件 // Open 打开文件
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) { func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {

@ -10,6 +10,15 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"net/url"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/cloudreve/Cloudreve/v4/ent" "github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types" "github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset" "github.com/cloudreve/Cloudreve/v4/pkg/boolset"
@ -23,12 +32,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/setting" "github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/upyun/go-sdk/upyun" "github.com/upyun/go-sdk/upyun"
"io"
"net/url"
"os"
"strconv"
"strings"
"time"
) )
type ( type (
@ -78,66 +81,67 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
return driver, nil return driver, nil
} }
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
// base = strings.TrimPrefix(base, "/") base = strings.TrimPrefix(base, "/")
//
// // 用于接受SDK返回对象的chan // 用于接受SDK返回对象的chan
// objChan := make(chan *upyun.FileInfo) objChan := make(chan *upyun.FileInfo)
// objects := []*upyun.FileInfo{} objects := []*upyun.FileInfo{}
//
// // 列取配置 // 列取配置
// listConf := &upyun.GetObjectsConfig{ listConf := &upyun.GetObjectsConfig{
// Path: "/" + base, Path: "/" + base,
// ObjectsChan: objChan, ObjectsChan: objChan,
// MaxListTries: 1, MaxListTries: 1,
// } }
// // 递归列取时不限制递归次数 // 递归列取时不限制递归次数
// if recursive { if recursive {
// listConf.MaxListLevel = -1 listConf.MaxListLevel = -1
// } }
//
// // 启动一个goroutine收集列取的对象信 // 启动一个goroutine收集列取的对象信
// wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}
// wg.Add(1) wg.Add(1)
// go func(input chan *upyun.FileInfo, output *[]*upyun.FileInfo, wg *sync.WaitGroup) { go func(input chan *upyun.FileInfo, output *[]*upyun.FileInfo, wg *sync.WaitGroup) {
// defer wg.Done() defer wg.Done()
// for { for {
// file, ok := <-input file, ok := <-input
// if !ok { if !ok {
// return return
// } }
// *output = append(*output, file) *output = append(*output, file)
// } onProgress(1)
// }(objChan, &objects, wg) }
// }(objChan, &objects, wg)
// up := upyun.NewUpYun(&upyun.UpYunConfig{
// Bucket: handler.policy.BucketName, up := upyun.NewUpYun(&upyun.UpYunConfig{
// Operator: handler.policy.AccessKey, Bucket: handler.policy.BucketName,
// Password: handler.policy.SecretKey, Operator: handler.policy.AccessKey,
// }) Password: handler.policy.SecretKey,
// })
// err := up.List(listConf)
// if err != nil { err := up.List(listConf)
// return nil, err if err != nil {
// } return nil, err
// }
// wg.Wait()
// wg.Wait()
// // 汇总处理列取结果
// res := make([]response.Object, 0, len(objects)) // 汇总处理列取结果
// for _, object := range objects { res := make([]fs.PhysicalObject, 0, len(objects))
// res = append(res, response.Object{ for _, object := range objects {
// Name: path.Base(object.Name), res = append(res, fs.PhysicalObject{
// RelativePath: object.Name, Name: path.Base(object.Name),
// Source: path.Join(base, object.Name), RelativePath: object.Name,
// Size: uint64(object.Size), Source: path.Join(base, object.Name),
// IsDir: object.IsDir, Size: int64(object.Size),
// LastModify: object.Time, IsDir: object.IsDir,
// }) LastModify: object.Time,
// } })
// }
// return res, nil
//} return res, nil
}
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) { func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
return nil, errors.New("not implemented") return nil, errors.New("not implemented")

@ -653,6 +653,7 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
Size: o.UploadRequest.Props.Size, Size: o.UploadRequest.Props.Size,
ModifiedAt: o.UploadRequest.Props.LastModified, ModifiedAt: o.UploadRequest.Props.LastModified,
UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID), UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
Importing: o.UploadRequest.ImportFrom != nil,
} }
} }

@ -117,7 +117,14 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
} }
// Get parent folder storage policy and performs validation // Get parent folder storage policy and performs validation
policy, err := f.getPreferredPolicy(ctx, ancestor) var (
policy *ent.StoragePolicy
)
if req.ImportFrom == nil {
policy, err = f.getPreferredPolicy(ctx, ancestor)
} else {
policy, err = f.storagePolicyClient.GetPolicyByID(ctx, req.Props.PreferredStoragePolicy)
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -133,7 +140,9 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
} }
// Generate save path by storage policy // Generate save path by storage policy
isThumbnailAndPolicyNotAvailable := policy.ID != ancestor.Model.StoragePolicyFiles && (req.Props.EntityType != nil && *req.Props.EntityType == types.EntityTypeThumbnail) isThumbnailAndPolicyNotAvailable := policy.ID != ancestor.Model.StoragePolicyFiles &&
(req.Props.EntityType != nil && *req.Props.EntityType == types.EntityTypeThumbnail) &&
req.ImportFrom == nil
if req.Props.SavePath == "" || isThumbnailAndPolicyNotAvailable { if req.Props.SavePath == "" || isThumbnailAndPolicyNotAvailable {
req.Props.SavePath = generateSavePath(policy, req, f.user) req.Props.SavePath = generateSavePath(policy, req, f.user)
if isThumbnailAndPolicyNotAvailable { if isThumbnailAndPolicyNotAvailable {
@ -174,7 +183,6 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
fileId = ancestor.ID() fileId = ancestor.ID()
entityId = entity.ID() entityId = entity.ID()
targetFile = ancestor.Model targetFile = ancestor.Model
lockToken = ls.Exclude(lr, f.user, f.hasher)
} else { } else {
uploadPlaceholder, err := f.Create(ctx, req.Props.Uri, types.FileTypeFile, uploadPlaceholder, err := f.Create(ctx, req.Props.Uri, types.FileTypeFile,
fs.WithUploadRequest(req), fs.WithUploadRequest(req),
@ -190,15 +198,19 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
fileId = uploadPlaceholder.ID() fileId = uploadPlaceholder.ID()
entityId = uploadPlaceholder.Entities()[0].ID() entityId = uploadPlaceholder.Entities()[0].ID()
targetFile = uploadPlaceholder.(*File).Model targetFile = uploadPlaceholder.(*File).Model
lockToken = ls.Exclude(lr, f.user, f.hasher)
} }
// create metadata to record uploading entity id if req.ImportFrom == nil {
if err := fc.UpsertMetadata(ctx, targetFile, map[string]string{ // If not importing, we can keep the lock
MetadataUploadSessionID: req.Props.UploadSessionID, lockToken = ls.Exclude(lr, f.user, f.hasher)
}, nil); err != nil {
_ = inventory.Rollback(dbTx) // create metadata to record uploading entity id
return nil, serializer.NewError(serializer.CodeDBError, "Failed to update upload session metadata", err) if err := fc.UpsertMetadata(ctx, targetFile, map[string]string{
MetadataUploadSessionID: req.Props.UploadSessionID,
}, nil); err != nil {
_ = inventory.Rollback(dbTx)
return nil, serializer.NewError(serializer.CodeDBError, "Failed to update upload session metadata", err)
}
} }
if err := inventory.CommitWithStorageDiff(ctx, dbTx, f.l, f.userClient); err != nil { if err := inventory.CommitWithStorageDiff(ctx, dbTx, f.l, f.userClient); err != nil {
@ -217,6 +229,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
}, },
FileID: fileId, FileID: fileId,
NewFileCreated: !fileExisted, NewFileCreated: !fileExisted,
Importing: req.ImportFrom != nil,
EntityID: entityId, EntityID: entityId,
UID: f.user.ID, UID: f.user.ID,
Policy: policy, Policy: policy,

@ -258,6 +258,7 @@ type (
ChunkSize int64 ChunkSize int64
SentinelTaskID int SentinelTaskID int
NewFileCreated bool // If new file is created for this session NewFileCreated bool // If new file is created for this session
Importing bool // If the upload is importing from another file
LockToken string // Token of the locked placeholder file LockToken string // Token of the locked placeholder file
Props *UploadProps Props *UploadProps
@ -377,6 +378,15 @@ type (
Size int64 Size int64
OmitName bool // if true, file name will not be validated OmitName bool // if true, file name will not be validated
} }
	// PhysicalObject describes a file or directory as it exists in a
	// physical storage backend, before being imported into Cloudreve.
	PhysicalObject struct {
		Name         string    `json:"name"`          // base name of the object
		Source       string    `json:"source"`        // physical path/key of the object in the backend
		RelativePath string    `json:"relative_path"` // path relative to the listed base
		Size         int64     `json:"size"`          // size in bytes
		IsDir        bool      `json:"is_dir"`        // whether the object is a directory
		LastModify   time.Time `json:"last_modify"`   // last modification time reported by the backend
	}
) )
const ( const (
@ -599,7 +609,8 @@ type (
Offset int64 Offset int64
ProgressFunc `json:"-"` ProgressFunc `json:"-"`
read int64 ImportFrom *PhysicalObject `json:"-"`
read int64
} }
) )

@ -344,6 +344,13 @@ func NewShareUri(id, password string) string {
return fmt.Sprintf("%s://%s@%s", constants.CloudreveScheme, id, constants.FileSystemShare) return fmt.Sprintf("%s://%s@%s", constants.CloudreveScheme, id, constants.FileSystemShare)
} }
// NewMyUri builds a URI pointing into the "my" filesystem. An empty id yields
// the root URI of the filesystem; otherwise id is embedded as the user-info
// part of the authority.
func NewMyUri(id string) string {
	if id != "" {
		return fmt.Sprintf("%s://%s@%s", constants.CloudreveScheme, id, constants.FileSystemMy)
	}
	return fmt.Sprintf("%s://%s", constants.CloudreveScheme, constants.FileSystemMy)
}
// PathEscape is same as url.PathEscape, with modifications to incoporate with JS encodeURIComponent: // PathEscape is same as url.PathEscape, with modifications to incoporate with JS encodeURIComponent:
// encodeURI() escapes all characters except: // encodeURI() escapes all characters except:
// //

@ -11,11 +11,13 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/user" "github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types" "github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes" "github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs" "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs" "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource" "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid" "github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer" "github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gofrs/uuid"
"github.com/samber/lo" "github.com/samber/lo"
) )
@ -41,6 +43,10 @@ type (
ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, entityID int) error ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, entityID int) error
// RecycleEntities recycles a group of entities // RecycleEntities recycles a group of entities
RecycleEntities(ctx context.Context, force bool, entityIDs ...int) error RecycleEntities(ctx context.Context, force bool, entityIDs ...int) error
// ListPhysical lists physical files in a path
ListPhysical(ctx context.Context, path string, policyID int, recursive bool, progress driver.ListProgressFunc) ([]fs.PhysicalObject, error)
// ImportPhysical imports a physical file to a Cloudreve file
ImportPhysical(ctx context.Context, dst *fs.URI, policyId int, src fs.PhysicalObject, completeHook bool) error
} }
DirectLink struct { DirectLink struct {
File fs.File File fs.File
@ -369,6 +375,51 @@ func (l *manager) DeleteVersion(ctx context.Context, path *fs.URI, version int)
return l.fs.VersionControl(ctx, path, version, true) return l.fs.VersionControl(ctx, path, version, true)
} }
// ListPhysical lists physical objects under path in the storage backend
// identified by policyID. The progress callback is forwarded to the storage
// driver so callers can observe listing progress. When recursive is true the
// whole subtree is listed.
func (l *manager) ListPhysical(ctx context.Context, path string, policyID int, recursive bool, progress driver.ListProgressFunc) ([]fs.PhysicalObject, error) {
	policy, err := l.dep.StoragePolicyClient().GetPolicyByID(ctx, policyID)
	if err != nil {
		return nil, err
	}

	// Named "d" to avoid shadowing the imported "driver" package, which is
	// referenced in this function's signature.
	d, err := l.GetStorageDriver(ctx, policy)
	if err != nil {
		return nil, err
	}

	return d.List(ctx, path, progress, recursive)
}
// ImportPhysical imports an existing physical object into Cloudreve as a file
// under dst. The physical blob is reused in place (SavePath points at the
// existing object; ImportFrom marks the request as an import). When
// completeHook is true, the new-entity-uploaded hook is fired as if the entity
// had just been uploaded.
func (l *manager) ImportPhysical(ctx context.Context, dst *fs.URI, policyId int, src fs.PhysicalObject, completeHook bool) error {
	targetUri := dst.Join(src.RelativePath)
	req := &fs.UploadRequest{
		Props: &fs.UploadProps{
			Uri:                    targetUri,
			UploadSessionID:        uuid.Must(uuid.NewV4()).String(),
			Size:                   src.Size,
			PreferredStoragePolicy: policyId,
			SavePath:               src.Source,
			LastModified:           &src.LastModify,
		},
		ImportFrom: &src,
	}

	// Prepare for upload
	uploadSession, err := l.fs.PrepareUpload(ctx, req)
	if err != nil {
		// Fixed typo in the original message ("faield to prepare uplaod").
		return fmt.Errorf("failed to prepare upload: %w", err)
	}

	if completeHook {
		d, err := l.GetStorageDriver(ctx, l.CastStoragePolicyOnSlave(ctx, uploadSession.Policy))
		if err != nil {
			return err
		}

		l.onNewEntityUploaded(ctx, uploadSession, d)
	}

	return nil
}
func entityUrlCacheKey(id int, speed int64, displayName string, download bool, siteUrl string) string { func entityUrlCacheKey(id int, speed int64, displayName string, download bool, siteUrl string) string {
hash := sha1.New() hash := sha1.New()
hash.Write([]byte(fmt.Sprintf("%d_%d_%s_%t_%s", id, hash.Write([]byte(fmt.Sprintf("%d_%d_%s_%t_%s", id,

@ -219,9 +219,13 @@ func (m *manager) RecycleEntities(ctx context.Context, force bool, entityIDs ...
mapSrcToId[entity.Source()] = entity.ID() mapSrcToId[entity.Source()] = entity.ID()
} }
res, err := d.Delete(ctx, lo.Map(chunk, func(entity fs.Entity, index int) string { toBeDeletedSrc := lo.Map(lo.Filter(chunk, func(item fs.Entity, index int) bool {
// Only delete entities that are not marked as "unlink only"
return item.Model().RecycleOptions == nil || !item.Model().RecycleOptions.UnlinkOnly
}), func(entity fs.Entity, index int) string {
return entity.Source() return entity.Source()
})...) })
res, err := d.Delete(ctx, toBeDeletedSrc...)
if err != nil { if err != nil {
for _, src := range res { for _, src := range res {
ae.Add(strconv.Itoa(mapSrcToId[src]), err) ae.Add(strconv.Itoa(mapSrcToId[src]), err)

@ -4,6 +4,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/thumb"
"os" "os"
"runtime" "runtime"
"time" "time"
@ -270,6 +271,11 @@ func (m *GenerateThumbTask) Do(ctx context.Context) (task.Status, error) {
res, err := m.m.generateThumb(ctx, m.uri, m.ext, m.es) res, err := m.m.generateThumb(ctx, m.uri, m.ext, m.es)
if err != nil { if err != nil {
if errors.Is(err, thumb.ErrNotAvailable) {
m.sig <- &generateRes{nil, err}
return task.StatusCompleted, nil
}
return task.StatusError, err return task.StatusError, err
} }

@ -339,7 +339,7 @@ func (m *manager) OnUploadFailed(ctx context.Context, session *fs.UploadSession)
if err := m.Delete(ctx, []*fs.URI{session.Props.Uri}, fs.WithSysSkipSoftDelete(true)); err != nil { if err := m.Delete(ctx, []*fs.URI{session.Props.Uri}, fs.WithSysSkipSoftDelete(true)); err != nil {
m.l.Warning("OnUploadFailed hook failed to delete file: %s", err) m.l.Warning("OnUploadFailed hook failed to delete file: %s", err)
} }
} else { } else if !session.Importing {
if err := m.fs.VersionControl(ctx, session.Props.Uri, session.EntityID, true); err != nil { if err := m.fs.VersionControl(ctx, session.Props.Uri, session.EntityID, true); err != nil {
m.l.Warning("OnUploadFailed hook failed to version control: %s", err) m.l.Warning("OnUploadFailed hook failed to version control: %s", err)
} }

@ -61,8 +61,9 @@ const (
ProgressTypeExtractSize = "extract_size" ProgressTypeExtractSize = "extract_size"
ProgressTypeDownload = "download" ProgressTypeDownload = "download"
SummaryKeySrc = "src" SummaryKeySrc = "src"
SummaryKeyDst = "dst" SummaryKeySrcPhysical = "src_physical"
SummaryKeyDst = "dst"
) )
func init() { func init() {

@ -0,0 +1,197 @@
package workflows
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync/atomic"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/ent/task"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
)
type (
	// ImportTask is a resumable queue task that imports physical files from an
	// external storage policy into the Cloudreve file system.
	ImportTask struct {
		*queue.DBTask

		l        logging.Logger   // task-scoped logger, set in Do
		state    *ImportTaskState // deserialized private state
		progress queue.Progresses // live progress counters, guarded by the task lock
	}

	// ImportTaskState is the JSON-serialized private state of an ImportTask.
	ImportTaskState struct {
		PolicyID         int             `json:"policy_id"`          // storage policy to import from
		Src              string          `json:"src"`                // physical source path to list
		Recursive        bool            `json:"is_recursive"`       // whether the source is listed recursively
		Dst              string          `json:"dst"`                // destination URI string
		Phase            ImportTaskPhase `json:"phase"`              // current phase, surfaced by Summarize
		Failed           int             `json:"failed,omitempty"`   // number of objects that failed to import
		ExtractMediaMeta bool            `json:"extract_media_meta"` // fire the complete hook (media meta) after import
	}

	// ImportTaskPhase labels the current phase of an import task.
	ImportTaskPhase string
)
const (
	// ProgressTypeImported counts objects already imported into the filesystem.
	ProgressTypeImported = "imported"
	// ProgressTypeIndexed counts objects discovered while listing the source.
	ProgressTypeIndexed = "indexed"
)
func init() {
	// Register the factory so interrupted import tasks can be resumed from
	// their persisted DB model after a restart.
	queue.RegisterResumableTaskFactory(queue.ImportTaskType, NewImportTaskFromModel)
}
// NewImportTask creates a new import task owned by u that imports src
// (optionally recursively) from the given storage policy into the dst URI.
//
// NOTE(review): there is no parameter for ExtractMediaMeta here, so the
// corresponding state field is always false — confirm whether callers need
// to control it.
func NewImportTask(ctx context.Context, u *ent.User, src string, recursive bool, dst string, policyID int) (queue.Task, error) {
	initial := &ImportTaskState{
		Src:       src,
		Recursive: recursive,
		Dst:       dst,
		PolicyID:  policyID,
	}

	raw, err := json.Marshal(initial)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal state: %w", err)
	}

	return &ImportTask{
		DBTask: &queue.DBTask{
			Task: &ent.Task{
				Type:          queue.ImportTaskType,
				CorrelationID: logging.CorrelationID(ctx),
				PrivateState:  string(raw),
				PublicState:   &types.TaskPublicState{},
			},
			DirectOwner: u,
		},
	}, nil
}
// NewImportTaskFromModel rehydrates an ImportTask from its persisted DB model.
// Private state is decoded lazily in Do/Summarize, not here.
func NewImportTaskFromModel(task *ent.Task) queue.Task {
	wrapped := &queue.DBTask{Task: task}
	return &ImportTask{DBTask: wrapped}
}
// Do executes the import task: it initializes progress reporting, decodes the
// persisted private state, runs the import, then writes the (possibly
// mutated) state back onto the task model so it survives resume.
func (m *ImportTask) Do(ctx context.Context) (task.Status, error) {
	dep := dependency.FromContext(ctx)
	m.l = dep.Logger()

	// Initialize the progress map before any callback can report into it.
	m.Lock()
	if m.progress == nil {
		m.progress = make(queue.Progresses)
	}
	m.progress[ProgressTypeIndexed] = &queue.Progress{}
	m.Unlock()

	// unmarshal state
	state := &ImportTaskState{}
	if err := json.Unmarshal([]byte(m.State()), state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
	}

	m.state = state
	next, err := m.processImport(ctx, dep)

	// Persist state even when processImport returned an error, so a resumed
	// run sees the latest state.
	newStateStr, marshalErr := json.Marshal(m.state)
	if marshalErr != nil {
		return task.StatusError, fmt.Errorf("failed to marshal state: %w", marshalErr)
	}

	m.Lock()
	m.Task.PrivateState = string(newStateStr)
	m.Unlock()
	return next, err
}
// processImport lists all physical objects under the configured source and
// imports them one by one into the destination URI. Folders are created via
// the file manager; files are imported in place. Individual failures are
// logged and counted, not fatal; already-existing files are skipped silently.
func (m *ImportTask) processImport(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	user := inventory.UserFromContext(ctx)
	fm := manager.NewFileManager(dep, user)
	defer fm.Recycle()

	failed := 0
	dst, err := fs.NewUriFromString(m.state.Dst)
	if err != nil {
		// A bad destination can never succeed; mark as critical.
		return task.StatusError, fmt.Errorf("failed to parse dst: %s (%w)", err, queue.CriticalErr)
	}

	physicalFiles, err := fm.ListPhysical(ctx, m.state.Src, m.state.PolicyID, m.state.Recursive,
		func(i int) {
			atomic.AddInt64(&m.progress[ProgressTypeIndexed].Current, int64(i))
		})
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to list physical files: %w", err)
	}

	m.l.Info("Importing %d physical files", len(physicalFiles))
	// Switch progress reporting from "indexed" to "imported".
	m.Lock()
	m.progress[ProgressTypeImported] = &queue.Progress{
		Total: int64(len(physicalFiles)),
	}
	delete(m.progress, ProgressTypeIndexed)
	m.Unlock()

	for _, physicalFile := range physicalFiles {
		if physicalFile.IsDir {
			m.l.Info("Creating folder %s", physicalFile.RelativePath)
			_, err := fm.Create(ctx, dst.Join(physicalFile.RelativePath), types.FileTypeFolder)
			atomic.AddInt64(&m.progress[ProgressTypeImported].Current, 1)
			if err != nil {
				m.l.Warning("Failed to create folder %s: %s", physicalFile.RelativePath, err)
				failed++
			}
		} else {
			m.l.Info("Importing file %s", physicalFile.RelativePath)
			err := fm.ImportPhysical(ctx, dst, m.state.PolicyID, physicalFile, m.state.ExtractMediaMeta)
			atomic.AddInt64(&m.progress[ProgressTypeImported].Current, 1)
			if err != nil {
				var appErr serializer.AppError
				if errors.As(err, &appErr) && appErr.Code == serializer.CodeObjectExist {
					// Duplicate files are expected on re-runs; not a failure.
					m.l.Info("File %s already exists, skipping", physicalFile.RelativePath)
					continue
				}
				m.l.Error("Failed to import file %s: %s, skipping", physicalFile.RelativePath, err)
				failed++
			}
		}
	}

	// Fix: persist the failure count into the task state. The original left
	// it in the local variable only, so Summarize always reported 0 failures.
	m.state.Failed = failed
	return task.StatusCompleted, nil
}
// Progress returns the task's progress counters under the task lock.
// The map itself is returned without copying; callers should treat it as
// read-only.
func (m *ImportTask) Progress(ctx context.Context) queue.Progresses {
	m.Lock()
	defer m.Unlock()
	return m.progress
}
// Summarize builds the user-facing summary of the task, lazily restoring
// private state from the persisted model when the task did not run in this
// process. Returns nil when the state cannot be decoded.
func (m *ImportTask) Summarize(hasher hashid.Encoder) *queue.Summary {
	// unmarshal state
	if m.state == nil {
		if err := json.Unmarshal([]byte(m.State()), &m.state); err != nil {
			return nil
		}
	}

	// NOTE(review): the summary keys below (SummaryKeySrcStr,
	// SummaryKeySrcDstPolicyID) are declared elsewhere; confirm they exist —
	// a nearby hunk only adds SummaryKeySrcPhysical.
	return &queue.Summary{
		Phase: string(m.state.Phase),
		Props: map[string]any{
			SummaryKeyDst:            m.state.Dst,
			SummaryKeySrcStr:         m.state.Src,
			SummaryKeyFailed:         m.state.Failed,
			SummaryKeySrcDstPolicyID: hashid.EncodePolicyID(hasher, m.state.PolicyID),
		},
	}
}

@ -103,6 +103,7 @@ const (
ExtractArchiveTaskType = "extract_archive" ExtractArchiveTaskType = "extract_archive"
RelocateTaskType = "relocate" RelocateTaskType = "relocate"
RemoteDownloadTaskType = "remote_download" RemoteDownloadTaskType = "remote_download"
ImportTaskType = "import"
SlaveCreateArchiveTaskType = "slave_create_archive" SlaveCreateArchiveTaskType = "slave_create_archive"
SlaveUploadTaskType = "slave_upload" SlaveUploadTaskType = "slave_upload"

@ -34,6 +34,23 @@ func CreateArchive(c *gin.Context) {
} }
} }
// ImportFiles handles the request to create a file-import workflow task.
// Errors are serialized into the response and abort the handler chain.
func ImportFiles(c *gin.Context) {
	svc := ParametersFromContext[*explorer.ImportWorkflowService](c, explorer.CreateImportParamCtx{})

	res, err := svc.CreateImportTask(c)
	switch {
	case err != nil:
		c.JSON(200, serializer.Err(c, err))
		c.Abort()
	case res != nil:
		c.JSON(200, serializer.Response{Data: res})
	}
}
// CreateRemoteDownload creates remote download task // CreateRemoteDownload creates remote download task
func CreateRemoteDownload(c *gin.Context) { func CreateRemoteDownload(c *gin.Context) {
service := ParametersFromContext[*explorer.DownloadWorkflowService](c, explorer.CreateDownloadParamCtx{}) service := ParametersFromContext[*explorer.DownloadWorkflowService](c, explorer.CreateDownloadParamCtx{})

@ -118,13 +118,15 @@ func SlavePing(c *gin.Context) {
// SlaveList 从机列出文件 // SlaveList 从机列出文件
func SlaveList(c *gin.Context) { func SlaveList(c *gin.Context) {
var service explorer.SlaveListService service := ParametersFromContext[*explorer.SlaveListService](c, explorer.SlaveListParamCtx{})
if err := c.ShouldBindJSON(&service); err == nil { objects, err := service.List(c)
res := service.List(c) if err != nil {
c.JSON(200, res) c.JSON(200, serializer.Err(c, err))
} else { c.Abort()
c.JSON(200, ErrorResponse(err)) return
} }
c.JSON(200, serializer.NewResponseWithGobData(c, objects))
} }
// SlaveDownloadTaskCreate creates a download task on slave // SlaveDownloadTaskCreate creates a download task on slave

@ -97,6 +97,11 @@ func initSlaveFileRouter(v4 *gin.RouterGroup) {
file.DELETE("", file.DELETE("",
controllers.FromJSON[explorer.SlaveDeleteFileService](explorer.SlaveDeleteFileParamCtx{}), controllers.FromJSON[explorer.SlaveDeleteFileService](explorer.SlaveDeleteFileParamCtx{}),
controllers.SlaveDelete) controllers.SlaveDelete)
// 列出文件
file.GET("list",
controllers.FromQuery[explorer.SlaveListService](explorer.SlaveListParamCtx{}),
controllers.SlaveList,
)
} }
} }
@ -683,6 +688,12 @@ func initMasterRouter(dep dependency.Dep) *gin.Engine {
controllers.FromJSON[explorer.CreateViewerSessionService](explorer.CreateViewerSessionParamCtx{}), controllers.FromJSON[explorer.CreateViewerSessionService](explorer.CreateViewerSessionParamCtx{}),
controllers.CreateViewerSession, controllers.CreateViewerSession,
) )
// Create task to import files
wf.POST("import",
middleware.IsAdmin(),
controllers.FromJSON[explorer.ImportWorkflowService](explorer.CreateImportParamCtx{}),
controllers.ImportFiles,
)
// 取得文件外链 // 取得文件外链
file.PUT("source", file.PUT("source",

@ -45,26 +45,6 @@ func init() {
gob.Register(ArchiveDownloadSession{}) gob.Register(ArchiveDownloadSession{})
} }
// List 列出从机上的文件
func (service *SlaveListService) List(c *gin.Context) serializer.Response {
//// 创建文件系统
//fs, err := filesystem.NewAnonymousFileSystem()
//if err != nil {
// return serializer.ErrDeprecated(serializer.CodeCreateFSError, "", err)
//}
//defer fs.Recycle()
//
//objects, err := fs.Handler.List(context.Background(), service.Path, service.Recursive)
//if err != nil {
// return serializer.ErrDeprecated(serializer.CodeIOFailed, "Cannot list files", err)
//}
//
//res, _ := json.Marshal(objects)
//return serializer.Response{Data: string(res)}
return serializer.Response{}
}
// ArchiveService 文件流式打包下載服务 // ArchiveService 文件流式打包下載服务
type ( type (
ArchiveService struct { ArchiveService struct {

@ -3,6 +3,8 @@ package explorer
import ( import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"strings"
"github.com/cloudreve/Cloudreve/v4/application/dependency" "github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory/types" "github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes" "github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
@ -14,7 +16,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/serializer" "github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/samber/lo" "github.com/samber/lo"
"strings"
) )
// SlaveDownloadService 从机文件下載服务 // SlaveDownloadService 从机文件下載服务
@ -35,12 +36,6 @@ type SlaveFilesService struct {
Files []string `json:"files" binding:"required,gt=0"` Files []string `json:"files" binding:"required,gt=0"`
} }
// SlaveListService 从机列表服务
type SlaveListService struct {
Path string `json:"path" binding:"required,min=1,max=65535"`
Recursive bool `json:"recursive"`
}
// SlaveServe serves file content // SlaveServe serves file content
func (s *EntityDownloadService) SlaveServe(c *gin.Context) error { func (s *EntityDownloadService) SlaveServe(c *gin.Context) error {
dep := dependency.FromContext(c) dep := dependency.FromContext(c)
@ -249,3 +244,25 @@ func (service *SlaveDeleteFileService) Delete(c *gin.Context) ([]string, error)
return nil, nil return nil, nil
} }
type (
	// SlaveListParamCtx is the context key used to bind SlaveListService
	// parameters.
	SlaveListParamCtx struct{}
	// SlaveListService carries parameters for listing physical files on a
	// slave node.
	// NOTE(review): these fields use `uri` tags but the route binds them via
	// FromQuery; gin's query binding conventionally reads `form` tags —
	// confirm FromQuery honors `uri` tags, otherwise binding will fail.
	SlaveListService struct {
		Path      string `uri:"path" binding:"required"` // physical path to list
		Recursive bool   `uri:"recursive"`               // whether to list recursively
	}
)
// List enumerates physical files under the configured path using this slave
// node's local storage driver. Listing progress is discarded.
func (s *SlaveListService) List(c *gin.Context) ([]fs.PhysicalObject, error) {
	dep := dependency.FromContext(c)
	fm := manager.NewFileManager(dep, nil)
	defer fm.Recycle()

	local := fm.LocalDriver(nil)
	res, err := local.List(c, s.Path, func(int) {}, s.Recursive)
	if err != nil {
		return nil, fmt.Errorf("failed to list files: %w", err)
	}

	return res, nil
}

@ -2,6 +2,7 @@ package explorer
import ( import (
"encoding/gob" "encoding/gob"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"time" "time"
"github.com/cloudreve/Cloudreve/v4/application/dependency" "github.com/cloudreve/Cloudreve/v4/application/dependency"
@ -258,6 +259,57 @@ func (service *ArchiveWorkflowService) CreateCompressTask(c *gin.Context) (*Task
return BuildTaskResponse(t, nil, hasher), nil return BuildTaskResponse(t, nil, hasher), nil
} }
type (
	// ImportWorkflowService holds parameters for creating an import workflow
	// task from an external storage policy into a user's filesystem.
	ImportWorkflowService struct {
		Src              string `json:"src" binding:"required"`       // physical source path on the storage policy
		Dst              string `json:"dst" binding:"required"`       // destination path relative to the target user's root
		ExtractMediaMeta bool   `json:"extract_media_meta"`           // whether to fire media-meta extraction after import
		UserID           string `json:"user_id" binding:"required"`   // hash ID of the user who will own the imported files
		Recursive        bool   `json:"recursive"`                    // whether to import the source recursively
		PolicyID         int    `json:"policy_id" binding:"required"` // storage policy to import from
	}
	// CreateImportParamCtx is the context key used to bind
	// ImportWorkflowService parameters.
	CreateImportParamCtx struct{}
)
// CreateImportTask validates the request and queues a workflow task that
// imports physical files from an external storage policy into the target
// user's filesystem. Only admin users may call this; the task itself runs as
// the target user.
func (service *ImportWorkflowService) CreateImportTask(c *gin.Context) (*TaskResponse, error) {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	hasher := dep.HashIDEncoder()
	// Removed an unused file manager that the original created and recycled
	// without ever using.

	if !user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionIsAdmin)) {
		return nil, serializer.NewError(serializer.CodeGroupNotAllowed, "Only admin can import files", nil)
	}

	userId, err := hasher.Decode(service.UserID, hashid.UserID)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "Invalid user id", err)
	}

	owner, err := dep.UserClient().GetLoginUserByID(c, userId)
	if err != nil || owner.ID == 0 {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to get user", err)
	}

	// Destination is rooted at the target user's "my" filesystem.
	dst, err := fs.NewUriFromString(fs.NewMyUri(service.UserID))
	if err != nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "Invalid destination", err)
	}

	// Create task
	// NOTE(review): service.ExtractMediaMeta is never forwarded to the task;
	// confirm whether NewImportTask should accept and persist it.
	t, err := workflows.NewImportTask(c, owner, service.Src, service.Recursive, dst.Join(service.Dst).String(), service.PolicyID)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeCreateTaskError, "Failed to create task", err)
	}

	if err := dep.IoIntenseQueue(c).QueueTask(c, t); err != nil {
		return nil, serializer.NewError(serializer.CodeCreateTaskError, "Failed to queue task", err)
	}

	return BuildTaskResponse(t, nil, hasher), nil
}
type ( type (
ListTaskService struct { ListTaskService struct {
PageSize int `form:"page_size" binding:"required,min=10,max=100"` PageSize int `form:"page_size" binding:"required,min=10,max=100"`
@ -279,7 +331,7 @@ func (service *ListTaskService) ListTasks(c *gin.Context) (*TaskListResponse, er
PageToken: service.NextPageToken, PageToken: service.NextPageToken,
PageSize: service.PageSize, PageSize: service.PageSize,
}, },
Types: []string{queue.CreateArchiveTaskType, queue.ExtractArchiveTaskType, queue.RelocateTaskType}, Types: []string{queue.CreateArchiveTaskType, queue.ExtractArchiveTaskType, queue.RelocateTaskType, queue.ImportTaskType},
UserID: user.ID, UserID: user.ID,
} }

Loading…
Cancel
Save