Init V4 community edition

u/liuaaron/v4
Aaron Liu 5 months ago
parent da4e44b77a
commit 960314b7f8

@ -0,0 +1,7 @@
[supervisord]
nodaemon=false
[program:background_process]
command=aria2c --enable-rpc --save-session /cloudreve/data
autostart=true
autorestart=true

@ -0,0 +1,15 @@
#!/bin/bash
# Build the frontend assets and bundle them into application/statics/assets.zip.
# Usage: build-assets.sh <version>
set -e

# Fail fast when the version argument is missing instead of letting
# `yarn version --new-version` receive an empty value.
if [ -z "$1" ]; then
    echo "Usage: $0 <version>" >&2
    exit 1
fi

export NODE_OPTIONS="--max-old-space-size=8192"
# This script is used to build the assets for the application.
cd assets
rm -rf build
yarn install --network-timeout 1000000
# Quote "$1" so versions containing shell metacharacters are passed intact.
yarn version --new-version "$1" --no-git-tag-version
yarn run build
# Copy the build files to the application directory
cd ../
zip -r - assets/build >assets.zip
mv assets.zip application/statics

@ -0,0 +1,2 @@
# Start the aria2 supervisor (daemonizes per aria2.supervisor.conf), then run
# Cloudreve as the main process. `exec` replaces the shell so Cloudreve
# receives container stop signals (SIGTERM) directly instead of via the shell.
supervisord -c ./aria2.supervisor.conf
exec ./cloudreve

@ -1,31 +0,0 @@
name: Build
on: workflow_dispatch
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
clean: false
submodules: "recursive"
- run: |
git fetch --prune --unshallow --tags
- name: Build and Release
uses: goreleaser/goreleaser-action@v4
with:
distribution: goreleaser
version: latest
args: release --clean --skip-validate
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}

@ -1,57 +0,0 @@
name: Build and push docker image
on:
push:
tags:
- 3.* # triggered on every push with tag 3.*
workflow_dispatch: # or just on button clicked
jobs:
docker-build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- run: git fetch --prune --unshallow
- name: Setup Environments
id: envs
run: |
CLOUDREVE_LATEST_TAG=$(git describe --tags --abbrev=0)
DOCKER_IMAGE="cloudreve/cloudreve"
echo "RELEASE_VERSION=${GITHUB_REF#refs}"
TAGS="${DOCKER_IMAGE}:latest,${DOCKER_IMAGE}:${CLOUDREVE_LATEST_TAG}"
echo "CLOUDREVE_LATEST_TAG:${CLOUDREVE_LATEST_TAG}"
echo ::set-output name=tags::${TAGS}
- name: Setup QEMU Emulator
uses: docker/setup-qemu-action@master
with:
platforms: all
- name: Setup Docker Buildx Command
id: buildx
uses: docker/setup-buildx-action@master
- name: Login to Dockerhub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build Docker Image and Push
id: docker_build
uses: docker/build-push-action@v2
with:
push: true
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: ${{ steps.envs.outputs.tags }}
- name: Update Docker Hub Description
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
repository: cloudreve/cloudreve
short-description: ${{ github.event.repository.description }}
- name: Image Digest
run: echo ${{ steps.docker_build.outputs.digest }}

@ -1,35 +0,0 @@
name: Test
on:
pull_request:
branches:
- master
push:
branches: [master]
jobs:
test:
name: Test
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
submodules: "recursive"
- name: Build static files
run: |
mkdir assets/build
touch assets/build/test.html
- name: Test
run: go test -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v2

5
.gitignore vendored

@ -1,5 +1,4 @@
# Binaries for programs and plugins
cloudreve
*.exe
*.exe~
*.dll
@ -8,7 +7,7 @@ cloudreve
*.db
*.bin
/release/
assets.zip
application/statics/assets.zip
# Test binary, build with `go test -c`
*.test
@ -31,3 +30,5 @@ conf/conf.ini
.vscode/
dist/
data/
tmp/

@ -1,19 +1,20 @@
env:
- CI=false
- GENERATE_SOURCEMAP=false
version: 2
before:
hooks:
- go mod tidy
- sh -c "cd assets && rm -rf build && yarn install --network-timeout 1000000 && yarn run build && cd ../ && zip -r - assets/build >assets.zip"
- chmod +x ./.build/build-assets.sh
- ./.build/build-assets.sh {{.Version}}
builds:
-
env:
- env:
- CGO_ENABLED=0
binary: cloudreve
ldflags:
- -X 'github.com/cloudreve/Cloudreve/v3/pkg/conf.BackendVersion={{.Tag}}' -X 'github.com/cloudreve/Cloudreve/v3/pkg/conf.LastCommit={{.ShortCommit}}'
- -s -w
- -X 'github.com/cloudreve/Cloudreve/v4/application/constants.BackendVersion={{.Tag}}' -X 'github.com/cloudreve/Cloudreve/v4/application/constants.LastCommit={{.ShortCommit}}'
goos:
- linux
@ -39,83 +40,73 @@ builds:
goarm: 7
archives:
- format: tar.gz
- formats: ["tar.gz"]
# this name template makes the OS and Arch compatible with the results of uname.
name_template: >-
cloudreve_{{.Tag}}_{{- .Os }}_{{ .Arch }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
# use zip for windows archives
format_overrides:
- goos: windows
format: zip
- goos: windows
formats: ["zip"]
checksum:
name_template: 'checksums.txt'
name_template: "checksums.txt"
snapshot:
name_template: "{{ incpatch .Version }}-next"
version_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
- "^docs:"
- "^test:"
release:
draft: true
prerelease: auto
target_commitish: '{{ .Commit }}'
skip_upload: true
target_commitish: "{{ .Commit }}"
name_template: "{{.Version}}"
blobs:
- provider: s3
endpoint: https://a09fb0452382d8d745cf79d9c5ce7f7d.r2.cloudflarestorage.com
region: auto
bucket: cloudreve
directory: "{{.Version}}"
dockers:
-
dockerfile: Dockerfile
- dockerfile: Dockerfile
use: buildx
build_flag_templates:
- "--platform=linux/amd64"
goos: linux
goarch: amd64
goamd64: v1
extra_files:
- .build/aria2.supervisor.conf
- .build/entrypoint.sh
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-amd64"
-
dockerfile: Dockerfile
- "cloudreve.azurecr.io/cloudreve/pro:{{ .Tag }}-amd64"
- dockerfile: Dockerfile
use: buildx
build_flag_templates:
- "--platform=linux/arm64"
goos: linux
goarch: arm64
extra_files:
- .build/aria2.supervisor.conf
- .build/entrypoint.sh
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-arm64"
-
dockerfile: Dockerfile
use: buildx
build_flag_templates:
- "--platform=linux/arm/v6"
goos: linux
goarch: arm
goarm: '6'
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-armv6"
-
dockerfile: Dockerfile
use: buildx
build_flag_templates:
- "--platform=linux/arm/v7"
goos: linux
goarch: arm
goarm: '7'
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-armv7"
- "cloudreve.azurecr.io/cloudreve/pro:{{ .Tag }}-arm64"
docker_manifests:
- name_template: "cloudreve/cloudreve:latest"
- name_template: "cloudreve.azurecr.io/cloudreve/pro:latest"
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-amd64"
- "cloudreve/cloudreve:{{ .Tag }}-arm64"
- "cloudreve/cloudreve:{{ .Tag }}-armv6"
- "cloudreve/cloudreve:{{ .Tag }}-armv7"
- name_template: "cloudreve/cloudreve:{{ .Tag }}"
- "cloudreve.azurecr.io/cloudreve/pro:{{ .Tag }}-amd64"
- "cloudreve.azurecr.io/cloudreve/pro:{{ .Tag }}-arm64"
- name_template: "cloudreve.azurecr.io/cloudreve/pro:{{ .Tag }}"
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-amd64"
- "cloudreve/cloudreve:{{ .Tag }}-arm64"
- "cloudreve/cloudreve:{{ .Tag }}-armv6"
- "cloudreve/cloudreve:{{ .Tag }}-armv7"
- "cloudreve.azurecr.io/cloudreve/pro:{{ .Tag }}-amd64"
- "cloudreve.azurecr.io/cloudreve/pro:{{ .Tag }}-arm64"

@ -1,17 +1,28 @@
FROM alpine:latest
WORKDIR /cloudreve
COPY .build/aria2.supervisor.conf .build/entrypoint.sh ./
COPY cloudreve ./cloudreve
RUN apk update \
&& apk add --no-cache tzdata \
&& apk add --no-cache tzdata vips-tools ffmpeg libreoffice aria2 supervisor font-noto font-noto-cjk \
&& cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone \
&& chmod +x ./cloudreve \
&& mkdir -p /data/aria2 \
&& chmod -R 766 /data/aria2
&& chmod +x ./entrypoint.sh \
&& mkdir -p ./data/temp/aria2 \
&& chmod -R 766 ./data/temp/aria2
ENV CR_ENABLE_ARIA2=1 \
CR_SETTING_DEFAULT_thumb_ffmpeg_enabled=1 \
CR_SETTING_DEFAULT_thumb_vips_enabled=1 \
CR_SETTING_DEFAULT_thumb_libreoffice_enabled=1 \
CR_SETTING_DEFAULT_media_meta_ffprobe=1
EXPOSE 5212 443
VOLUME ["/cloudreve/data"]
EXPOSE 5212
VOLUME ["/cloudreve/uploads", "/cloudreve/avatar", "/data"]
ENTRYPOINT ["sh", "./entrypoint.sh"]
ENTRYPOINT ["./cloudreve"]

@ -0,0 +1,219 @@
package application
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/crontab"
"github.com/cloudreve/Cloudreve/v4/pkg/email"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/onedrive"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/cloudreve/Cloudreve/v4/routers"
"github.com/gin-gonic/gin"
)
// Server represents a runnable Cloudreve server instance.
type Server interface {
	// Start starts the Cloudreve server.
	Start() error
	// PrintBanner prints the version banner to stdout.
	PrintBanner()
	// Close shuts down the server and its dependencies gracefully.
	Close()
}
// NewServer constructs a new Cloudreve server instance with given dependency.
func NewServer(dep dependency.Dep) Server {
	srv := &server{dep: dep}
	srv.logger = dep.Logger()
	srv.config = dep.ConfigProvider()
	return srv
}
// server is the default Server implementation backed by net/http.
type server struct {
	dep       dependency.Dep      // dependency container supplying all collaborators
	logger    logging.Logger      // application logger
	dbClient  *ent.Client         // database client; set during Start in master mode
	config    conf.ConfigProvider // static configuration provider
	server    *http.Server        // underlying HTTP server
	kv        cache.Driver        // KV cache; persisted to disk on Close
	mailQueue email.Driver        // email driver; NOTE(review): appears unused in this file — confirm
}
// PrintBanner prints the ASCII-art banner with the backend version, last
// commit hash and edition flag to stdout.
func (s *server) PrintBanner() {
	fmt.Print(`
___ _ _
/ __\ | ___ _ _ __| |_ __ _____ _____
/ / | |/ _ \| | | |/ _ | '__/ _ \ \ / / _ \
/ /___| | (_) | |_| | (_| | | | __/\ V / __/
\____/|_|\___/ \__,_|\__,_|_| \___| \_/ \___|
V` + constants.BackendVersion + ` Commit #` + constants.LastCommit + ` Pro=` + constants.IsPro + `
================================================
`)
}
// Start boots background workers, queues and cron jobs (master mode only for
// most of them), then serves HTTP traffic over TLS, a Unix socket, or plain
// TCP depending on configuration. It blocks until the listener returns;
// http.ErrServerClosed is treated as a clean exit and returns nil.
func (s *server) Start() error {
	// Switch gin to production mode when debug is disabled.
	if !s.config.System().Debug {
		gin.SetMode(gin.ReleaseMode)
	}

	s.kv = s.dep.KV()
	// delete all cached settings
	_ = s.kv.Delete(setting.KvSettingPrefix)

	// TODO: make sure redis is connected in dep before user traffic.
	if s.config.System().Mode == conf.MasterMode {
		s.dbClient = s.dep.DBClient()
		// TODO: make sure all dep is initialized before server start.
		s.dep.LockSystem()
		s.dep.UAParser()

		// Initialize OneDrive credentials
		credentials, err := onedrive.RetrieveOneDriveCredentials(context.Background(), s.dep.StoragePolicyClient())
		if err != nil {
			// Fix: original message misspelled "failed" as "faield".
			return fmt.Errorf("failed to retrieve OneDrive credentials for CredManager: %w", err)
		}

		if err := s.dep.CredManager().Upsert(context.Background(), credentials...); err != nil {
			return fmt.Errorf("failed to upsert OneDrive credentials to CredManager: %w", err)
		}

		// Periodically refresh OAuth credentials.
		crontab.Register(setting.CronTypeOauthCredRefresh, func(ctx context.Context) {
			dep := dependency.FromContext(ctx)
			cred := dep.CredManager()
			cred.RefreshAll(ctx)
		})

		// Initialize email queue before user traffic starts.
		_ = s.dep.EmailClient(context.Background())

		// Start all queues
		s.dep.MediaMetaQueue(context.Background()).Start()
		s.dep.EntityRecycleQueue(context.Background()).Start()
		s.dep.IoIntenseQueue(context.Background()).Start()
		s.dep.RemoteDownloadQueue(context.Background()).Start()

		// Start cron jobs
		c, err := crontab.NewCron(context.Background(), s.dep)
		if err != nil {
			return err
		}
		c.Start()

		// Start node pool
		if _, err := s.dep.NodePool(context.Background()); err != nil {
			return err
		}
	} else {
		s.dep.SlaveQueue(context.Background()).Start()
	}

	s.dep.ThumbQueue(context.Background()).Start()

	api := routers.InitRouter(s.dep)
	api.TrustedPlatform = s.config.System().ProxyHeader
	s.server = &http.Server{Handler: api}

	// Serve with TLS when an SSL certificate is configured.
	if s.config.SSL().CertPath != "" {
		s.logger.Info("Listening to %q", s.config.SSL().Listen)
		s.server.Addr = s.config.SSL().Listen
		if err := s.server.ListenAndServeTLS(s.config.SSL().CertPath, s.config.SSL().KeyPath); err != nil && !errors.Is(err, http.ErrServerClosed) {
			return fmt.Errorf("failed to listen to %q: %w", s.config.SSL().Listen, err)
		}

		return nil
	}

	// Serve over a Unix domain socket when configured.
	if s.config.Unix().Listen != "" {
		// delete socket file before listening
		if _, err := os.Stat(s.config.Unix().Listen); err == nil {
			if err = os.Remove(s.config.Unix().Listen); err != nil {
				return fmt.Errorf("failed to delete socket file %q: %w", s.config.Unix().Listen, err)
			}
		}

		s.logger.Info("Listening to %q", s.config.Unix().Listen)
		if err := s.runUnix(s.server); err != nil && !errors.Is(err, http.ErrServerClosed) {
			return fmt.Errorf("failed to listen to %q: %w", s.config.Unix().Listen, err)
		}

		return nil
	}

	// Default: plain TCP listener.
	s.logger.Info("Listening to %q", s.config.System().Listen)
	s.server.Addr = s.config.System().Listen
	if err := s.server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
		return fmt.Errorf("failed to listen to %q: %w", s.config.System().Listen, err)
	}

	return nil
}
// Close gracefully shuts down the server: the database connection first, then
// the HTTP server (bounded by the configured grace period), then persists the
// KV cache to disk, and finally shuts down the dependency manager.
func (s *server) Close() {
	if s.dbClient != nil {
		s.logger.Info("Shutting down database connection...")
		if err := s.dbClient.Close(); err != nil {
			s.logger.Error("Failed to close database connection: %s", err)
		}
	}

	ctx := context.Background()
	// Consistency fix: read GracePeriod from the injected config provider;
	// the original checked the global conf.SystemConfig but used s.config
	// for the timeout value, which could disagree.
	if gracePeriod := s.config.System().GracePeriod; gracePeriod != 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, time.Duration(gracePeriod)*time.Second)
		defer cancel()
	}

	// Shutdown http server
	if s.server != nil {
		if err := s.server.Shutdown(ctx); err != nil {
			s.logger.Error("Failed to shutdown server: %s", err)
		}
	}

	// Persist the in-memory KV cache so it survives restarts.
	if s.kv != nil {
		if err := s.kv.Persist(util.DataPath(cache.DefaultCacheFile)); err != nil {
			s.logger.Warning("Failed to persist cache: %s", err)
		}
	}

	if err := s.dep.Shutdown(ctx); err != nil {
		s.logger.Warning("Failed to shutdown dependency manager: %s", err)
	}
}
// runUnix serves HTTP over a Unix domain socket at the configured path. When
// a permission is configured it is applied to the socket file (failure to
// chmod is logged but not fatal). The socket file is removed on return.
func (s *server) runUnix(server *http.Server) error {
	// Consistency fix: read everything from the injected config provider;
	// the original mixed the globals conf.UnixConfig.Perm/Listen with
	// s.config.Unix() for the same values, which could disagree.
	unixConfig := s.config.Unix()
	listener, err := net.Listen("unix", unixConfig.Listen)
	if err != nil {
		return err
	}

	defer listener.Close()
	defer os.Remove(unixConfig.Listen)

	if unixConfig.Perm > 0 {
		// NOTE(review): %q on a numeric permission prints it as a quoted
		// rune; %o would be more readable — confirm before changing the
		// log format.
		if err := os.Chmod(unixConfig.Listen, os.FileMode(unixConfig.Perm)); err != nil {
			s.logger.Warning(
				"Failed to set permission to %q for socket file %q: %s",
				unixConfig.Perm,
				unixConfig.Listen,
				err,
			)
		}
	}

	return server.Serve(listener)
}

@ -0,0 +1,34 @@
package constants
// These values will be injected at build time, DO NOT EDIT.

// BackendVersion is the current backend version string (overridden via -ldflags -X).
var BackendVersion = "4.0.0-alpha.1"

// IsPro is "true" for the Pro edition; kept as a string so the linker can
// inject it via -ldflags -X (which only supports string variables).
var IsPro = "false"

// IsProBool is the boolean form of IsPro, computed once at package init.
var IsProBool = IsPro == "true"

// LastCommit is the short hash of the last commit (overridden via -ldflags -X).
var LastCommit = "000000"

const (
	// APIPrefix is the URL prefix of v4 API routes.
	APIPrefix = "/api/v4"
	// APIPrefixSlave is the URL prefix of slave-mode API routes.
	APIPrefixSlave = "/api/v4/slave"
	// CrHeaderPrefix is the prefix of Cloudreve-specific HTTP headers.
	CrHeaderPrefix = "X-Cr-"
)

// CloudreveScheme is the "cloudreve" URI scheme constant.
const CloudreveScheme = "cloudreve"

type (
	// FileSystemType identifies a logical file system namespace.
	FileSystemType string
)

// Known file system namespaces.
const (
	FileSystemMy           = FileSystemType("my")
	FileSystemShare        = FileSystemType("share")
	FileSystemTrash        = FileSystemType("trash")
	FileSystemSharedWithMe = FileSystemType("shared_with_me")
	FileSystemUnknown      = FileSystemType("unknown")
)

@ -0,0 +1,8 @@
package constants
// Binary size units in bytes (powers of 1024).
const (
	MB = 1 << 20 // mebibyte
	GB = 1 << 30 // gibibyte
	TB = 1 << 40 // tebibyte
	PB = 1 << 50 // pebibyte
)

@ -0,0 +1,874 @@
package dependency
import (
"context"
"errors"
iofs "io/fs"
"net/url"
"sync"
"time"
"github.com/cloudreve/Cloudreve/v4/application/statics"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
"github.com/cloudreve/Cloudreve/v4/pkg/email"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/thumb"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gin-contrib/static"
"github.com/go-webauthn/webauthn/webauthn"
"github.com/robfig/cron/v3"
"github.com/samber/lo"
"github.com/ua-parser/uap-go/uaparser"
)
var (
	// ErrorConfigPathNotSet is raised (via panicError) when a config provider
	// is requested before a config file path has been supplied.
	ErrorConfigPathNotSet = errors.New("config path not set")
)

type (
	// DepCtx is the context key under which a Dep instance is stored.
	DepCtx struct{}
	// ReloadCtx is the context key that, when present, asks reloadable
	// dependencies to be rebuilt instead of returning the cached instance.
	ReloadCtx struct{}
)
// Dep manages all dependencies of the server application. The default implementation is not
// concurrent safe, so all inner deps should be initialized before any goroutine starts.
type Dep interface {
	// ConfigProvider Get a singleton conf.ConfigProvider instance.
	ConfigProvider() conf.ConfigProvider
	// Logger Get a singleton logging.Logger instance.
	Logger() logging.Logger
	// Statics Get a singleton fs.FS instance for embedded static resources.
	Statics() iofs.FS
	// ServerStaticFS Get a singleton static.ServeFileSystem instance for serving static resources.
	ServerStaticFS() static.ServeFileSystem
	// DBClient Get a singleton ent.Client instance for database access.
	DBClient() *ent.Client
	// KV Get a singleton cache.Driver instance for KV store.
	KV() cache.Driver
	// NavigatorStateKV Get a singleton cache.Driver instance for navigator state store. It forces use in-memory
	// map instead of Redis to get better performance for complex nested linked list.
	NavigatorStateKV() cache.Driver
	// SettingClient Get a singleton inventory.SettingClient instance for access DB setting store.
	SettingClient() inventory.SettingClient
	// SettingProvider Get a singleton setting.Provider instance for access setting store in strong type.
	SettingProvider() setting.Provider
	// UserClient Creates a new inventory.UserClient instance for access DB user store.
	UserClient() inventory.UserClient
	// GroupClient Creates a new inventory.GroupClient instance for access DB group store.
	GroupClient() inventory.GroupClient
	// EmailClient Get a singleton email.Driver instance for sending emails.
	EmailClient(ctx context.Context) email.Driver
	// GeneralAuth Get a singleton auth.Auth instance for general authentication.
	GeneralAuth() auth.Auth
	// Shutdown the dependencies gracefully.
	Shutdown(ctx context.Context) error
	// FileClient Creates a new inventory.FileClient instance for access DB file store.
	FileClient() inventory.FileClient
	// NodeClient Creates a new inventory.NodeClient instance for access DB node store.
	NodeClient() inventory.NodeClient
	// DavAccountClient Creates a new inventory.DavAccountClient instance for access DB dav account store.
	DavAccountClient() inventory.DavAccountClient
	// DirectLinkClient Creates a new inventory.DirectLinkClient instance for access DB direct link store.
	DirectLinkClient() inventory.DirectLinkClient
	// HashIDEncoder Get a singleton hashid.Encoder instance for encoding/decoding hashids.
	HashIDEncoder() hashid.Encoder
	// TokenAuth Get a singleton auth.TokenAuth instance for token authentication.
	TokenAuth() auth.TokenAuth
	// LockSystem Get a singleton lock.LockSystem instance for file lock management.
	LockSystem() lock.LockSystem
	// StoragePolicyClient Creates a new inventory.StoragePolicyClient instance for access DB storage policy store.
	StoragePolicyClient() inventory.StoragePolicyClient
	// RequestClient Creates a new request.Client instance for HTTP requests.
	RequestClient(opts ...request.Option) request.Client
	// ShareClient Creates a new inventory.ShareClient instance for access DB share store.
	ShareClient() inventory.ShareClient
	// TaskClient Creates a new inventory.TaskClient instance for access DB task store.
	TaskClient() inventory.TaskClient
	// ForkWithLogger create a shallow copy of dependency with a new correlated logger, used as per-request dep.
	ForkWithLogger(ctx context.Context, l logging.Logger) context.Context
	// MediaMetaQueue Get a singleton queue.Queue instance for media metadata processing.
	MediaMetaQueue(ctx context.Context) queue.Queue
	// SlaveQueue Get a singleton queue.Queue instance for slave tasks.
	SlaveQueue(ctx context.Context) queue.Queue
	// MediaMetaExtractor Get a singleton mediameta.Extractor instance for media metadata extraction.
	MediaMetaExtractor(ctx context.Context) mediameta.Extractor
	// ThumbPipeline Get a singleton thumb.Generator instance for chained thumbnail generation.
	ThumbPipeline() thumb.Generator
	// ThumbQueue Get a singleton queue.Queue instance for thumbnail generation.
	ThumbQueue(ctx context.Context) queue.Queue
	// EntityRecycleQueue Get a singleton queue.Queue instance for entity recycle.
	EntityRecycleQueue(ctx context.Context) queue.Queue
	// MimeDetector Get a singleton fs.MimeDetector instance for MIME type detection.
	MimeDetector(ctx context.Context) mime.MimeDetector
	// CredManager Get a singleton credmanager.CredManager instance for credential management.
	CredManager() credmanager.CredManager
	// IoIntenseQueue Get a singleton queue.Queue instance for IO intense tasks.
	IoIntenseQueue(ctx context.Context) queue.Queue
	// RemoteDownloadQueue Get a singleton queue.Queue instance for remote download tasks.
	RemoteDownloadQueue(ctx context.Context) queue.Queue
	// NodePool Get a singleton cluster.NodePool instance for node pool management.
	NodePool(ctx context.Context) (cluster.NodePool, error)
	// TaskRegistry Get a singleton queue.TaskRegistry instance for task registration.
	TaskRegistry() queue.TaskRegistry
	// WebAuthn Get a singleton webauthn.WebAuthn instance for WebAuthn authentication.
	WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error)
	// UAParser Get a singleton uaparser.Parser instance for user agent parsing.
	UAParser() *uaparser.Parser
}
// dependency is the default Dep implementation. Fields are lazily initialized
// by their accessor methods; a non-nil field either holds the cached singleton
// or an override injected through an Option at construction time.
type dependency struct {
	// Core infrastructure singletons.
	configProvider   conf.ConfigProvider
	logger           logging.Logger
	statics          iofs.FS
	serverStaticFS   static.ServeFileSystem
	dbClient         *ent.Client
	rawEntClient     *ent.Client
	kv               cache.Driver
	navigatorStateKv cache.Driver

	// Inventory clients; non-nil values act as injected overrides
	// (see each accessor's behavior).
	settingClient       inventory.SettingClient
	fileClient          inventory.FileClient
	shareClient         inventory.ShareClient
	settingProvider     setting.Provider
	userClient          inventory.UserClient
	groupClient         inventory.GroupClient
	storagePolicyClient inventory.StoragePolicyClient
	taskClient          inventory.TaskClient
	nodeClient          inventory.NodeClient
	davAccountClient    inventory.DavAccountClient
	directLinkClient    inventory.DirectLinkClient

	// Auth, email and request helpers.
	emailClient   email.Driver
	generalAuth   auth.Auth
	hashidEncoder hashid.Encoder
	tokenAuth     auth.TokenAuth
	lockSystem    lock.LockSystem
	requestClient request.Client

	// Background queues (reloadable; guarded by mu).
	ioIntenseQueue      queue.Queue
	thumbQueue          queue.Queue
	mediaMetaQueue      queue.Queue
	entityRecycleQueue  queue.Queue
	slaveQueue          queue.Queue
	remoteDownloadQueue queue.Queue
	ioIntenseQueueTask  queue.Task

	// Media, thumbnail and misc singletons.
	mediaMeta     mediameta.Extractor
	thumbPipeline thumb.Generator
	mimeDetector  mime.MimeDetector
	credManager   credmanager.CredManager
	nodePool      cluster.NodePool
	taskRegistry  queue.TaskRegistry
	webauthn      *webauthn.WebAuthn
	parser        *uaparser.Parser
	cron          *cron.Cron

	// Construction-time settings supplied via Options.
	configPath        string
	isPro             bool
	requiredDbVersion string
	licenseKey        string

	// Protects inner deps that can be reloaded at runtime.
	mu sync.Mutex
}
// NewDependency creates a new Dep instance, applying the given construction
// options in order.
func NewDependency(opts ...Option) Dep {
	dep := &dependency{}
	for _, opt := range opts {
		opt.apply(dep)
	}
	return dep
}
// FromContext retrieves a Dep instance from context. It panics (type
// assertion failure) when no Dep is stored under DepCtx.
func FromContext(ctx context.Context) Dep {
	v := ctx.Value(DepCtx{})
	return v.(Dep)
}
// RequestClient returns the injected request.Client override if one was
// supplied; otherwise it creates a fresh client per call with the given
// options. Note that opts are ignored when an override is present.
func (d *dependency) RequestClient(opts ...request.Option) request.Client {
	if d.requestClient != nil {
		return d.requestClient
	}

	return request.NewClient(d.ConfigProvider(), opts...)
}
// WebAuthn lazily builds and caches the WebAuthn handler. Relying-party ID
// and allowed origins are derived from the configured site URLs.
func (d *dependency) WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error) {
	if d.webauthn != nil {
		return d.webauthn, nil
	}

	settings := d.SettingProvider()
	siteBasic := settings.SiteBasic(ctx)
	wConfig := &webauthn.Config{
		RPDisplayName: siteBasic.Name,
		RPID:          settings.SiteURL(ctx).Hostname(),
		RPOrigins: lo.Map(settings.AllSiteURLs(ctx), func(item *url.URL, index int) string {
			item.Path = ""
			return item.String()
		}), // The origin URLs allowed for WebAuthn requests
	}

	w, err := webauthn.New(wConfig)
	if err != nil {
		return nil, err
	}

	// Fix: cache on success so subsequent calls return the same instance,
	// matching the singleton contract documented on Dep (the original
	// rebuilt the handler on every call).
	d.webauthn = w
	return d.webauthn, nil
}
// UAParser returns the lazily-initialized user agent parser singleton,
// built from the parser's bundled definitions.
func (d *dependency) UAParser() *uaparser.Parser {
	if d.parser == nil {
		d.parser = uaparser.NewFromSaved()
	}
	return d.parser
}
// ConfigProvider returns the singleton configuration provider, loading it
// from the INI file at d.configPath on first use. Missing path or a parse
// failure is fatal via panicError.
func (d *dependency) ConfigProvider() conf.ConfigProvider {
	if d.configProvider != nil {
		return d.configProvider
	}

	if d.configPath == "" {
		d.panicError(ErrorConfigPathNotSet)
	}

	var err error
	// A temporary console logger is used here because the real Logger
	// itself depends on configuration.
	d.configProvider, err = conf.NewIniConfigProvider(d.configPath, logging.NewConsoleLogger(logging.LevelInformational))
	if err != nil {
		d.panicError(err)
	}

	return d.configProvider
}
// Logger returns the singleton console logger. The level comes from the
// configured LogLevel, forced down to debug when System().Debug is set.
func (d *dependency) Logger() logging.Logger {
	if d.logger == nil {
		sys := d.ConfigProvider().System()
		level := logging.LogLevel(sys.LogLevel)
		if sys.Debug {
			level = logging.LevelDebug
		}
		d.logger = logging.NewConsoleLogger(level)
		d.logger.Info("Logger initialized with LogLevel=%q.", level)
	}
	return d.logger
}
// Statics returns the singleton file system holding embedded static
// resources.
func (d *dependency) Statics() iofs.FS {
	if d.statics == nil {
		d.statics = statics.NewStaticFS(d.Logger())
	}
	return d.statics
}
// ServerStaticFS returns the singleton file system used to serve static
// resources over HTTP, built on top of the embedded Statics() FS and the
// pro-edition flag. Construction failure is fatal via panicError.
func (d *dependency) ServerStaticFS() static.ServeFileSystem {
	if d.serverStaticFS != nil {
		return d.serverStaticFS
	}

	sfs, err := statics.NewServerStaticFS(d.Logger(), d.Statics(), d.isPro)
	if err != nil {
		d.panicError(err)
	}

	d.serverStaticFS = sfs
	return d.serverStaticFS
}
// DBClient returns the singleton database client. On first use it builds the
// raw ent client (unless one was injected) and then runs
// inventory.InitializeDBClient against d.requiredDbVersion — presumably
// migration/version validation; confirm in the inventory package. Any
// failure is fatal via panicError.
func (d *dependency) DBClient() *ent.Client {
	if d.dbClient != nil {
		return d.dbClient
	}

	if d.rawEntClient == nil {
		client, err := inventory.NewRawEntClient(d.Logger(), d.ConfigProvider())
		if err != nil {
			d.panicError(err)
		}

		d.rawEntClient = client
	}

	client, err := inventory.InitializeDBClient(d.Logger(), d.rawEntClient, d.KV(), d.requiredDbVersion)
	if err != nil {
		d.panicError(err)
	}

	d.dbClient = client
	return d.dbClient
}
// KV returns the singleton KV cache driver: a Redis-backed store when a
// Redis server is configured, otherwise a file-backed in-memory store.
func (d *dependency) KV() cache.Driver {
	if d.kv != nil {
		return d.kv
	}

	config := d.ConfigProvider().Redis()
	if config.Server != "" {
		// NOTE(review): the pool/size argument 10 is hard-coded — confirm
		// whether it should come from configuration.
		d.kv = cache.NewRedisStore(
			d.Logger(),
			10,
			config.Network,
			config.Server,
			config.User,
			config.Password,
			config.DB,
		)
	} else {
		d.kv = cache.NewMemoStore(util.DataPath(cache.DefaultCacheFile), d.Logger())
	}

	return d.kv
}
// NavigatorStateKV returns the singleton KV store for navigator state. It is
// always an in-memory store (empty persist path), never Redis — see the Dep
// interface documentation for the rationale.
func (d *dependency) NavigatorStateKV() cache.Driver {
	if d.navigatorStateKv == nil {
		d.navigatorStateKv = cache.NewMemoStore("", d.Logger())
	}
	return d.navigatorStateKv
}
// SettingClient returns the singleton DB setting store client, creating it
// on first use from the database client and the KV cache.
func (d *dependency) SettingClient() inventory.SettingClient {
	if d.settingClient == nil {
		d.settingClient = inventory.NewSettingClient(d.DBClient(), d.KV())
	}
	return d.settingClient
}
// SettingProvider returns the singleton strongly-typed settings provider.
// The lookup chain differs by mode (see inline comments below).
func (d *dependency) SettingProvider() setting.Provider {
	if d.settingProvider != nil {
		return d.settingProvider
	}

	if d.ConfigProvider().System().Mode == conf.MasterMode {
		// For master mode, setting value will be retrieved in order:
		// Env overwrite -> KV Store -> DB Setting Store
		d.settingProvider = setting.NewProvider(
			setting.NewEnvOverrideStore(
				setting.NewKvSettingStore(d.KV(),
					setting.NewDbSettingStore(d.SettingClient(), nil),
				),
				d.Logger(),
			),
		)
	} else {
		// For slave mode, setting value will be retrieved in order:
		// Env overwrite -> Config file overwrites -> Setting defaults in DB schema
		d.settingProvider = setting.NewProvider(
			setting.NewEnvOverrideStore(
				setting.NewConfSettingStore(d.ConfigProvider(),
					setting.NewDbDefaultStore(nil),
				),
				d.Logger(),
			),
		)
	}

	return d.settingProvider
}
// UserClient returns the injected override if present; otherwise it creates
// a fresh client per call. The result is intentionally not cached — this
// matches the "Creates a new" contract on the Dep interface.
func (d *dependency) UserClient() inventory.UserClient {
	if d.userClient != nil {
		return d.userClient
	}

	return inventory.NewUserClient(d.DBClient())
}
// GroupClient returns the injected override if present; otherwise it creates
// a fresh client per call (not cached, per the Dep interface contract).
func (d *dependency) GroupClient() inventory.GroupClient {
	if d.groupClient != nil {
		return d.groupClient
	}

	return inventory.NewGroupClient(d.DBClient(), d.ConfigProvider().Database().Type, d.KV())
}
// NodeClient returns the injected override if present; otherwise it creates
// a fresh client per call (not cached, per the Dep interface contract).
func (d *dependency) NodeClient() inventory.NodeClient {
	if d.nodeClient != nil {
		return d.nodeClient
	}

	return inventory.NewNodeClient(d.DBClient())
}
// NodePool returns the singleton node pool, rebuilding it when the context
// carries ReloadCtx=true. Master mode builds a real pool from the node
// inventory; slave mode uses a dummy pool.
// NOTE(review): unlike the other reloadable getters this method does not take
// d.mu — confirm it is only reached from single-threaded startup/reload paths.
func (d *dependency) NodePool(ctx context.Context) (cluster.NodePool, error) {
	reload, _ := ctx.Value(ReloadCtx{}).(bool)
	if d.nodePool != nil && !reload {
		return d.nodePool, nil
	}

	if d.ConfigProvider().System().Mode == conf.MasterMode {
		np, err := cluster.NewNodePool(ctx, d.Logger(), d.ConfigProvider(), d.SettingProvider(), d.NodeClient())
		if err != nil {
			return nil, err
		}

		d.nodePool = np
	} else {
		d.nodePool = cluster.NewSlaveDummyNodePool(ctx, d.ConfigProvider(), d.SettingProvider())
	}

	return d.nodePool, nil
}
// EmailClient returns the singleton email driver, creating it on first use
// and replacing it (after closing the old one) when the context carries
// ReloadCtx=true. Guarded by d.mu for runtime reloads.
func (d *dependency) EmailClient(ctx context.Context) email.Driver {
	d.mu.Lock()
	defer d.mu.Unlock()

	if reload, _ := ctx.Value(ReloadCtx{}).(bool); reload || d.emailClient == nil {
		if d.emailClient != nil {
			// Close the previous pool before swapping in a new one.
			d.emailClient.Close()
		}

		d.emailClient = email.NewSMTPPool(d.SettingProvider(), d.Logger())
	}

	return d.emailClient
}
// MimeDetector returns the singleton MIME detector, rebuilding it when the
// context carries ReloadCtx=true. Guarded by d.mu.
func (d *dependency) MimeDetector(ctx context.Context) mime.MimeDetector {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Fix: use the flag's value, not its presence. The original
	// `_, reload := ctx.Value(ReloadCtx{}).(bool)` captured the ok-flag,
	// so an explicit ReloadCtx=false also triggered a rebuild. This now
	// matches EmailClient and NodePool.
	reload, _ := ctx.Value(ReloadCtx{}).(bool)
	if d.mimeDetector != nil && !reload {
		return d.mimeDetector
	}

	d.mimeDetector = mime.NewMimeDetector(ctx, d.SettingProvider(), d.Logger())
	return d.mimeDetector
}
// MediaMetaExtractor returns the singleton media metadata extractor,
// rebuilding it when the context carries ReloadCtx=true. Guarded by d.mu.
func (d *dependency) MediaMetaExtractor(ctx context.Context) mediameta.Extractor {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Fix: use the reload flag's value, not its presence (the original
	// ok-flag form treated an explicit ReloadCtx=false as a reload);
	// consistent with EmailClient and NodePool.
	reload, _ := ctx.Value(ReloadCtx{}).(bool)
	if d.mediaMeta != nil && !reload {
		return d.mediaMeta
	}

	d.mediaMeta = mediameta.NewExtractorManager(ctx, d.SettingProvider(), d.Logger())
	return d.mediaMeta
}
// ThumbQueue returns the singleton thumbnail generation queue, rebuilding it
// (after shutting down the previous instance) when the context carries
// ReloadCtx=true. In master mode the queue is backed by the task DB client;
// in slave mode it runs without one. Guarded by d.mu.
func (d *dependency) ThumbQueue(ctx context.Context) queue.Queue {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Fix: use the reload flag's value, not its presence (the original
	// ok-flag form treated an explicit ReloadCtx=false as a reload);
	// consistent with EmailClient and NodePool.
	reload, _ := ctx.Value(ReloadCtx{}).(bool)
	if d.thumbQueue != nil && !reload {
		return d.thumbQueue
	}

	if d.thumbQueue != nil {
		d.thumbQueue.Shutdown()
	}

	settings := d.SettingProvider()
	queueSetting := settings.Queue(context.Background(), setting.QueueTypeThumb)

	// Only master mode persists tasks to the DB.
	var t inventory.TaskClient
	if d.ConfigProvider().System().Mode == conf.MasterMode {
		t = d.TaskClient()
	}

	d.thumbQueue = queue.New(d.Logger(), t, nil, d,
		queue.WithBackoffFactor(queueSetting.BackoffFactor),
		queue.WithMaxRetry(queueSetting.MaxRetry),
		queue.WithBackoffMaxDuration(queueSetting.BackoffMaxDuration),
		queue.WithRetryDelay(queueSetting.RetryDelay),
		queue.WithWorkerCount(queueSetting.WorkerNum),
		queue.WithName("ThumbQueue"),
		queue.WithMaxTaskExecution(queueSetting.MaxExecution),
	)
	return d.thumbQueue
}
// MediaMetaQueue returns the singleton media metadata queue, rebuilding it
// (after shutting down the previous instance) when the context carries
// ReloadCtx=true. Guarded by d.mu.
func (d *dependency) MediaMetaQueue(ctx context.Context) queue.Queue {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Fix: use the reload flag's value, not its presence (the original
	// ok-flag form treated an explicit ReloadCtx=false as a reload);
	// consistent with EmailClient and NodePool.
	reload, _ := ctx.Value(ReloadCtx{}).(bool)
	if d.mediaMetaQueue != nil && !reload {
		return d.mediaMetaQueue
	}

	if d.mediaMetaQueue != nil {
		d.mediaMetaQueue.Shutdown()
	}

	settings := d.SettingProvider()
	queueSetting := settings.Queue(context.Background(), setting.QueueTypeMediaMeta)
	d.mediaMetaQueue = queue.New(d.Logger(), d.TaskClient(), nil, d,
		queue.WithBackoffFactor(queueSetting.BackoffFactor),
		queue.WithMaxRetry(queueSetting.MaxRetry),
		queue.WithBackoffMaxDuration(queueSetting.BackoffMaxDuration),
		queue.WithRetryDelay(queueSetting.RetryDelay),
		queue.WithWorkerCount(queueSetting.WorkerNum),
		queue.WithName("MediaMetadataQueue"),
		queue.WithMaxTaskExecution(queueSetting.MaxExecution),
		queue.WithResumeTaskType(queue.MediaMetaTaskType),
	)
	return d.mediaMetaQueue
}
// IoIntenseQueue returns the singleton IO-intense task queue, rebuilding it
// (after shutting down the previous instance) when the context carries
// ReloadCtx=true. Guarded by d.mu.
func (d *dependency) IoIntenseQueue(ctx context.Context) queue.Queue {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Fix: use the reload flag's value, not its presence (the original
	// ok-flag form treated an explicit ReloadCtx=false as a reload);
	// consistent with EmailClient and NodePool.
	reload, _ := ctx.Value(ReloadCtx{}).(bool)
	if d.ioIntenseQueue != nil && !reload {
		return d.ioIntenseQueue
	}

	if d.ioIntenseQueue != nil {
		d.ioIntenseQueue.Shutdown()
	}

	settings := d.SettingProvider()
	queueSetting := settings.Queue(context.Background(), setting.QueueTypeIOIntense)
	d.ioIntenseQueue = queue.New(d.Logger(), d.TaskClient(), d.TaskRegistry(), d,
		queue.WithBackoffFactor(queueSetting.BackoffFactor),
		queue.WithMaxRetry(queueSetting.MaxRetry),
		queue.WithBackoffMaxDuration(queueSetting.BackoffMaxDuration),
		queue.WithRetryDelay(queueSetting.RetryDelay),
		queue.WithWorkerCount(queueSetting.WorkerNum),
		queue.WithName("IoIntenseQueue"),
		queue.WithMaxTaskExecution(queueSetting.MaxExecution),
		queue.WithResumeTaskType(queue.CreateArchiveTaskType, queue.ExtractArchiveTaskType, queue.RelocateTaskType),
		queue.WithTaskPullInterval(10*time.Second),
	)
	return d.ioIntenseQueue
}
// RemoteDownloadQueue lazily builds and caches the queue for remote
// (offline) download tasks. A ReloadCtx value in ctx forces the existing
// queue to be shut down and rebuilt from current settings.
func (d *dependency) RemoteDownloadQueue(ctx context.Context) queue.Queue {
	d.mu.Lock()
	defer d.mu.Unlock()
	_, reload := ctx.Value(ReloadCtx{}).(bool)
	if d.remoteDownloadQueue != nil && !reload {
		return d.remoteDownloadQueue
	}
	if d.remoteDownloadQueue != nil {
		d.remoteDownloadQueue.Shutdown()
	}
	settings := d.SettingProvider()
	queueSetting := settings.Queue(context.Background(), setting.QueueTypeRemoteDownload)
	d.remoteDownloadQueue = queue.New(d.Logger(), d.TaskClient(), d.TaskRegistry(), d,
		queue.WithBackoffFactor(queueSetting.BackoffFactor),
		queue.WithMaxRetry(queueSetting.MaxRetry),
		queue.WithBackoffMaxDuration(queueSetting.BackoffMaxDuration),
		queue.WithRetryDelay(queueSetting.RetryDelay),
		queue.WithWorkerCount(queueSetting.WorkerNum),
		queue.WithName("RemoteDownloadQueue"),
		queue.WithMaxTaskExecution(queueSetting.MaxExecution),
		queue.WithResumeTaskType(queue.RemoteDownloadTaskType),
		// Longer pull interval than other queues; downloads are long-lived.
		queue.WithTaskPullInterval(20*time.Second),
	)
	return d.remoteDownloadQueue
}
// EntityRecycleQueue lazily builds and caches the queue for storage-entity
// recycling and upload-sentinel checks. A ReloadCtx value in ctx forces the
// existing queue to be shut down and rebuilt.
func (d *dependency) EntityRecycleQueue(ctx context.Context) queue.Queue {
	d.mu.Lock()
	defer d.mu.Unlock()
	_, reload := ctx.Value(ReloadCtx{}).(bool)
	if d.entityRecycleQueue != nil && !reload {
		return d.entityRecycleQueue
	}
	if d.entityRecycleQueue != nil {
		d.entityRecycleQueue.Shutdown()
	}
	settings := d.SettingProvider()
	queueSetting := settings.Queue(context.Background(), setting.QueueTypeEntityRecycle)
	d.entityRecycleQueue = queue.New(d.Logger(), d.TaskClient(), nil, d,
		queue.WithBackoffFactor(queueSetting.BackoffFactor),
		queue.WithMaxRetry(queueSetting.MaxRetry),
		queue.WithBackoffMaxDuration(queueSetting.BackoffMaxDuration),
		queue.WithRetryDelay(queueSetting.RetryDelay),
		queue.WithWorkerCount(queueSetting.WorkerNum),
		queue.WithName("EntityRecycleQueue"),
		queue.WithMaxTaskExecution(queueSetting.MaxExecution),
		queue.WithResumeTaskType(queue.EntityRecycleRoutineTaskType, queue.ExplicitEntityRecycleTaskType, queue.UploadSentinelCheckTaskType),
		queue.WithTaskPullInterval(10*time.Second),
	)
	return d.entityRecycleQueue
}
// SlaveQueue lazily builds and caches the task queue used when running as a
// slave node. Unlike the master queues it has no task client or registry
// (both nil), so tasks are not persisted or resumable. A ReloadCtx value in
// ctx forces the existing queue to be shut down and rebuilt.
func (d *dependency) SlaveQueue(ctx context.Context) queue.Queue {
	d.mu.Lock()
	defer d.mu.Unlock()
	_, reload := ctx.Value(ReloadCtx{}).(bool)
	if d.slaveQueue != nil && !reload {
		return d.slaveQueue
	}
	if d.slaveQueue != nil {
		d.slaveQueue.Shutdown()
	}
	settings := d.SettingProvider()
	queueSetting := settings.Queue(context.Background(), setting.QueueTypeSlave)
	d.slaveQueue = queue.New(d.Logger(), nil, nil, d,
		queue.WithBackoffFactor(queueSetting.BackoffFactor),
		queue.WithMaxRetry(queueSetting.MaxRetry),
		queue.WithBackoffMaxDuration(queueSetting.BackoffMaxDuration),
		queue.WithRetryDelay(queueSetting.RetryDelay),
		queue.WithWorkerCount(queueSetting.WorkerNum),
		queue.WithName("SlaveQueue"),
		queue.WithMaxTaskExecution(queueSetting.MaxExecution),
	)
	return d.slaveQueue
}
// GeneralAuth returns the HMAC signer used for general request signing,
// building and caching it on first use. In master mode the key comes from
// the settings store; in slave mode it comes from the config file and must
// be non-empty, otherwise initialization panics via panicError.
// NOTE(review): unlike the queue getters, this lazy init is not guarded by
// d.mu — confirm it is only called before concurrent use begins.
func (d *dependency) GeneralAuth() auth.Auth {
	if d.generalAuth == nil {
		var key string
		switch d.ConfigProvider().System().Mode {
		case conf.MasterMode:
			key = d.SettingProvider().SecretKey(context.Background())
		default:
			key = d.ConfigProvider().Slave().Secret
			if key == "" {
				d.panicError(errors.New("SlaveSecret is not set, please specify it in config file"))
			}
		}
		d.generalAuth = auth.HMACAuth{SecretKey: []byte(key)}
	}
	return d.generalAuth
}
// FileClient returns the file inventory client. An instance injected via
// options takes precedence; otherwise a fresh client is constructed.
// NOTE(review): the constructed client is never stored back into
// d.fileClient (compare HashIDEncoder) — confirm whether caching was intended.
func (d *dependency) FileClient() inventory.FileClient {
	if d.fileClient == nil {
		return inventory.NewFileClient(d.DBClient(), d.ConfigProvider().Database().Type, d.HashIDEncoder())
	}
	return d.fileClient
}
// ShareClient returns the share inventory client. An instance injected via
// options takes precedence; otherwise a fresh client is constructed on each
// call (the result is not cached).
func (d *dependency) ShareClient() inventory.ShareClient {
	if d.shareClient == nil {
		return inventory.NewShareClient(d.DBClient(), d.ConfigProvider().Database().Type, d.HashIDEncoder())
	}
	return d.shareClient
}
// TaskClient returns the task inventory client. An instance injected via
// options takes precedence; otherwise a fresh client is constructed on each
// call (the result is not cached).
func (d *dependency) TaskClient() inventory.TaskClient {
	if d.taskClient == nil {
		return inventory.NewTaskClient(d.DBClient(), d.ConfigProvider().Database().Type, d.HashIDEncoder())
	}
	return d.taskClient
}
// DavAccountClient returns the WebDAV account inventory client. An instance
// injected via options takes precedence; otherwise a fresh client is
// constructed on each call (the result is not cached).
func (d *dependency) DavAccountClient() inventory.DavAccountClient {
	if d.davAccountClient == nil {
		return inventory.NewDavAccountClient(d.DBClient(), d.ConfigProvider().Database().Type, d.HashIDEncoder())
	}
	return d.davAccountClient
}
// DirectLinkClient returns the direct-link inventory client. An instance
// injected via options takes precedence; otherwise a fresh client is
// constructed on each call (the result is not cached).
func (d *dependency) DirectLinkClient() inventory.DirectLinkClient {
	if d.directLinkClient == nil {
		return inventory.NewDirectLinkClient(d.DBClient(), d.ConfigProvider().Database().Type, d.HashIDEncoder())
	}
	return d.directLinkClient
}
// HashIDEncoder returns the hashid encoder used to obfuscate numeric IDs,
// building and caching it on first use. Construction failure is fatal
// (panicError never returns).
func (d *dependency) HashIDEncoder() hashid.Encoder {
	if d.hashidEncoder == nil {
		enc, err := hashid.New(d.SettingProvider().HashIDSalt(context.Background()))
		if err != nil {
			d.panicError(err)
		}
		d.hashidEncoder = enc
	}
	return d.hashidEncoder
}
// CredManager returns the credential manager, building and caching it on
// first use. Master mode uses the KV-backed manager; slave mode uses the
// slave variant, which additionally reads the node config.
func (d *dependency) CredManager() credmanager.CredManager {
	if d.credManager == nil {
		switch d.ConfigProvider().System().Mode {
		case conf.MasterMode:
			d.credManager = credmanager.New(d.KV())
		default:
			d.credManager = credmanager.NewSlaveManager(d.KV(), d.ConfigProvider())
		}
	}
	return d.credManager
}
// TokenAuth returns the token-based authenticator, building and caching it
// on first use from the site secret key and user client.
func (d *dependency) TokenAuth() auth.TokenAuth {
	if d.tokenAuth == nil {
		secret := []byte(d.SettingProvider().SecretKey(context.Background()))
		d.tokenAuth = auth.NewTokenAuth(d.HashIDEncoder(), d.SettingProvider(), secret, d.UserClient(), d.Logger())
	}
	return d.tokenAuth
}
// LockSystem returns the in-memory WebDAV lock system, building and caching
// it on first use.
func (d *dependency) LockSystem() lock.LockSystem {
	if d.lockSystem == nil {
		d.lockSystem = lock.NewMemLS(d.HashIDEncoder(), d.Logger())
	}
	return d.lockSystem
}
// StoragePolicyClient returns the storage-policy inventory client. An
// instance injected via options takes precedence; otherwise a fresh client
// is constructed on each call (the result is not cached).
func (d *dependency) StoragePolicyClient() inventory.StoragePolicyClient {
	if d.storagePolicyClient == nil {
		return inventory.NewStoragePolicyClient(d.DBClient(), d.KV())
	}
	return d.storagePolicyClient
}
// ThumbPipeline returns the thumbnail generator pipeline, building and
// caching it on first use.
func (d *dependency) ThumbPipeline() thumb.Generator {
	if d.thumbPipeline == nil {
		d.thumbPipeline = thumb.NewPipeline(d.SettingProvider(), d.Logger())
	}
	return d.thumbPipeline
}
// TaskRegistry returns the in-memory task registry, building and caching it
// on first use.
func (d *dependency) TaskRegistry() queue.TaskRegistry {
	if d.taskRegistry == nil {
		d.taskRegistry = queue.NewTaskRegistry()
	}
	return d.taskRegistry
}
// Shutdown releases resources held by the container: it closes the email
// client and shuts down every initialized queue in parallel, blocking until
// all queues have stopped. It always returns nil.
//
// NOTE(review): ctx is currently unused — shutdown does not honor its
// deadline; confirm whether cancellation support is expected.
func (d *dependency) Shutdown(ctx context.Context) error {
	d.mu.Lock()
	if d.emailClient != nil {
		d.emailClient.Close()
	}
	// Collect the queues that were actually initialized; the original
	// repeated the same goroutine body six times.
	queues := []queue.Queue{
		d.mediaMetaQueue,
		d.thumbQueue,
		d.ioIntenseQueue,
		d.entityRecycleQueue,
		d.slaveQueue,
		d.remoteDownloadQueue,
	}
	wg := sync.WaitGroup{}
	for _, q := range queues {
		if q == nil {
			continue
		}
		wg.Add(1)
		// q is passed as an argument so each goroutine gets its own copy
		// (safe on Go versions before 1.22 loop-var semantics).
		go func(q queue.Queue) {
			// Register Done before Shutdown runs; the original deferred
			// Done only after Shutdown returned, so a panicking Shutdown
			// would leave Wait blocked forever.
			defer wg.Done()
			q.Shutdown()
		}(q)
	}
	d.mu.Unlock()
	wg.Wait()
	return nil
}
// panicError aborts dependency initialization with a fatal error. When a
// logger is available it panics through logger.Panic (so the message is
// recorded); otherwise it panics with the raw error.
func (d *dependency) panicError(err error) {
	if l := d.logger; l != nil {
		l.Panic("Fatal error in dependency initialization: %s", err)
	}
	panic(err)
}
// ForkWithLogger returns a child context carrying a view of this dependency
// container whose Logger() is replaced by l; all other accessors delegate to
// the original container.
func (d *dependency) ForkWithLogger(ctx context.Context, l logging.Logger) context.Context {
	return context.WithValue(ctx, DepCtx{}, &dependencyCorrelated{l: l, dependency: d})
}

// dependencyCorrelated wraps a dependency container, overriding only the
// logger (typically to attach a request correlation ID).
type dependencyCorrelated struct {
	l logging.Logger
	*dependency
}

// Logger returns the correlated logger instead of the container default.
func (d *dependencyCorrelated) Logger() logging.Logger {
	return d.l
}

@ -0,0 +1,165 @@
package dependency
import (
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/email"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/gin-contrib/static"
"io/fs"
)
// Option customizes the dependency container during construction.
type Option interface {
	apply(*dependency)
}

// optionFunc adapts a plain function to the Option interface.
type optionFunc func(*dependency)

func (f optionFunc) apply(o *dependency) {
	f(o)
}
// WithConfigPath Set the path of the config file.
func WithConfigPath(p string) Option {
	return optionFunc(func(o *dependency) {
		o.configPath = p
	})
}

// WithLogger Set the default logging.
func WithLogger(l logging.Logger) Option {
	return optionFunc(func(o *dependency) {
		o.logger = l
	})
}

// WithConfigProvider Set the default config provider.
func WithConfigProvider(c conf.ConfigProvider) Option {
	return optionFunc(func(o *dependency) {
		o.configProvider = c
	})
}

// WithStatics Set the default statics FS.
func WithStatics(c fs.FS) Option {
	return optionFunc(func(o *dependency) {
		o.statics = c
	})
}

// WithServerStaticFS Set the default statics FS for server.
func WithServerStaticFS(c static.ServeFileSystem) Option {
	return optionFunc(func(o *dependency) {
		o.serverStaticFS = c
	})
}

// WithProFlag Set if current instance is a pro version.
func WithProFlag(c bool) Option {
	return optionFunc(func(o *dependency) {
		o.isPro = c
	})
}

// WithLicenseKey Set the license key of the pro edition.
func WithLicenseKey(c string) Option {
	return optionFunc(func(o *dependency) {
		o.licenseKey = c
	})
}

// WithRawEntClient Set the default raw ent client.
func WithRawEntClient(c *ent.Client) Option {
	return optionFunc(func(o *dependency) {
		o.rawEntClient = c
	})
}

// WithDbClient Set the default ent client.
func WithDbClient(c *ent.Client) Option {
	return optionFunc(func(o *dependency) {
		o.dbClient = c
	})
}

// WithRequiredDbVersion Set the required db version.
func WithRequiredDbVersion(c string) Option {
	return optionFunc(func(o *dependency) {
		o.requiredDbVersion = c
	})
}

// WithKV Set the default KV store driver.
func WithKV(c cache.Driver) Option {
	return optionFunc(func(o *dependency) {
		o.kv = c
	})
}

// WithSettingClient Set the default setting client.
func WithSettingClient(s inventory.SettingClient) Option {
	return optionFunc(func(o *dependency) {
		o.settingClient = s
	})
}

// WithSettingProvider Set the default setting provider.
func WithSettingProvider(s setting.Provider) Option {
	return optionFunc(func(o *dependency) {
		o.settingProvider = s
	})
}

// WithUserClient Set the default user client.
func WithUserClient(s inventory.UserClient) Option {
	return optionFunc(func(o *dependency) {
		o.userClient = s
	})
}

// WithEmailClient Set the default email client.
func WithEmailClient(s email.Driver) Option {
	return optionFunc(func(o *dependency) {
		o.emailClient = s
	})
}

// WithGeneralAuth Set the default general auth.
func WithGeneralAuth(s auth.Auth) Option {
	return optionFunc(func(o *dependency) {
		o.generalAuth = s
	})
}

// WithHashIDEncoder Set the default hash id encoder.
func WithHashIDEncoder(s hashid.Encoder) Option {
	return optionFunc(func(o *dependency) {
		o.hashidEncoder = s
	})
}

// WithTokenAuth Set the default token auth.
func WithTokenAuth(s auth.TokenAuth) Option {
	return optionFunc(func(o *dependency) {
		o.tokenAuth = s
	})
}

// WithFileClient Set the default file client.
func WithFileClient(s inventory.FileClient) Option {
	return optionFunc(func(o *dependency) {
		o.fileClient = s
	})
}

// WithShareClient Set the default share client.
func WithShareClient(s inventory.ShareClient) Option {
	return optionFunc(func(o *dependency) {
		o.shareClient = s
	})
}

@ -0,0 +1,47 @@
package migrator
import (
"fmt"
"io"
"os"
"path/filepath"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
)
// migrateAvatars copies v3 user avatar files (avatar_<uid>_2.png under the
// v3 avatar path) to their v4 locations (avatar_<uid>.png under the data
// path) for every migrated user. Missing avatars are skipped; a failed copy
// is logged and skipped (best effort), but open/create errors abort.
func migrateAvatars(m *Migrator) error {
	m.l.Info("Migrating avatars files...")
	avatarRoot := util.RelativePath(m.state.V3AvatarPath)
	for uid := range m.state.UserIDs {
		avatarPath := filepath.Join(avatarRoot, fmt.Sprintf("avatar_%d_2.png", uid))
		if !util.Exists(avatarPath) {
			continue
		}
		m.l.Info("Migrating avatar for user %d", uid)
		v4Path := filepath.Join(util.DataPath("avatar"), fmt.Sprintf("avatar_%d.png", uid))
		if err := copyAvatar(m, avatarPath, v4Path); err != nil {
			return err
		}
	}
	return nil
}

// copyAvatar copies one avatar file from src to dst, closing both files
// before returning. The original used defer inside the loop, which held
// every file descriptor open until the whole migration step finished.
func copyAvatar(m *Migrator, src, dst string) error {
	origin, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open avatar file: %w", err)
	}
	defer origin.Close()
	dest, err := util.CreatNestedFile(dst)
	if err != nil {
		return fmt.Errorf("failed to create avatar file: %w", err)
	}
	defer dest.Close()
	if _, err := io.Copy(dest, origin); err != nil {
		// Preserve the original best-effort behavior: warn and continue.
		m.l.Warning("Failed to copy avatar file: %s, skipping...", err)
	}
	return nil
}

@ -0,0 +1,124 @@
package conf
import (
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/go-ini/ini"
"github.com/go-playground/validator/v10"
)
// database holds database connection settings from the [Database] section
// of the config file.
type database struct {
	Type        string
	User        string
	Password    string
	Host        string
	Name        string
	TablePrefix string
	DBFile      string // path to the database file (file-backed drivers)
	Port        int
	Charset     string
	UnixSocket  bool // connect over a unix socket instead of TCP
}
// system holds general system settings from the [System] section.
type system struct {
	Mode          string `validate:"eq=master|eq=slave"` // node role: master or slave
	Listen        string `validate:"required"`
	Debug         bool
	SessionSecret string
	HashIDSalt    string
	GracePeriod   int    `validate:"gte=0"` // seconds allowed for graceful shutdown
	ProxyHeader   string `validate:"required_with=Listen"`
}
// ssl holds HTTPS listener settings.
// NOTE(review): `omitempty,required` makes the required check a no-op when
// the field is empty — likely intended to require cert/key only when the
// SSL section is present; confirm the intended validation.
type ssl struct {
	CertPath string `validate:"omitempty,required"`
	KeyPath  string `validate:"omitempty,required"`
	Listen   string `validate:"required"`
}

// unix holds unix-socket listener settings.
type unix struct {
	Listen string
	Perm   uint32 // socket file permission bits
}

// slave holds settings used when this node runs as a slave storage node.
type slave struct {
	Secret          string `validate:"omitempty,gte=64"` // shared secret with master, min 64 chars when set
	CallbackTimeout int    `validate:"omitempty,gte=1"`
	SignatureTTL    int    `validate:"omitempty,gte=1"`
}

// redis holds Redis connection settings.
type redis struct {
	Network  string
	Server   string
	User     string
	Password string
	DB       string
}

// cors holds cross-origin resource sharing settings.
type cors struct {
	AllowOrigins     []string
	AllowMethods     []string
	AllowHeaders     []string
	AllowCredentials bool
	ExposeHeaders    []string
	SameSite         string
	Secure           bool
}
// cfg is the parsed INI file, shared with mapSection.
var cfg *ini.File

// Init loads the config file at path, maps each INI section onto its
// package-level config struct, validates the result, and collects
// [OptionOverwrite] keys. Returns the first error encountered.
func Init(l logging.Logger, path string) error {
	var err error
	cfg, err = ini.Load(path)
	if err != nil {
		l.Error("Failed to parse config file %q: %s", path, err)
		return err
	}
	// Section name -> destination struct (package-level targets defined elsewhere).
	sections := map[string]interface{}{
		"Database":   DatabaseConfig,
		"System":     SystemConfig,
		"SSL":        SSLConfig,
		"UnixSocket": UnixConfig,
		"Redis":      RedisConfig,
		"CORS":       CORSConfig,
		"Slave":      SlaveConfig,
	}
	for sectionName, sectionStruct := range sections {
		err = mapSection(sectionName, sectionStruct)
		if err != nil {
			l.Error("Failed to parse config section %q: %s", sectionName, err)
			return err
		}
	}
	// Copy [OptionOverwrite] keys; presumably these override database-stored
	// settings — confirm against the consumer of OptionOverwrite.
	for _, key := range cfg.Section("OptionOverwrite").Keys() {
		OptionOverwrite[key.Name()] = key.Value()
	}
	return nil
}
// mapSection maps one INI section onto confStruct, then validates the
// struct against its `validate` tags. Returns the first error from either
// step.
func mapSection(section string, confStruct interface{}) error {
	if err := cfg.Section(section).MapTo(confStruct); err != nil {
		return err
	}
	// Enforce the `validate` struct tags after mapping.
	return validator.New().Struct(confStruct)
}

@ -0,0 +1,82 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateDirectLink migrates v3 source links into v4 direct links in
// batches of 1000, resuming from the saved offset. Links whose target file
// was not migrated are skipped. On postgres the ID sequence is reset after
// the last batch.
func (m *Migrator) migrateDirectLink() error {
	m.l.Info("Migrating direct links...")
	batchSize := 1000
	offset := m.state.DirectLinkOffset
	ctx := context.Background()
	if m.state.DirectLinkOffset > 0 {
		m.l.Info("Resuming direct link migration from offset %d", offset)
	}
	for {
		m.l.Info("Migrating direct links with offset %d", offset)
		var directLinks []model.SourceLink
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&directLinks).Error; err != nil {
			return fmt.Errorf("failed to list v3 direct links: %w", err)
		}
		if len(directLinks) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				m.l.Info("Resetting direct link ID sequence for postgres...")
				// Best effort; the ExecContext error is intentionally ignored.
				m.v4client.DirectLink.ExecContext(ctx, "SELECT SETVAL('direct_links_id_seq', (SELECT MAX(id) FROM direct_links))")
			}
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// NOTE(review): tx is nil when Tx fails, so this Rollback would
			// panic — should return without calling Rollback.
			_ = tx.Rollback()
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, dl := range directLinks {
			// v4 files and folders share one ID space; file IDs were shifted
			// by LastFolderID during file migration.
			sourceId := int(dl.FileID) + m.state.LastFolderID
			// check if file exists
			_, err = tx.File.Query().Where(file.ID(sourceId)).First(ctx)
			if err != nil {
				m.l.Warning("File %d not found, skipping direct link %d", sourceId, dl.ID)
				continue
			}
			stm := tx.DirectLink.Create().
				SetCreatedAt(formatTime(dl.CreatedAt)).
				SetUpdatedAt(formatTime(dl.UpdatedAt)).
				SetRawID(int(dl.ID)).
				SetFileID(sourceId).
				SetName(dl.Name).
				SetDownloads(dl.Downloads).
				SetSpeed(0)
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create direct link %d: %w", dl.ID, err)
			}
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		// Persist progress after each committed batch so the migration can resume.
		offset += batchSize
		m.state.DirectLinkOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after direct link batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing this batch")
		}
	}
	return nil
}

@ -0,0 +1,189 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateFile migrates v3 file records (and their storage entities) into
// the v4 schema in batches of 1000, resuming from the saved offset. File
// IDs are shifted by LastFolderID because v4 stores files and folders in a
// single table. Name conflicts are retried by re-running the batch with a
// "<id>_<name>" rename recorded in state.
func (m *Migrator) migrateFile() error {
	m.l.Info("Migrating files...")
	batchSize := 1000
	offset := m.state.FileOffset
	ctx := context.Background()
	if m.state.FileConflictRename == nil {
		m.state.FileConflictRename = make(map[uint]string)
	}
	if m.state.EntitySources == nil {
		m.state.EntitySources = make(map[string]int)
	}
	if offset > 0 {
		m.l.Info("Resuming file migration from offset %d", offset)
	}
out:
	for {
		m.l.Info("Migrating files with offset %d", offset)
		var files []model.File
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&files).Error; err != nil {
			return fmt.Errorf("failed to list v3 files: %w", err)
		}
		if len(files) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				m.l.Info("Resetting file ID sequence for postgres...")
				// Best effort; the ExecContext error is intentionally ignored.
				m.v4client.File.ExecContext(ctx, "SELECT SETVAL('files_id_seq', (SELECT MAX(id) FROM files))")
			}
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// Fixed: do not call tx.Rollback() here — tx is nil when Tx
			// fails, and Rollback would panic.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, f := range files {
			// Skip files whose folder/owner/policy did not survive migration.
			if _, ok := m.state.FolderIDs[int(f.FolderID)]; !ok {
				m.l.Warning("Folder ID %d for file %d not found, skipping", f.FolderID, f.ID)
				continue
			}
			if _, ok := m.state.UserIDs[int(f.UserID)]; !ok {
				m.l.Warning("User ID %d for file %d not found, skipping", f.UserID, f.ID)
				continue
			}
			if _, ok := m.state.PolicyIDs[int(f.PolicyID)]; !ok {
				m.l.Warning("Policy ID %d for file %d not found, skipping", f.PolicyID, f.ID)
				continue
			}
			metadata := make(map[string]string)
			if f.Metadata != "" {
				// Best effort: malformed metadata just leaves the map empty.
				json.Unmarshal([]byte(f.Metadata), &metadata)
			}
			var (
				thumbnail *ent.Entity
				entity    *ent.Entity
				err       error
			)
			if metadata[model.ThumbStatusMetadataKey] == model.ThumbStatusExist {
				size := int64(0)
				if m.state.LocalPolicyIDs[int(f.PolicyID)] {
					thumbFile, err := os.Stat(f.SourceName + m.state.ThumbSuffix)
					if err == nil {
						size = thumbFile.Size()
					} else {
						// Fixed: the original logged this warning
						// unconditionally, even when the thumbnail file
						// was found and its size recorded.
						m.l.Warning("Thumbnail file %s for file %d not found, use 0 size", f.SourceName+m.state.ThumbSuffix, f.ID)
					}
				}
				// Insert thumbnail entity
				thumbnail, err = m.insertEntity(tx, f.SourceName+m.state.ThumbSuffix, int(types.EntityTypeThumbnail), int(f.PolicyID), int(f.UserID), size)
				if err != nil {
					_ = tx.Rollback()
					return fmt.Errorf("failed to insert thumbnail entity: %w", err)
				}
			}
			// Insert file version entity
			entity, err = m.insertEntity(tx, f.SourceName, int(types.EntityTypeVersion), int(f.PolicyID), int(f.UserID), int64(f.Size))
			if err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to insert file version entity: %w", err)
			}
			// Apply a conflict rename recorded during a previous attempt.
			fname := f.Name
			if renamed, ok := m.state.FileConflictRename[f.ID]; ok {
				fname = renamed
			}
			stm := tx.File.Create().
				SetCreatedAt(formatTime(f.CreatedAt)).
				SetUpdatedAt(formatTime(f.UpdatedAt)).
				SetName(fname).
				SetRawID(int(f.ID) + m.state.LastFolderID).
				SetOwnerID(int(f.UserID)).
				SetSize(int64(f.Size)).
				SetPrimaryEntity(entity.ID).
				SetFileChildren(int(f.FolderID)).
				SetType(int(types.FileTypeFile)).
				SetStoragePoliciesID(int(f.PolicyID)).
				AddEntities(entity)
			if thumbnail != nil {
				stm.AddEntities(thumbnail)
			}
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				if ent.IsConstraintError(err) {
					if _, ok := m.state.FileConflictRename[f.ID]; ok {
						return fmt.Errorf("file %d already exists, but new name is already in conflict rename map, please resolve this manually", f.ID)
					}
					m.l.Warning("File %d already exists, will retry with new name in next batch", f.ID)
					m.state.FileConflictRename[f.ID] = fmt.Sprintf("%d_%s", f.ID, f.Name)
					// Re-run the same batch (offset unchanged) using the rename.
					continue out
				}
				return fmt.Errorf("failed to create file %d: %w", f.ID, err)
			}
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		// Persist progress after each committed batch so the migration can resume.
		offset += batchSize
		m.state.FileOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after file batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing this batch")
		}
	}
	return nil
}
// insertEntity creates (or reuses) a v4 storage entity for the given source
// path within policyID. Entities are deduplicated per (policy, source) via
// state.EntitySources: a known entity just gets its reference count bumped;
// if that update fails (e.g. the recording transaction was rolled back),
// a new entity is created and recorded instead.
func (m *Migrator) insertEntity(tx *ent.Tx, source string, entityType, policyID, createdBy int, size int64) (*ent.Entity, error) {
	// find existing one
	entityKey := strconv.Itoa(policyID) + "+" + source
	if existingId, ok := m.state.EntitySources[entityKey]; ok {
		existing, err := tx.Entity.UpdateOneID(existingId).
			AddReferenceCount(1).
			Save(context.Background())
		if err == nil {
			return existing, nil
		}
		m.l.Warning("Failed to update existing entity %d: %s, fallback to create new one.", existingId, err)
	}
	// create new one
	e, err := tx.Entity.Create().
		SetSource(source).
		SetType(entityType).
		SetSize(size).
		SetStoragePolicyEntities(policyID).
		SetCreatedBy(createdBy).
		SetReferenceCount(1).
		Save(context.Background())
	if err != nil {
		return nil, fmt.Errorf("failed to create new entity: %w", err)
	}
	// Remember the entity so later duplicates reuse it.
	m.state.EntitySources[entityKey] = e.ID
	return e, nil
}

@ -0,0 +1,147 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
// migrateFolders migrates v3 folders into v4 file rows (type folder) in
// batches of 1000, resuming from the saved offset. Parent edges are filled
// in later by migrateFolderParent; here only the nodes themselves are
// created, and migrated IDs are recorded in state.FolderIDs.
func (m *Migrator) migrateFolders() error {
	m.l.Info("Migrating folders...")
	batchSize := 1000
	// Start from the saved offset if available
	offset := m.state.FolderOffset
	ctx := context.Background()
	foldersCount := 0
	if m.state.FolderIDs == nil {
		m.state.FolderIDs = make(map[int]bool)
	}
	if offset > 0 {
		m.l.Info("Resuming folder migration from offset %d", offset)
	}
	for {
		m.l.Info("Migrating folders with offset %d", offset)
		var folders []model.Folder
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&folders).Error; err != nil {
			return fmt.Errorf("failed to list v3 folders: %w", err)
		}
		if len(folders) == 0 {
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// NOTE(review): tx is nil when Tx fails, so this Rollback would
			// panic — should return without calling Rollback.
			_ = tx.Rollback()
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		batchFoldersCount := 0
		for _, f := range folders {
			if _, ok := m.state.UserIDs[int(f.OwnerID)]; !ok {
				m.l.Warning("Owner ID %d not found, skipping folder %d", f.OwnerID, f.ID)
				continue
			}
			// Root folders (nil parent) get an empty name in v4.
			isRoot := f.ParentID == nil
			if isRoot {
				f.Name = ""
			} else if *f.ParentID == 0 {
				// Parent ID 0 is a dangling reference in v3 data.
				m.l.Warning("Parent ID %d not found, skipping folder %d", *f.ParentID, f.ID)
				continue
			}
			stm := tx.File.Create().
				SetRawID(int(f.ID)).
				SetType(int(types.FileTypeFolder)).
				SetCreatedAt(formatTime(f.CreatedAt)).
				SetUpdatedAt(formatTime(f.UpdatedAt)).
				SetName(f.Name).
				SetOwnerID(int(f.OwnerID))
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create folder %d: %w", f.ID, err)
			}
			m.state.FolderIDs[int(f.ID)] = true
			// LastFolderID is later used to shift file IDs into the shared table.
			m.state.LastFolderID = int(f.ID)
			foldersCount++
			batchFoldersCount++
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		// Update the offset in state and save after each batch
		offset += batchSize
		m.state.FolderOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after folder batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing %d folders in this batch", batchFoldersCount)
		}
	}
	m.l.Info("Successfully migrated %d folders", foldersCount)
	return nil
}
// migrateFolderParent backfills the parent edge of migrated folders in
// batches of 1000, resuming from the saved offset. It runs as a second pass
// so every referenced parent row already exists. Root folders (nil parent)
// and folders whose parent was not migrated are skipped.
func (m *Migrator) migrateFolderParent() error {
	m.l.Info("Migrating folder parent...")
	batchSize := 1000
	offset := m.state.FolderParentOffset
	ctx := context.Background()
	for {
		m.l.Info("Migrating folder parent with offset %d", offset)
		var folderParents []model.Folder
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&folderParents).Error; err != nil {
			return fmt.Errorf("failed to list v3 folder parents: %w", err)
		}
		if len(folderParents) == 0 {
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// Fixed: do not call tx.Rollback() here — tx is nil when Tx
			// fails, and Rollback would panic.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, f := range folderParents {
			if f.ParentID != nil {
				if _, ok := m.state.FolderIDs[int(*f.ParentID)]; !ok {
					// Fixed: the original passed f.ID for both placeholders,
					// hiding which parent ID was actually missing.
					m.l.Warning("Folder ID %d not found, skipping folder parent %d", *f.ParentID, f.ID)
					continue
				}
				if _, err := tx.File.UpdateOneID(int(f.ID)).SetParentID(int(*f.ParentID)).Save(ctx); err != nil {
					_ = tx.Rollback()
					return fmt.Errorf("failed to update folder parent %d: %w", f.ID, err)
				}
			}
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		// Update the offset in state and save after each batch
		offset += batchSize
		m.state.FolderParentOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after folder parent batch: %s", err)
		}
	}
	return nil
}

@ -0,0 +1,92 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/samber/lo"
)
// migrateGroup migrates all v3 user groups into v4, converting the JSON
// options blob into a typed GroupSetting plus a permission boolset. Group
// IDs are preserved; v3 conventions are applied (ID 1 = admin, ID 3 =
// anonymous). On postgres the group ID sequence is reset afterwards.
func (m *Migrator) migrateGroup() error {
	m.l.Info("Migrating groups...")
	var groups []model.Group
	if err := model.DB.Find(&groups).Error; err != nil {
		return fmt.Errorf("failed to list v3 groups: %w", err)
	}
	for _, group := range groups {
		// perms replaces the original local `cap`, which shadowed the
		// builtin cap().
		perms := &boolset.BooleanSet{}
		var (
			opts     model.GroupOption
			policies []int
		)
		if err := json.Unmarshal([]byte(group.Options), &opts); err != nil {
			return fmt.Errorf("failed to unmarshal options for group %q: %w", group.Name, err)
		}
		if err := json.Unmarshal([]byte(group.Policies), &policies); err != nil {
			return fmt.Errorf("failed to unmarshal policies for group %q: %w", group.Name, err)
		}
		// Drop references to storage policies that were not migrated.
		policies = lo.Filter(policies, func(id int, _ int) bool {
			_, exist := m.state.PolicyIDs[id]
			return exist
		})
		newOpts := &types.GroupSetting{
			CompressSize:          int64(opts.CompressSize),
			DecompressSize:        int64(opts.DecompressSize),
			RemoteDownloadOptions: opts.Aria2Options,
			SourceBatchSize:       opts.SourceBatchSize,
			RedirectedSource:      opts.RedirectedSource,
			Aria2BatchSize:        opts.Aria2BatchSize,
			// v4 defaults with no v3 equivalent.
			MaxWalkedFiles: 100000,
			TrashRetention: 7 * 24 * 3600,
		}
		boolset.Sets(map[types.GroupPermission]bool{
			types.GroupPermissionIsAdmin:          group.ID == 1,
			types.GroupPermissionIsAnonymous:      group.ID == 3,
			types.GroupPermissionShareDownload:    opts.ShareDownload,
			types.GroupPermissionWebDAV:           group.WebDAVEnabled,
			types.GroupPermissionArchiveDownload:  opts.ArchiveDownload,
			types.GroupPermissionArchiveTask:      opts.ArchiveTask,
			types.GroupPermissionWebDAVProxy:      opts.WebDAVProxy,
			types.GroupPermissionRemoteDownload:   opts.Aria2,
			types.GroupPermissionAdvanceDelete:    opts.AdvanceDelete,
			types.GroupPermissionShare:            group.ShareEnabled,
			types.GroupPermissionRedirectedSource: opts.RedirectedSource,
		}, perms)
		stm := m.v4client.Group.Create().
			SetRawID(int(group.ID)).
			SetCreatedAt(formatTime(group.CreatedAt)).
			SetUpdatedAt(formatTime(group.UpdatedAt)).
			SetName(group.Name).
			SetMaxStorage(int64(group.MaxStorage)).
			SetSpeedLimit(group.SpeedLimit).
			SetPermissions(perms).
			SetSettings(newOpts)
		if len(policies) > 0 {
			// v4 groups reference a single policy; keep the first surviving one.
			stm.SetStoragePoliciesID(policies[0])
		}
		if _, err := stm.Save(context.Background()); err != nil {
			return fmt.Errorf("failed to create group %q: %w", group.Name, err)
		}
	}
	if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
		m.l.Info("Resetting group ID sequence for postgres...")
		// Best effort; the ExecContext error is intentionally ignored.
		m.v4client.Group.ExecContext(context.Background(), "SELECT SETVAL('groups_id_seq', (SELECT MAX(id) FROM groups))")
	}
	return nil
}

@ -0,0 +1,314 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
)
// State stores the migration progress so an interrupted run can resume.
// It is serialized to migration_state.json next to the v3 config file.
type State struct {
	// ID sets recording which v3 rows were successfully migrated.
	PolicyIDs      map[int]bool `json:"policy_ids,omitempty"`
	LocalPolicyIDs map[int]bool `json:"local_policy_ids,omitempty"`
	UserIDs        map[int]bool `json:"user_ids,omitempty"`
	FolderIDs      map[int]bool `json:"folder_ids,omitempty"`
	// EntitySources maps "<policyID>+<source>" to the v4 entity ID, used to
	// deduplicate storage entities.
	EntitySources map[string]int `json:"entity_sources,omitempty"`
	// LastFolderID is the highest migrated folder ID; file IDs are shifted
	// by it since v4 stores files and folders in one table.
	LastFolderID int `json:"last_folder_id,omitempty"`
	// Step is the current migration phase (see the Step* constants).
	Step int `json:"step,omitempty"`
	// Per-phase batch offsets for resuming inside a phase.
	UserOffset        int `json:"user_offset,omitempty"`
	FolderOffset      int `json:"folder_offset,omitempty"`
	FileOffset        int `json:"file_offset,omitempty"`
	ShareOffset       int `json:"share_offset,omitempty"`
	GiftCodeOffset    int `json:"gift_code_offset,omitempty"`
	DirectLinkOffset  int `json:"direct_link_offset,omitempty"`
	WebdavOffset      int `json:"webdav_offset,omitempty"`
	StoragePackOffset int `json:"storage_pack_offset,omitempty"`
	// FileConflictRename maps a v3 file ID to the replacement name used
	// after a unique-constraint conflict.
	FileConflictRename map[uint]string `json:"file_conflict_rename,omitempty"`
	FolderParentOffset int             `json:"folder_parent_offset,omitempty"`
	// ThumbSuffix is the v3 thumbnail file suffix; V3AvatarPath the v3
	// avatar directory.
	ThumbSuffix  string `json:"thumb_suffix,omitempty"`
	V3AvatarPath string `json:"v3_avatar_path,omitempty"`
}
// Step identifiers for migration phases. Values are ordered: Migrate runs
// every phase whose constant is >= the saved Step.
const (
	StepInitial      = 0
	StepSchema       = 1
	StepSettings     = 2
	StepNode         = 3
	StepPolicy       = 4
	StepGroup        = 5
	StepUser         = 6
	StepFolders      = 7
	StepFolderParent = 8
	StepFile         = 9
	StepShare        = 10
	StepDirectLink   = 11
	// Placeholders keep the numbering aligned; presumably reserved for
	// pro-edition-only steps — confirm against the pro build.
	Step_CommunityPlaceholder1 = 12
	Step_CommunityPlaceholder2 = 13
	StepAvatar                 = 14
	StepWebdav                 = 15
	StepCompleted              = 16
	// StateFileName is the progress file written next to the v3 config.
	StateFileName = "migration_state.json"
)
// Migrator drives the v3 -> v4 data migration.
type Migrator struct {
	dep       dependency.Dep // v4 dependency container
	l         logging.Logger
	v4client  *ent.Client // raw ent client for the v4 database
	state     *State      // resumable progress, persisted to statePath
	statePath string      // location of the serialized State file
}
// NewMigrator builds a Migrator for the v3 installation described by
// v3ConfPath. It resumes from an existing migration_state.json next to the
// config file when present, then initializes the v3 config, the v3 model
// layer, and a raw v4 ent client.
func NewMigrator(dep dependency.Dep, v3ConfPath string) (*Migrator, error) {
	m := &Migrator{
		dep: dep,
		l:   dep.Logger(),
		state: &State{
			PolicyIDs:    make(map[int]bool),
			UserIDs:      make(map[int]bool),
			Step:         StepInitial,
			UserOffset:   0,
			FolderOffset: 0,
		},
	}
	// Determine state file path
	configDir := filepath.Dir(v3ConfPath)
	m.statePath = filepath.Join(configDir, StateFileName)
	// Try to load existing state
	if util.Exists(m.statePath) {
		m.l.Info("Found existing migration state file, loading from %s", m.statePath)
		if err := m.loadState(); err != nil {
			return nil, fmt.Errorf("failed to load migration state: %w", err)
		}
		// Human-readable name for the resumed step (log only).
		stepName := "unknown"
		switch m.state.Step {
		case StepInitial:
			stepName = "initial"
		case StepSchema:
			stepName = "schema creation"
		case StepSettings:
			stepName = "settings migration"
		case StepNode:
			stepName = "node migration"
		case StepPolicy:
			stepName = "policy migration"
		case StepGroup:
			stepName = "group migration"
		case StepUser:
			stepName = "user migration"
		case StepFolders:
			stepName = "folders migration"
		// Fixed: the original switch omitted these steps, logging
		// "unknown" when resuming from them.
		case StepFolderParent:
			stepName = "folder parent migration"
		case StepFile:
			stepName = "file migration"
		case StepShare:
			stepName = "share migration"
		case StepDirectLink:
			stepName = "direct link migration"
		case StepCompleted:
			stepName = "completed"
		case StepWebdav:
			stepName = "webdav migration"
		case StepAvatar:
			stepName = "avatar migration"
		}
		m.l.Info("Resumed migration from step %d (%s)", m.state.Step, stepName)
		// Log batch information if applicable
		if m.state.Step == StepUser && m.state.UserOffset > 0 {
			m.l.Info("Will resume user migration from batch offset %d", m.state.UserOffset)
		}
		if m.state.Step == StepFolders && m.state.FolderOffset > 0 {
			m.l.Info("Will resume folder migration from batch offset %d", m.state.FolderOffset)
		}
	}
	err := conf.Init(m.dep.Logger(), v3ConfPath)
	if err != nil {
		return nil, err
	}
	err = model.Init()
	if err != nil {
		return nil, err
	}
	v4client, err := inventory.NewRawEntClient(m.l, m.dep.ConfigProvider())
	if err != nil {
		return nil, err
	}
	m.v4client = v4client
	return m, nil
}
// saveState serializes the current migration state as JSON and writes it to
// m.statePath (world-readable, 0644).
func (m *Migrator) saveState() error {
	payload, err := json.Marshal(m.state)
	if err != nil {
		return fmt.Errorf("failed to marshal state: %w", err)
	}
	return os.WriteFile(m.statePath, payload, 0644)
}
// loadState reads m.statePath and unmarshals it into the current state.
func (m *Migrator) loadState() error {
	raw, err := os.ReadFile(m.statePath)
	if err != nil {
		return fmt.Errorf("failed to read state file: %w", err)
	}
	return json.Unmarshal(raw, m.state)
}
// updateStep records the new migration phase and immediately persists the
// state so a crash resumes from this step.
func (m *Migrator) updateStep(step int) error {
	m.state.Step = step
	return m.saveState()
}
// Migrate runs the whole v3 → v4 migration as an ordered sequence of steps.
// m.state.Step is persisted after each completed step, so an interrupted run
// resumes at the failing step; the <= guards re-run the recorded step itself.
// Batched steps (users, folders) additionally resume from saved offsets.
func (m *Migrator) Migrate() error {
	// Continue from the current step
	if m.state.Step <= StepSchema {
		m.l.Info("Creating basic v4 table schema...")
		if err := m.v4client.Schema.Create(context.Background()); err != nil {
			return fmt.Errorf("failed creating schema resources: %w", err)
		}
		if err := m.updateStep(StepSettings); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepSettings {
		if err := m.migrateSettings(); err != nil {
			return err
		}
		if err := m.updateStep(StepNode); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepNode {
		if err := m.migrateNode(); err != nil {
			return err
		}
		if err := m.updateStep(StepPolicy); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepPolicy {
		allPolicyIDs, err := m.migratePolicy()
		if err != nil {
			return err
		}
		// Remember migrated policy IDs for later steps.
		m.state.PolicyIDs = allPolicyIDs
		if err := m.updateStep(StepGroup); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepGroup {
		if err := m.migrateGroup(); err != nil {
			return err
		}
		if err := m.updateStep(StepUser); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepUser {
		if err := m.migrateUser(); err != nil {
			// Best-effort checkpoint of the batch offset before surfacing the
			// error; the migration error takes precedence over a save failure.
			m.saveState()
			return err
		}
		// Reset user offset after completion
		m.state.UserOffset = 0
		if err := m.updateStep(StepFolders); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepFolders {
		if err := m.migrateFolders(); err != nil {
			// Best-effort checkpoint of the batch offset before surfacing the
			// error; the migration error takes precedence over a save failure.
			m.saveState()
			return err
		}
		// Reset folder offset after completion
		m.state.FolderOffset = 0
		if err := m.updateStep(StepFolderParent); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepFolderParent {
		if err := m.migrateFolderParent(); err != nil {
			return err
		}
		if err := m.updateStep(StepFile); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepFile {
		if err := m.migrateFile(); err != nil {
			return err
		}
		if err := m.updateStep(StepShare); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepShare {
		if err := m.migrateShare(); err != nil {
			return err
		}
		if err := m.updateStep(StepDirectLink); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepDirectLink {
		if err := m.migrateDirectLink(); err != nil {
			return err
		}
		if err := m.updateStep(StepAvatar); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepAvatar {
		if err := migrateAvatars(m); err != nil {
			return err
		}
		if err := m.updateStep(StepWebdav); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	if m.state.Step <= StepWebdav {
		if err := m.migrateWebdav(); err != nil {
			return err
		}
		if err := m.updateStep(StepCompleted); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	m.l.Info("Migration completed successfully")
	return nil
}
func formatTime(t time.Time) time.Time {
newTime := time.UnixMilli(t.UnixMilli())
return newTime
}

@ -0,0 +1,39 @@
package model
import (
"github.com/jinzhu/gorm"
)
// File is the v3 file model: one row per uploaded file.
type File struct {
	// Table columns
	gorm.Model
	Name            string  `gorm:"unique_index:idx_only_one"`
	SourceName      string  `gorm:"type:text"`
	UserID          uint    `gorm:"index:user_id;unique_index:idx_only_one"`
	Size            uint64
	PicInfo         string
	FolderID        uint    `gorm:"index:folder_id;unique_index:idx_only_one"`
	PolicyID        uint
	UploadSessionID *string `gorm:"index:session_id;unique_index:session_only_one"`
	Metadata        string  `gorm:"type:text"`

	// Associated models
	Policy Policy `gorm:"PRELOAD:false,association_autoupdate:false"`

	// Runtime-only fields ignored by the database
	Position           string            `gorm:"-"`
	MetadataSerialized map[string]string `gorm:"-"`
}
// Thumb related metadata: status values and keys stored in File.Metadata.
const (
	ThumbStatusNotExist     = ""
	ThumbStatusExist        = "exist"
	ThumbStatusNotAvailable = "not_available"
	ThumbStatusMetadataKey  = "thumb_status"
	ThumbSidecarMetadataKey = "thumb_sidecar"
	ChecksumMetadataKey     = "webdav_checksum"
)

@ -0,0 +1,18 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Folder is the v3 directory model.
type Folder struct {
	// Table columns
	gorm.Model
	Name     string `gorm:"unique_index:idx_only_one_name"`
	ParentID *uint  `gorm:"index:parent_id;unique_index:idx_only_one_name"`
	OwnerID  uint   `gorm:"index:owner_id"`

	// Runtime-only fields ignored by the database
	Position      string `gorm:"-"`
	WebdavDstName string `gorm:"-"`
}

@ -1,7 +1,6 @@
package model
import (
"encoding/json"
"github.com/jinzhu/gorm"
)
@ -37,48 +36,3 @@ type GroupOption struct {
AdvanceDelete bool `json:"advance_delete,omitempty"`
WebDAVProxy bool `json:"webdav_proxy,omitempty"`
}
// GetGroupByID fetches a single user group by primary key.
func GetGroupByID(ID interface{}) (Group, error) {
	group := Group{}
	err := DB.First(&group, ID).Error
	return group, err
}
// AfterFind is a gorm hook that deserializes the group's policy list and
// options from their JSON columns after the row is loaded.
func (group *Group) AfterFind() (err error) {
	// Decode the serialized policy ID list, if present.
	if group.Policies != "" {
		if err = json.Unmarshal([]byte(group.Policies), &group.PolicyList); err != nil {
			return err
		}
	}
	// Decode the serialized group options, if present.
	if group.Options != "" {
		return json.Unmarshal([]byte(group.Options), &group.OptionsSerialized)
	}
	return nil
}
// BeforeSave is a gorm hook that serializes in-memory fields back into their
// database columns before the group row is persisted.
func (group *Group) BeforeSave() (err error) {
	return group.SerializePolicyList()
}
// SerializePolicyList writes the in-memory policy list and options into the
// JSON string columns persisted to the database.
//
// Fix: the original assigned the marshal output to the column fields before
// checking the marshal error, so a failed marshal could clobber the stored
// value with invalid data. Errors are now checked before any assignment.
// TODO 完善测试 (improve tests)
func (group *Group) SerializePolicyList() (err error) {
	policies, err := json.Marshal(&group.PolicyList)
	if err != nil {
		return err
	}
	group.Policies = string(policies)

	optionsValue, err := json.Marshal(&group.OptionsSerialized)
	if err != nil {
		return err
	}
	group.Options = string(optionsValue)
	return nil
}

@ -0,0 +1,91 @@
package model
import (
"fmt"
"time"
"github.com/jinzhu/gorm"
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
_ "github.com/jinzhu/gorm/dialects/mssql"
_ "github.com/jinzhu/gorm/dialects/mysql"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
// DB is the global gorm connection singleton used by the migrator's v3 models.
var DB *gorm.DB

// Init opens the v3 database described by conf.DatabaseConfig and stores the
// handle in DB. Supported types: sqlite, postgres, mysql, mssql.
func Init() error {
	var (
		db  *gorm.DB
		err error
		confDBType string = conf.DatabaseConfig.Type
	)
	// Accept the legacy "sqlite3" spelling from existing config files.
	if confDBType == "sqlite3" {
		confDBType = "sqlite"
	}
	switch confDBType {
	case "UNSET", "sqlite":
		// Use SQLite when no database type is configured or when explicitly
		// requested.
		// NOTE(review): this file's import block registers only the mssql,
		// mysql and postgres gorm dialects — presumably the sqlite3 driver is
		// registered in a build-tagged file elsewhere; confirm it is linked in.
		db, err = gorm.Open("sqlite3", util.RelativePath(conf.DatabaseConfig.DBFile))
	case "postgres":
		db, err = gorm.Open(confDBType, fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=disable",
			conf.DatabaseConfig.Host,
			conf.DatabaseConfig.User,
			conf.DatabaseConfig.Password,
			conf.DatabaseConfig.Name,
			conf.DatabaseConfig.Port))
	case "mysql", "mssql":
		// MySQL/MSSQL can connect over a unix socket or TCP host:port.
		var host string
		if conf.DatabaseConfig.UnixSocket {
			host = fmt.Sprintf("unix(%s)",
				conf.DatabaseConfig.Host)
		} else {
			host = fmt.Sprintf("(%s:%d)",
				conf.DatabaseConfig.Host,
				conf.DatabaseConfig.Port)
		}
		db, err = gorm.Open(confDBType, fmt.Sprintf("%s:%s@%s/%s?charset=%s&parseTime=True&loc=Local",
			conf.DatabaseConfig.User,
			conf.DatabaseConfig.Password,
			host,
			conf.DatabaseConfig.Name,
			conf.DatabaseConfig.Charset))
	default:
		return fmt.Errorf("unsupported database type %q", confDBType)
	}
	//db.SetLogger(util.Log())
	if err != nil {
		return fmt.Errorf("failed to connect to database: %w", err)
	}
	// Prepend the configured table prefix to every model's table name.
	gorm.DefaultTableNameHandler = func(db *gorm.DB, defaultTableName string) string {
		return conf.DatabaseConfig.TablePrefix + defaultTableName
	}
	// Log every SQL statement (unconditionally enabled for the migrator).
	db.LogMode(true)
	// Connection pool: SQLite is restricted to a single open connection.
	db.DB().SetMaxIdleConns(50)
	if confDBType == "sqlite" || confDBType == "UNSET" {
		db.DB().SetMaxOpenConns(1)
	} else {
		db.DB().SetMaxOpenConns(100)
	}
	// Recycle pooled connections after 30 seconds.
	db.DB().SetConnMaxLifetime(time.Second * 30)
	DB = db
	return nil
}

@ -1,7 +1,6 @@
package model
import (
"encoding/json"
"github.com/jinzhu/gorm"
)
@ -50,42 +49,3 @@ const (
SlaveNodeType ModelType = iota
MasterNodeType
)
// GetNodeByID fetches a single node by primary key.
func GetNodeByID(ID interface{}) (Node, error) {
	node := Node{}
	err := DB.First(&node, ID).Error
	return node, err
}
// GetNodesByStatus lists every node whose status matches one of the given
// status values.
func GetNodesByStatus(status ...NodeStatus) ([]Node, error) {
	var nodes []Node
	err := DB.Where("status in (?)", status).Find(&nodes).Error
	return nodes, err
}
// AfterFind is a gorm hook that decodes the serialized offline-download
// settings into Aria2OptionsSerialized after the row is loaded.
func (node *Node) AfterFind() (err error) {
	if node.Aria2Options == "" {
		return nil
	}
	return json.Unmarshal([]byte(node.Aria2Options), &node.Aria2OptionsSerialized)
}
// BeforeSave is a gorm hook that serializes Aria2OptionsSerialized into the
// Aria2Options column before the node row is persisted.
//
// Fix: the original assigned the marshal output to the column before checking
// the marshal error, which could clobber the stored value with invalid data
// on failure. The error is now checked first.
func (node *Node) BeforeSave() (err error) {
	optionsValue, err := json.Marshal(&node.Aria2OptionsSerialized)
	if err != nil {
		return err
	}
	node.Aria2Options = string(optionsValue)
	return nil
}
// SetStatus sets the node's enabled status in memory and persists the change
// to the database.
func (node *Node) SetStatus(status NodeStatus) error {
	node.Status = status
	updates := map[string]interface{}{
		"status": status,
	}
	return DB.Model(node).Updates(updates).Error
}

@ -0,0 +1,62 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Policy is the v3 storage policy model: one row per configured storage
// backend.
type Policy struct {
	// Table columns
	gorm.Model
	Name               string
	Type               string
	Server             string
	BucketName         string
	IsPrivate          bool
	BaseURL            string
	AccessKey          string `gorm:"type:text"`
	SecretKey          string `gorm:"type:text"`
	MaxSize            uint64
	AutoRename         bool
	DirNameRule        string
	FileNameRule       string
	IsOriginLinkEnable bool
	Options            string `gorm:"type:text"`

	// Runtime-only fields ignored by the database
	OptionsSerialized PolicyOption `gorm:"-"`
	MasterID          string       `gorm:"-"`
}
// PolicyOption holds the non-public storage policy attributes, serialized as
// JSON into Policy.Options.
type PolicyOption struct {
	// Token is the Upyun access token.
	Token string `json:"token"`
	// FileType lists the allowed file extensions.
	FileType []string `json:"file_type"`
	// MimeType of uploaded content.
	MimeType string `json:"mimetype"`
	// OauthRedirect is the OAuth redirect address.
	OauthRedirect string `json:"od_redirect,omitempty"`
	// OdProxy is the OneDrive reverse-proxy address.
	OdProxy string `json:"od_proxy,omitempty"`
	// OdDriver is the OneDrive drive locator.
	OdDriver string `json:"od_driver,omitempty"`
	// Region is the region code.
	Region string `json:"region,omitempty"`
	// ServerSideEndpoint is the endpoint used for server-side requests;
	// Policy.Server is used when this is empty.
	ServerSideEndpoint string `json:"server_side_endpoint,omitempty"`
	// ChunkSize is the chunk size for multipart uploads.
	ChunkSize uint64 `json:"chunk_size,omitempty"`
	// PlaceholderWithSize reports whether space is reserved before a
	// multipart upload.
	PlaceholderWithSize bool `json:"placeholder_with_size,omitempty"`
	// TPSLimit caps API requests per second against the storage backend.
	TPSLimit float64 `json:"tps_limit,omitempty"`
	// TPSLimitBurst caps the per-second API request burst.
	TPSLimitBurst int `json:"tps_limit_burst,omitempty"`
	// Set this to `true` to force the request to use path-style addressing,
	// i.e., `http://s3.amazonaws.com/BUCKET/KEY `
	S3ForcePathStyle bool `json:"s3_path_style"`
	// File extensions that support thumbnail generation using native policy API.
	ThumbExts []string `json:"thumb_exts,omitempty"`
}

@ -0,0 +1,13 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Setting is the v3 system settings model: one key/value pair per row.
type Setting struct {
	gorm.Model
	Type  string `gorm:"not null"`
	Name  string `gorm:"unique;not null;index:setting_key"`
	Value string `gorm:"size:65535"`
}

@ -0,0 +1,27 @@
package model
import (
"time"
"github.com/jinzhu/gorm"
)
// Share is the v3 share model.
type Share struct {
	gorm.Model
	Password        string     // share password; empty means the share is unencrypted
	IsDir           bool       // whether the shared source is a directory
	UserID          uint       // ID of the creating user
	SourceID        uint       // ID of the shared source (file or folder, per IsDir)
	Views           int        // view count
	Downloads       int        // download count
	RemainDownloads int        // remaining download quota; negative means unlimited
	Expires         *time.Time // expiration time; nil means never expires
	PreviewEnabled  bool       // whether direct preview is allowed
	SourceName      string     `gorm:"index:source"` // denormalized name used for searching

	// Associated models (configured not to preload or auto-update)
	User   User   `gorm:"PRELOAD:false,association_autoupdate:false"`
	File   File   `gorm:"PRELOAD:false,association_autoupdate:false"`
	Folder Folder `gorm:"PRELOAD:false,association_autoupdate:false"`
}

@ -0,0 +1,16 @@
package model
import (
"github.com/jinzhu/gorm"
)
// SourceLink represent a shared file source link
type SourceLink struct {
	gorm.Model
	FileID    uint   // corresponding file ID
	Name      string // name of the file while creating the source link, for annotation
	Downloads int    // download count

	// Associated model.
	// NOTE(review): the tag value "save_associations:false:false" contains a
	// doubled ":false" — looks like a typo; confirm how gorm parses it.
	File File `gorm:"save_associations:false:false"`
}

@ -0,0 +1,23 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Tag is a user-defined tag.
type Tag struct {
	gorm.Model
	Name       string // tag name
	Icon       string // icon identifier
	Color      string // icon color
	Type       int    // tag type (file category / directory shortcut)
	Expression string `gorm:"type:text"` // search expression or direct path
	UserID     uint   // creator's user ID
}

// Tag.Type values.
const (
	// FileTagType is a file-category tag.
	FileTagType = iota
	// DirectoryLinkType is a directory-shortcut tag.
	DirectoryLinkType
)

@ -0,0 +1,16 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Task is the v3 background task model.
type Task struct {
	gorm.Model
	Status   int    // task status
	Type     int    // task type
	UserID   uint   // initiating user's ID; 0 means initiated by the system
	Progress int    // progress
	Error    string `gorm:"type:text"` // error message
	Props    string `gorm:"type:text"` // task properties
}

@ -0,0 +1,45 @@
package model
import (
"github.com/jinzhu/gorm"
)
// User account status values.
const (
	// Active means the account is in normal state.
	Active = iota
	// NotActivicated means the account has not been activated yet.
	// NOTE(review): name is a typo for "NotActivated"; it is referenced
	// elsewhere, so it is kept as-is.
	NotActivicated
	// Baned means the account was banned.
	Baned
	// OveruseBaned means the account was banned for exceeding its quota.
	OveruseBaned
)

// User is the v3 user model.
type User struct {
	// Table columns
	gorm.Model
	Email     string `gorm:"type:varchar(100);unique_index"`
	Nick      string `gorm:"size:50"`
	Password  string `json:"-"`
	Status    int
	GroupID   uint
	Storage   uint64
	TwoFactor string
	Avatar    string
	Options   string `json:"-" gorm:"size:4294967295"`
	Authn     string `gorm:"size:4294967295"`

	// Associated models
	Group  Group  `gorm:"save_associations:false:false"`
	Policy Policy `gorm:"PRELOAD:false,association_autoupdate:false"`

	// Runtime-only fields ignored by the database
	OptionsSerialized UserOption `gorm:"-"`
}

// UserOption holds per-user personalization settings, serialized into
// User.Options as JSON.
type UserOption struct {
	ProfileOff     bool   `json:"profile_off,omitempty"`
	PreferredTheme string `json:"preferred_theme,omitempty"`
}

@ -0,0 +1,16 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Webdav is a v3 WebDAV application account.
type Webdav struct {
	gorm.Model
	Name     string // application name
	Password string `gorm:"unique_index:password_only_on"` // application password
	UserID   uint   `gorm:"unique_index:password_only_on"` // owning user's ID
	Root     string `gorm:"type:text"` // root directory of the account
	Readonly bool   `gorm:"type:bool"` // whether the account is read-only
	UseProxy bool   `gorm:"type:bool"` // whether downloads are reverse-proxied
}

@ -0,0 +1,89 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/node"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// migrateNode copies every v3 node row into the v4 node table, translating
// the type/status enums, decoding aria2 remote-download settings from their
// JSON columns, and deriving capability bits (remote download for
// aria2-enabled nodes; archive create/extract for the master node).
//
// Fix: the capability-set local was named "cap", shadowing the builtin
// cap(); renamed to caps.
func (m *Migrator) migrateNode() error {
	m.l.Info("Migrating nodes...")
	var nodes []model.Node
	if err := model.DB.Find(&nodes).Error; err != nil {
		return fmt.Errorf("failed to list v3 nodes: %w", err)
	}

	for _, n := range nodes {
		nodeType := node.TypeSlave
		nodeStatus := node.StatusSuspended
		if n.Type == model.MasterNodeType {
			nodeType = node.TypeMaster
		}
		if n.Status == model.NodeActive {
			nodeStatus = node.StatusActive
		}

		caps := &boolset.BooleanSet{}
		settings := &types.NodeSetting{
			Provider: types.DownloaderProviderAria2,
		}
		if n.Aria2Enabled {
			boolset.Sets(map[types.NodeCapability]bool{
				types.NodeCapabilityRemoteDownload: true,
			}, caps)

			// Outer JSON: aria2 connection settings.
			aria2Options := &model.Aria2Option{}
			if err := json.Unmarshal([]byte(n.Aria2Options), aria2Options); err != nil {
				return fmt.Errorf("failed to unmarshal aria2 options: %w", err)
			}

			// Inner JSON: extra options forwarded to the downloader.
			downloaderOptions := map[string]any{}
			if aria2Options.Options != "" {
				if err := json.Unmarshal([]byte(aria2Options.Options), &downloaderOptions); err != nil {
					return fmt.Errorf("failed to unmarshal aria2 options: %w", err)
				}
			}

			settings.Aria2Setting = &types.Aria2Setting{
				Server:   aria2Options.Server,
				Token:    aria2Options.Token,
				Options:  downloaderOptions,
				TempPath: aria2Options.TempPath,
			}
		}

		// The master node additionally handles archive create/extract.
		if n.Type == model.MasterNodeType {
			boolset.Sets(map[types.NodeCapability]bool{
				types.NodeCapabilityExtractArchive: true,
				types.NodeCapabilityCreateArchive:  true,
			}, caps)
		}

		stm := m.v4client.Node.Create().
			SetRawID(int(n.ID)).
			SetCreatedAt(formatTime(n.CreatedAt)).
			SetUpdatedAt(formatTime(n.UpdatedAt)).
			SetName(n.Name).
			SetType(nodeType).
			SetStatus(nodeStatus).
			SetServer(n.Server).
			SetSlaveKey(n.SlaveKey).
			SetCapabilities(caps).
			SetSettings(settings).
			SetWeight(n.Rank)
		if err := stm.Exec(context.Background()); err != nil {
			return fmt.Errorf("failed to create node %q: %w", n.Name, err)
		}
	}
	return nil
}

@ -0,0 +1,192 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/node"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/samber/lo"
)
// migratePolicy migrates every v3 storage policy into the v4
// storage_policies table and returns the migrated policy IDs keyed by v3 ID.
//
// Along the way it derives v4 policy settings from the serialized v3
// options, pins known thumbnail-capable extensions for cloud policy types,
// enforces a random element in the file name rule, creates a slave node for
// each "remote" policy (v4 models remote storage via nodes), and records
// local policy IDs in m.state.LocalPolicyIDs for later steps.
//
// Fixes: log calls used %w (only fmt.Errorf wraps; printf-style loggers need
// %s); error returns after the transaction was opened did not roll it back;
// the two identical postgres sequence-reset guards are merged.
func (m *Migrator) migratePolicy() (map[int]bool, error) {
	m.l.Info("Migrating storage policies...")
	var policies []model.Policy
	if err := model.DB.Find(&policies).Error; err != nil {
		return nil, fmt.Errorf("failed to list v3 storage policies: %w", err)
	}

	if m.state.LocalPolicyIDs == nil {
		m.state.LocalPolicyIDs = make(map[int]bool)
	}
	if m.state.PolicyIDs == nil {
		m.state.PolicyIDs = make(map[int]bool)
	}

	m.l.Info("Found %d v3 storage policies to be migrated.", len(policies))

	// Thumb proxy settings decide whether thumbnail generation should be
	// proxied for a given policy. Failure here is non-fatal.
	var (
		thumbProxySettings []model.Setting
		thumbProxyEnabled  bool
		thumbProxyPolicy   []int
	)
	if err := model.DB.Where("name in (?)", []string{"thumb_proxy_enabled", "thumb_proxy_policy"}).Find(&thumbProxySettings).Error; err != nil {
		m.l.Warning("Failed to list v3 thumb proxy settings: %s", err)
	}
	for _, s := range thumbProxySettings {
		if s.Name == "thumb_proxy_enabled" {
			thumbProxyEnabled = setting.IsTrueValue(s.Value)
		} else if s.Name == "thumb_proxy_policy" {
			if err := json.Unmarshal([]byte(s.Value), &thumbProxyPolicy); err != nil {
				m.l.Warning("Failed to unmarshal v3 thumb proxy policy: %s", err)
			}
		}
	}

	tx, err := m.v4client.Tx(context.Background())
	if err != nil {
		return nil, fmt.Errorf("failed to start transaction: %w", err)
	}

	for _, policy := range policies {
		m.l.Info("Migrating storage policy %q...", policy.Name)
		if err := json.Unmarshal([]byte(policy.Options), &policy.OptionsSerialized); err != nil {
			_ = tx.Rollback()
			return nil, fmt.Errorf("failed to unmarshal options for policy %q: %w", policy.Name, err)
		}

		settings := &types.PolicySetting{
			Token:              policy.OptionsSerialized.Token,
			FileType:           policy.OptionsSerialized.FileType,
			OauthRedirect:      policy.OptionsSerialized.OauthRedirect,
			OdDriver:           policy.OptionsSerialized.OdDriver,
			Region:             policy.OptionsSerialized.Region,
			ServerSideEndpoint: policy.OptionsSerialized.ServerSideEndpoint,
			ChunkSize:          int64(policy.OptionsSerialized.ChunkSize),
			TPSLimit:           policy.OptionsSerialized.TPSLimit,
			TPSLimitBurst:      policy.OptionsSerialized.TPSLimitBurst,
			S3ForcePathStyle:   policy.OptionsSerialized.S3ForcePathStyle,
			ThumbExts:          policy.OptionsSerialized.ThumbExts,
		}

		// OneDrive supports thumbnails for all extensions; other cloud types
		// get the fixed extension lists their APIs handle.
		if policy.Type == types.PolicyTypeOd {
			settings.ThumbSupportAllExts = true
		} else {
			switch policy.Type {
			case types.PolicyTypeCos:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "heif", "heic"}
			case types.PolicyTypeOss:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "heic", "tiff", "avif"}
			case types.PolicyTypeUpyun:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "svg"}
			case types.PolicyTypeQiniu:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "tiff", "avif", "psd"}
			case types.PolicyTypeRemote:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif"}
			}
		}

		// v3 BaseURL (or the OneDrive proxy option) becomes an explicit
		// custom proxy server in v4.
		if policy.Type != types.PolicyTypeOd && policy.BaseURL != "" {
			settings.CustomProxy = true
			settings.ProxyServer = policy.BaseURL
		} else if policy.OptionsSerialized.OdProxy != "" {
			settings.CustomProxy = true
			settings.ProxyServer = policy.OptionsSerialized.OdProxy
		}

		if policy.DirNameRule == "" {
			policy.DirNameRule = "uploads/{uid}/{path}"
		}

		// COS uses a fixed 25 MiB chunk size in v4.
		if policy.Type == types.PolicyTypeCos {
			settings.ChunkSize = 1024 * 1024 * 25
		}

		if thumbProxyEnabled && lo.Contains(thumbProxyPolicy, int(policy.ID)) {
			settings.ThumbGeneratorProxy = true
		}

		// File name rules must contain a random element to avoid collisions.
		mustContain := []string{"{randomkey16}", "{randomkey8}", "{uuid}"}
		hasRandomElement := false
		for _, c := range mustContain {
			if strings.Contains(policy.FileNameRule, c) {
				hasRandomElement = true
				break
			}
		}
		if !hasRandomElement {
			policy.FileNameRule = "{uid}_{randomkey8}_{originname}"
			m.l.Warning("Storage policy %q has no random element in file name rule, using default file name rule.", policy.Name)
		}

		stm := tx.StoragePolicy.Create().
			SetRawID(int(policy.ID)).
			SetCreatedAt(formatTime(policy.CreatedAt)).
			SetUpdatedAt(formatTime(policy.UpdatedAt)).
			SetName(policy.Name).
			SetType(policy.Type).
			SetServer(policy.Server).
			SetBucketName(policy.BucketName).
			SetIsPrivate(policy.IsPrivate).
			SetAccessKey(policy.AccessKey).
			SetSecretKey(policy.SecretKey).
			SetMaxSize(int64(policy.MaxSize)).
			SetDirNameRule(policy.DirNameRule).
			SetFileNameRule(policy.FileNameRule).
			SetSettings(settings)

		// v4 represents remote storage as a slave node linked to the policy.
		if policy.Type == types.PolicyTypeRemote {
			m.l.Info("Storage policy %q is remote, creating node for it...", policy.Name)
			bs := &boolset.BooleanSet{}
			n, err := tx.Node.Create().
				SetName(policy.Name).
				SetStatus(node.StatusActive).
				SetServer(policy.Server).
				SetSlaveKey(policy.SecretKey).
				SetType(node.TypeSlave).
				SetCapabilities(bs).
				SetSettings(&types.NodeSetting{
					Provider: types.DownloaderProviderAria2,
				}).
				Save(context.Background())
			if err != nil {
				_ = tx.Rollback()
				return nil, fmt.Errorf("failed to create node for storage policy %q: %w", policy.Name, err)
			}
			stm.SetNodeID(n.ID)
		}

		if _, err := stm.Save(context.Background()); err != nil {
			_ = tx.Rollback()
			return nil, fmt.Errorf("failed to create storage policy %q: %w", policy.Name, err)
		}

		m.state.PolicyIDs[int(policy.ID)] = true
		if policy.Type == types.PolicyTypeLocal {
			m.state.LocalPolicyIDs[int(policy.ID)] = true
		}
	}

	if err := tx.Commit(); err != nil {
		return nil, fmt.Errorf("failed to commit transaction: %w", err)
	}

	// Explicit-ID inserts do not advance postgres sequences; reset them so
	// future inserts start after the migrated rows.
	if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
		m.l.Info("Resetting storage policy ID sequence for postgres...")
		m.v4client.StoragePolicy.ExecContext(context.Background(), "SELECT SETVAL('storage_policies_id_seq', (SELECT MAX(id) FROM storage_policies))")
		m.l.Info("Resetting node ID sequence for postgres...")
		m.v4client.Node.ExecContext(context.Background(), "SELECT SETVAL('nodes_id_seq', (SELECT MAX(id) FROM nodes))")
	}

	return m.state.PolicyIDs, nil
}

@ -0,0 +1,213 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
)
// TODO:
// 1. Policy thumb proxy migration
type (
	// settignMigrator converts one v3 setting (name/value) into zero or more
	// v4 settings; allSettings gives read access to every other v3 setting.
	// NOTE(review): the name has a typo ("settign"); it is referenced by the
	// migrators map below, so it is kept as-is.
	settignMigrator func(allSettings map[string]string, name, value string) ([]settingMigrated, error)

	// settingMigrated is a single v4 setting produced by a migrator.
	settingMigrated struct {
		name  string
		value string
	}

	// PackProduct 容量包商品 — legacy v3 storage-pack product definition.
	PackProduct struct {
		ID    int64  `json:"id"`
		Name  string `json:"name"`
		Size  uint64 `json:"size"`
		Time  int64  `json:"time"`
		Price int    `json:"price"`
		Score int    `json:"score"`
	}

	// GroupProducts is the legacy v3 group-membership product definition.
	GroupProducts struct {
		ID        int64    `json:"id"`
		Name      string   `json:"name"`
		GroupID   uint     `json:"group_id"`
		Time      int64    `json:"time"`
		Price     int      `json:"price"`
		Score     int      `json:"score"`
		Des       []string `json:"des"`
		Highlight bool     `json:"highlight"`
	}
)
// noopMigrator drops a v3 setting entirely: it produces no v4 settings.
var noopMigrator = func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
	return nil, nil
}

// migrators maps a v3 setting name to the function that converts it into
// zero or more v4 settings. Names absent from this map are copied over
// verbatim by migrateSettings; names bound to noopMigrator are dropped.
var migrators = map[string]settignMigrator{
	"siteKeywords":                   noopMigrator,
	"over_used_template":             noopMigrator,
	"download_timeout":               noopMigrator,
	"preview_timeout":                noopMigrator,
	"doc_preview_timeout":            noopMigrator,
	"slave_node_retry":               noopMigrator,
	"slave_ping_interval":            noopMigrator,
	"slave_recover_interval":         noopMigrator,
	"slave_transfer_timeout":         noopMigrator,
	"onedrive_monitor_timeout":       noopMigrator,
	"onedrive_source_timeout":        noopMigrator,
	"share_download_session_timeout": noopMigrator,
	"onedrive_callback_check":        noopMigrator,
	"mail_activation_template":       noopMigrator,
	"mail_reset_pwd_template":        noopMigrator,
	"appid":                          noopMigrator,
	"appkey":                         noopMigrator,
	"wechat_enabled":                 noopMigrator,
	"wechat_appid":                   noopMigrator,
	"wechat_mchid":                   noopMigrator,
	"wechat_serial_no":               noopMigrator,
	"wechat_api_key":                 noopMigrator,
	"wechat_pk_content":              noopMigrator,
	"hot_share_num":                  noopMigrator,
	"defaultTheme":                   noopMigrator,
	"theme_options":                  noopMigrator,
	"max_worker_num":                 noopMigrator,
	"max_parallel_transfer":          noopMigrator,
	"secret_key":                     noopMigrator,
	"avatar_size_m":                  noopMigrator,
	"avatar_size_s":                  noopMigrator,
	"home_view_method":               noopMigrator,
	"share_view_method":              noopMigrator,
	"cron_recycle_upload_session":    noopMigrator,
	// tcaptcha is no longer supported in v4; fall back to the normal captcha.
	"captcha_type": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
		if value == "tcaptcha" {
			value = "normal"
		}
		return []settingMigrated{
			{
				name:  "captcha_type",
				value: value,
			},
		}, nil
	},
	"captcha_TCaptcha_CaptchaAppId": noopMigrator,
	"captcha_TCaptcha_AppSecretKey": noopMigrator,
	"captcha_TCaptcha_SecretId":     noopMigrator,
	"captcha_TCaptcha_SecretKey":    noopMigrator,
	// Renamed in v4: thumb_file_suffix → thumb_entity_suffix.
	"thumb_file_suffix": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
		return []settingMigrated{
			{
				name:  "thumb_entity_suffix",
				value: value,
			},
		}, nil
	},
	// v3's single max source size fans out to every v4 thumbnail generator.
	"thumb_max_src_size": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
		return []settingMigrated{
			{
				name:  "thumb_music_cover_max_size",
				value: value,
			},
			{
				name:  "thumb_libreoffice_max_size",
				value: value,
			},
			{
				name:  "thumb_ffmpeg_max_size",
				value: value,
			},
			{
				name:  "thumb_vips_max_size",
				value: value,
			},
			{
				name:  "thumb_builtin_max_size",
				value: value,
			},
		}, nil
	},
	"initial_files":          noopMigrator,
	"office_preview_service": noopMigrator,
	"phone_required":         noopMigrator,
	"phone_enabled":          noopMigrator,
	// Renamed in v4: wopi_session_timeout → viewer_session_timeout.
	"wopi_session_timeout": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
		return []settingMigrated{
			{
				name:  "viewer_session_timeout",
				value: value,
			},
		}, nil
	},
	"custom_payment_enabled":  noopMigrator,
	"custom_payment_endpoint": noopMigrator,
	"custom_payment_secret":   noopMigrator,
	"custom_payment_name":     noopMigrator,
}
// migrateSettings converts all v3 settings into v4 settings using the
// migrators table (unknown names are copied verbatim) and inserts them in a
// single transaction, together with the mandatory hash_id_salt taken from
// the v3 config file. It also stashes thumb_file_suffix and avatar_path into
// m.state for later migration steps.
//
// Fix: the hash_id_salt presence check ran only after the transaction was
// opened, and the failure path returned without rolling back. The salt is
// now validated before any v4 writes begin.
func (m *Migrator) migrateSettings() error {
	m.l.Info("Migrating settings...")

	// hash_id_salt is mandatory; fail fast before touching the v4 database.
	if conf.SystemConfig.HashIDSalt == "" {
		return fmt.Errorf("hash ID salt is not set, please set it from v3 conf file")
	}

	// 1. List all settings
	var settings []model.Setting
	if err := model.DB.Find(&settings).Error; err != nil {
		return fmt.Errorf("failed to list v3 settings: %w", err)
	}

	m.l.Info("Found %d v3 setting pairs to be migrated.", len(settings))
	allSettings := make(map[string]string, len(settings))
	for _, s := range settings {
		allSettings[s.Name] = s.Value
	}

	migratedSettings := make([]settingMigrated, 0, len(settings))
	for _, s := range settings {
		// Stash values that later migration steps need.
		if s.Name == "thumb_file_suffix" {
			m.state.ThumbSuffix = s.Value
		}
		if s.Name == "avatar_path" {
			m.state.V3AvatarPath = s.Value
		}

		migrator, ok := migrators[s.Name]
		if !ok {
			// No explicit migrator: the setting carries over unchanged.
			migratedSettings = append(migratedSettings, settingMigrated{
				name:  s.Name,
				value: s.Value,
			})
			continue
		}
		newSettings, err := migrator(allSettings, s.Name, s.Value)
		if err != nil {
			return fmt.Errorf("failed to migrate setting %q: %w", s.Name, err)
		}
		migratedSettings = append(migratedSettings, newSettings...)
	}

	tx, err := m.v4client.Tx(context.Background())
	if err != nil {
		return fmt.Errorf("failed to start transaction: %w", err)
	}

	// Insert hash_id_salt
	if err := tx.Setting.Create().SetName("hash_id_salt").SetValue(conf.SystemConfig.HashIDSalt).Exec(context.Background()); err != nil {
		if rbErr := tx.Rollback(); rbErr != nil {
			return fmt.Errorf("failed to rollback transaction: %w", rbErr)
		}
		return fmt.Errorf("failed to create setting hash_id_salt: %w", err)
	}

	for _, s := range migratedSettings {
		if err := tx.Setting.Create().SetName(s.name).SetValue(s.value).Exec(context.Background()); err != nil {
			if rbErr := tx.Rollback(); rbErr != nil {
				return fmt.Errorf("failed to rollback transaction: %w", rbErr)
			}
			return fmt.Errorf("failed to create setting %q: %w", s.name, err)
		}
	}

	if err := tx.Commit(); err != nil {
		return fmt.Errorf("failed to commit transaction: %w", err)
	}
	return nil
}

@ -0,0 +1,102 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateShare copies v3 shares into v4 in batches of 1000, resuming from
// m.state.ShareOffset when a previous run was interrupted. File-backed share
// source IDs are shifted by m.state.LastFolderID (v4 stores folders and
// files in one table). Shares pointing at missing files or users are skipped
// with a warning. Progress is checkpointed after every committed batch.
//
// Fix: when opening the transaction failed, the original called
// tx.Rollback() on a nil tx, which would panic instead of reporting the
// error; the failing Save path also lacked an explicit rollback.
func (m *Migrator) migrateShare() error {
	m.l.Info("Migrating shares...")
	batchSize := 1000
	offset := m.state.ShareOffset
	ctx := context.Background()

	if offset > 0 {
		m.l.Info("Resuming share migration from offset %d", offset)
	}

	for {
		m.l.Info("Migrating shares with offset %d", offset)
		var shares []model.Share
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&shares).Error; err != nil {
			return fmt.Errorf("failed to list v3 shares: %w", err)
		}

		if len(shares) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				// Explicit-ID inserts do not advance postgres sequences.
				m.l.Info("Resetting share ID sequence for postgres...")
				m.v4client.Share.ExecContext(ctx, "SELECT SETVAL('shares_id_seq', (SELECT MAX(id) FROM shares))")
			}
			break
		}

		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// tx is nil on error; there is nothing to roll back.
			return fmt.Errorf("failed to start transaction: %w", err)
		}

		for _, s := range shares {
			sourceId := int(s.SourceID)
			if !s.IsDir {
				// File IDs were renumbered after folders during file migration.
				sourceId += m.state.LastFolderID
			}

			// check if file exists
			if _, err := tx.File.Query().Where(file.ID(sourceId)).First(ctx); err != nil {
				m.l.Warning("File %d not found, skipping share %d", sourceId, s.ID)
				continue
			}

			// check if user exist
			if _, ok := m.state.UserIDs[int(s.UserID)]; !ok {
				m.l.Warning("User %d not found, skipping share %d", s.UserID, s.ID)
				continue
			}

			stm := tx.Share.Create().
				SetCreatedAt(formatTime(s.CreatedAt)).
				SetUpdatedAt(formatTime(s.UpdatedAt)).
				SetViews(s.Views).
				SetRawID(int(s.ID)).
				SetDownloads(s.Downloads).
				SetFileID(sourceId).
				SetUserID(int(s.UserID))
			if s.Password != "" {
				stm.SetPassword(s.Password)
			}
			if s.Expires != nil {
				stm.SetNillableExpires(s.Expires)
			}
			// Negative remaining downloads means unlimited: leave column unset.
			if s.RemainDownloads >= 0 {
				stm.SetRemainDownloads(s.RemainDownloads)
			}

			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create share %d: %w", s.ID, err)
			}
		}

		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}

		// Checkpoint progress so an interrupted run resumes at this batch.
		offset += batchSize
		m.state.ShareOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after share batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing this batch")
		}
	}
	return nil
}

@ -0,0 +1,109 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateUser copies v3 users into v4 in batches of 1000, resuming from
// m.state.UserOffset when a previous run was interrupted. Migrated user IDs
// are recorded in m.state.UserIDs so later steps (shares, webdav, ...) can
// validate ownership. Progress is checkpointed after every committed batch.
//
// Fixes: when opening the transaction failed, the original called
// tx.Rollback() on a nil tx, which would panic instead of reporting the
// error; the transaction is also now opened with the shared ctx instead of a
// fresh context.Background().
func (m *Migrator) migrateUser() error {
	m.l.Info("Migrating users...")
	batchSize := 1000
	// Start from the saved offset if available
	offset := m.state.UserOffset
	ctx := context.Background()

	if m.state.UserIDs == nil {
		m.state.UserIDs = make(map[int]bool)
	}

	// If we're resuming, report how far the previous run got.
	if len(m.state.UserIDs) > 0 {
		m.l.Info("Resuming user migration from offset %d, %d users already migrated", offset, len(m.state.UserIDs))
	}

	for {
		m.l.Info("Migrating users with offset %d", offset)
		var users []model.User
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&users).Error; err != nil {
			return fmt.Errorf("failed to list v3 users: %w", err)
		}

		if len(users) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				// Explicit-ID inserts do not advance postgres sequences.
				m.l.Info("Resetting user ID sequence for postgres...")
				m.v4client.User.ExecContext(ctx, "SELECT SETVAL('users_id_seq', (SELECT MAX(id) FROM users))")
			}
			break
		}

		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// tx is nil on error; there is nothing to roll back.
			return fmt.Errorf("failed to start transaction: %w", err)
		}

		for _, u := range users {
			// Translate the v3 status enum into the v4 one.
			userStatus := user.StatusActive
			switch u.Status {
			case model.Active:
				userStatus = user.StatusActive
			case model.NotActivicated:
				userStatus = user.StatusInactive
			case model.Baned:
				userStatus = user.StatusManualBanned
			case model.OveruseBaned:
				userStatus = user.StatusSysBanned
			}

			setting := &types.UserSetting{
				VersionRetention:    true,
				VersionRetentionMax: 10,
			}

			stm := tx.User.Create().
				SetRawID(int(u.ID)).
				SetCreatedAt(formatTime(u.CreatedAt)).
				SetUpdatedAt(formatTime(u.UpdatedAt)).
				SetEmail(u.Email).
				SetNick(u.Nick).
				SetStatus(userStatus).
				SetStorage(int64(u.Storage)).
				SetGroupID(int(u.GroupID)).
				SetSettings(setting).
				SetPassword(u.Password)
			if u.TwoFactor != "" {
				stm.SetTwoFactorSecret(u.TwoFactor)
			}
			if u.Avatar != "" {
				stm.SetAvatar(u.Avatar)
			}

			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create user %d: %w", u.ID, err)
			}

			m.state.UserIDs[int(u.ID)] = true
		}

		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}

		// Update the offset in state and save after each batch
		offset += batchSize
		m.state.UserOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after user batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing %d users", offset)
		}
	}
	return nil
}

@ -0,0 +1,93 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateWebdav copies v3 webdav accounts into the v4 database in batches of
// 1000, resuming from the offset recorded in the migration state. Accounts
// whose owner was not migrated (absent from state.UserIDs) are skipped with a
// warning. Progress is persisted after every committed batch.
func (m *Migrator) migrateWebdav() error {
	m.l.Info("Migrating webdav accounts...")
	batchSize := 1000
	offset := m.state.WebdavOffset
	ctx := context.Background()
	if m.state.WebdavOffset > 0 {
		m.l.Info("Resuming webdav migration from offset %d", offset)
	}

	for {
		m.l.Info("Migrating webdav accounts with offset %d", offset)
		var webdavAccounts []model.Webdav
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&webdavAccounts).Error; err != nil {
			return fmt.Errorf("failed to list v3 webdav accounts: %w", err)
		}

		if len(webdavAccounts) == 0 {
			// All rows processed. For Postgres, advance the ID sequence past
			// the explicitly inserted raw IDs so future inserts do not collide.
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				m.l.Info("Resetting webdav account ID sequence for postgres...")
				m.v4client.DavAccount.ExecContext(ctx, "SELECT SETVAL('dav_accounts_id_seq', (SELECT MAX(id) FROM dav_accounts))")
			}
			break
		}

		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// No transaction was started, so there is nothing to roll back;
			// the previous tx.Rollback() here dereferenced a nil tx.
			return fmt.Errorf("failed to start transaction: %w", err)
		}

		for _, webdavAccount := range webdavAccounts {
			if _, ok := m.state.UserIDs[int(webdavAccount.UserID)]; !ok {
				m.l.Warning("User %d not found, skipping webdav account %d", webdavAccount.UserID, webdavAccount.ID)
				continue
			}

			props := types.DavAccountProps{}
			// Map the v3 boolean columns onto the v4 option bitset.
			options := boolset.BooleanSet{}
			if webdavAccount.Readonly {
				boolset.Set(int(types.DavAccountReadOnly), true, &options)
			}
			if webdavAccount.UseProxy {
				boolset.Set(int(types.DavAccountProxy), true, &options)
			}

			stm := tx.DavAccount.Create().
				SetCreatedAt(formatTime(webdavAccount.CreatedAt)).
				SetUpdatedAt(formatTime(webdavAccount.UpdatedAt)).
				SetRawID(int(webdavAccount.ID)).
				SetName(webdavAccount.Name).
				SetURI("cloudreve://my" + webdavAccount.Root).
				SetPassword(webdavAccount.Password).
				SetProps(&props).
				SetOptions(&options).
				SetOwnerID(int(webdavAccount.UserID))

			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create webdav account %d: %w", webdavAccount.ID, err)
			}
		}

		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}

		// Persist progress so an interrupted run can resume at this offset.
		offset += batchSize
		m.state.WebdavOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after webdav batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing this batch")
		}
	}

	return nil
}

@ -126,7 +126,7 @@
// To support tools that analyze Go packages, the patterns found in //go:embed lines
// are available in “go list” output. See the EmbedPatterns, TestEmbedPatterns,
// and XTestEmbedPatterns fields in the “go help list” output.
package bootstrap
package statics
import (
"errors"

@ -0,0 +1,206 @@
package statics
import (
"archive/zip"
"bufio"
"crypto/sha256"
_ "embed"
"encoding/json"
"fmt"
"io"
"io/fs"
"net/http"
"path/filepath"
"sort"
"strings"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gin-contrib/static"
)
// StaticFolder is the name of the on-disk folder (under the data path) that,
// when present, overrides the embedded static assets.
const StaticFolder = "statics"

// zipContent holds the zipped frontend build, embedded at compile time.
//
//go:embed assets.zip
var zipContent string

// GinFS adapts an http.FileSystem so it satisfies gin-contrib/static's
// ServeFileSystem interface.
type GinFS struct {
	FS http.FileSystem
}

// version mirrors the schema of the version.json manifest bundled with the
// frontend build.
type version struct {
	Name string `json:"name"`
	Version string `json:"version"`
}
// Open opens the named file from the wrapped filesystem.
func (b *GinFS) Open(name string) (http.File, error) {
	return b.FS.Open(name)
}
// Exists reports whether filepath can be opened from the wrapped filesystem.
// The route prefix argument is ignored.
func (b *GinFS) Exists(prefix string, filepath string) bool {
	_, err := b.FS.Open(filepath)
	return err == nil
}
// NewServerStaticFS builds the filesystem used to serve frontend assets. A
// local "statics" folder under the data path, when present, overrides the
// embedded build. The bundled version.json manifest is then validated: a
// missing or unreadable manifest only logs a warning, while a name or version
// mismatch aborts startup through l.Panic.
func NewServerStaticFS(l logging.Logger, statics fs.FS, isPro bool) (static.ServeFileSystem, error) {
	var staticFS static.ServeFileSystem
	if util.Exists(util.DataPath(StaticFolder)) {
		l.Info("Folder with %q already exists, it will be used to serve static files.", util.DataPath(StaticFolder))
		staticFS = static.LocalFile(util.DataPath(StaticFolder), false)
	} else {
		// Serve the assets embedded into the binary.
		embedFS, err := fs.Sub(statics, "assets/build")
		if err != nil {
			return nil, fmt.Errorf("failed to initialize static resources: %w", err)
		}
		staticFS = &GinFS{
			FS: http.FS(embedFS),
		}
	}

	// Validate the version manifest shipped with the static assets.
	f, err := staticFS.Open("version.json")
	if err != nil {
		l.Warning("Missing version identifier file in static resources, please delete \"statics\" folder and rebuild it.")
		return staticFS, nil
	}
	// Close the manifest handle once read (the original leaked it).
	defer f.Close()

	b, err := io.ReadAll(f)
	if err != nil {
		l.Warning("Failed to read version identifier file in static resources, please delete \"statics\" folder and rebuild it.")
		return staticFS, nil
	}

	var v version
	if err := json.Unmarshal(b, &v); err != nil {
		l.Warning("Failed to parse version identifier file in static resources: %s", err)
		return staticFS, nil
	}

	staticName := "cloudreve-frontend"
	if isPro {
		staticName += "-pro"
	}

	if v.Name != staticName {
		l.Panic("Static resource version mismatch, please delete \"statics\" folder and rebuild it.")
	}

	if v.Version != constants.BackendVersion {
		// Message fix: the original read "…Desired: %s]please…" with no separator.
		l.Panic("Static resource version mismatch [Current %s, Desired: %s], please delete \"statics\" folder and rebuild it.", v.Version, constants.BackendVersion)
	}

	return staticFS, nil
}
// NewStaticFS parses the embedded assets.zip archive into an in-memory fs.FS
// whose entries are ordered by directory then file name. It panics through
// the logger when the archive cannot be read.
func NewStaticFS(l logging.Logger) fs.FS {
	zipReader, err := zip.NewReader(strings.NewReader(zipContent), int64(len(zipContent)))
	if err != nil {
		l.Panic("Static resource is not a valid zip file: %s", err)
	}

	var files []file
	err = fs.WalkDir(zipReader, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return fmt.Errorf("cannot walk into %q: %w", path, err)
		}
		if path == "." {
			return nil
		}

		var f file
		if d.IsDir() {
			// Directories are recorded with a trailing slash and no data.
			f.name = path + "/"
		} else {
			f.name = path
			rc, err := zipReader.Open(path)
			if err != nil {
				// Typo fix: message previously read "canot open".
				return fmt.Errorf("cannot open %q: %w", path, err)
			}
			defer rc.Close()
			data, err := io.ReadAll(rc)
			if err != nil {
				return fmt.Errorf("cannot read %q: %w", path, err)
			}
			f.data = string(data)
			hash := sha256.Sum256(data)
			// Stores the bitwise complement of the SHA-256 digest —
			// presumably to match the hash layout the in-memory FS
			// implementation expects; confirm before relying on it.
			for i := range f.hash {
				f.hash[i] = ^hash[i]
			}
		}
		files = append(files, f)
		return nil
	})
	if err != nil {
		l.Panic("Failed to initialize static resources: %s", err)
	}

	// Order entries by directory, then by entry name.
	sort.Slice(files, func(i, j int) bool {
		fi, fj := files[i], files[j]
		di, ei, _ := split(fi.name)
		dj, ej, _ := split(fj.name)
		if di != dj {
			return di < dj
		}
		return ei < ej
	})

	var embedFS FS
	embedFS.files = &files
	return embedFS
}
// Eject writes the embedded static resources under "assets/build" into the
// local statics folder (under the data path) so they can be inspected and
// customized. Returns an error if any file cannot be written.
func Eject(l logging.Logger, statics fs.FS) error {
	embedFS, err := fs.Sub(statics, "assets/build")
	if err != nil {
		l.Panic("Failed to initialize static resources: %s", err)
	}

	walk := func(relPath string, d fs.DirEntry, err error) error {
		if err != nil {
			return fmt.Errorf("failed to read info of %q: %s, skipping...", relPath, err)
		}
		if d.IsDir() {
			return nil
		}

		dst := util.DataPath(filepath.Join(StaticFolder, relPath))
		out, err := util.CreatNestedFile(dst)
		if err != nil {
			// Check the error before touching out: the original deferred
			// out.Close() first, panicking on a nil handle when creation failed.
			return fmt.Errorf("failed to create file %q: %s, skipping...", dst, err)
		}
		defer out.Close()

		l.Info("Ejecting %q...", dst)
		obj, err := embedFS.Open(relPath)
		if err != nil {
			// The original discarded this error and would have crashed in io.Copy.
			return fmt.Errorf("cannot open embedded file %q: %s, skipping...", relPath, err)
		}
		defer obj.Close()

		if _, err := io.Copy(out, bufio.NewReader(obj)); err != nil {
			return fmt.Errorf("cannot write file %q: %s, skipping...", relPath, err)
		}
		return nil
	}

	if err := fs.WalkDir(embedFS, ".", walk); err != nil {
		return fmt.Errorf("failed to eject static resources: %w", err)
	}
	l.Info("Finish ejecting static resources.")
	return nil
}

@ -1 +1 @@
Subproject commit 5d4d01a797a1ba2d6866799684bf05de20006e31
Subproject commit b485bf297974cbe4834d2e8e744ae7b7e5b2ad39

Binary file not shown.

@ -0,0 +1,49 @@
trigger:
tags:
include:
- '*'
variables:
GO_VERSION: "1.23.6"
NODE_VERSION: "22.x"
DOCKER_BUILDKIT: 1
pool:
vmImage: ubuntu-latest
jobs:
- job: Release
steps:
- checkout: self
submodules: true
persistCredentials: true
- task: NodeTool@0
inputs:
versionSpec: '$(NODE_VERSION)'
displayName: 'Install Node.js'
- task: GoTool@0
inputs:
version: "$(GO_VERSION)"
displayName: Install Go
- task: Docker@2
inputs:
containerRegistry: "PRO ACR"
command: "login"
addPipelineData: false
addBaseImageData: false
- task: CmdLine@2
displayName: "Install multiarch/qemu-user-static"
inputs:
script: |
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
- task: goreleaser@0
condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/'))
inputs:
version: "latest"
distribution: "goreleaser"
workdir: "$(Build.SourcesDirectory)"
args: "release --timeout 60m"
env:
AWS_ACCESS_KEY_ID: $(AWS_ACCESS_KEY_ID)
AWS_SECRET_ACCESS_KEY: $(AWS_SECRET_ACCESS_KEY)
GITHUB_TOKEN: $(GITHUB_TOKEN)

@ -1,58 +0,0 @@
package bootstrap
import (
"encoding/json"
"fmt"
"github.com/cloudreve/Cloudreve/v3/pkg/conf"
"github.com/cloudreve/Cloudreve/v3/pkg/request"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
"github.com/hashicorp/go-version"
)
// InitApplication prints the startup banner with version/commit/edition
// information and kicks off an asynchronous update check.
func InitApplication() {
	fmt.Print(`
   ___ _ _
  / __\ | ___ _ _ __| |_ __ _____ _____
 / / | |/ _ \| | | |/ _ | '__/ _ \ \ / / _ \
/ /___| | (_) | |_| | (_| | | | __/\ V / __/
\____/|_|\___/ \__,_|\__,_|_| \___| \_/ \___|
V` + conf.BackendVersion + `  Commit #` + conf.LastCommit + `  Pro=` + conf.IsPro + `
================================================
`)
	go CheckUpdate()
}
// GitHubRelease models the subset of the GitHub release API payload used by
// the update check.
type GitHubRelease struct {
	URL string `json:"html_url"`
	Name string `json:"name"`
	Tag string `json:"tag_name"`
}
// CheckUpdate queries the GitHub releases API and logs a notice when a newer
// release than the running backend version is available. All failures are
// logged as warnings and otherwise ignored (best-effort check).
func CheckUpdate() {
	client := request.NewClient()
	res, err := client.Request("GET", "https://api.github.com/repos/cloudreve/cloudreve/releases", nil).GetResponse()
	if err != nil {
		util.Log().Warning("更新检查失败, %s", err)
		return
	}
	var list []GitHubRelease
	if err := json.Unmarshal([]byte(res), &list); err != nil {
		util.Log().Warning("更新检查失败, %s", err)
		return
	}
	if len(list) > 0 {
		// Treats the first entry as the latest release — assumes the API
		// returns releases newest-first; TODO confirm. Unparseable version
		// strings are silently skipped.
		present, err1 := version.NewVersion(conf.BackendVersion)
		latest, err2 := version.NewVersion(list[0].Tag)
		if err1 == nil && err2 == nil && latest.GreaterThan(present) {
			util.Log().Info("有新的版本 [%s] 可用,下载:%s", list[0].Name, list[0].URL)
		}
	}
}

@ -1,75 +0,0 @@
package bootstrap
import (
"archive/zip"
"crypto/sha256"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
"github.com/pkg/errors"
"io"
"io/fs"
"sort"
"strings"
)
// NewFS parses a zip archive (given as a string) into an in-memory fs.FS with
// entries sorted by directory then file name. Unreadable archives abort via
// util.Log().Panic.
func NewFS(zipContent string) fs.FS {
	zipReader, err := zip.NewReader(strings.NewReader(zipContent), int64(len(zipContent)))
	if err != nil {
		util.Log().Panic("Static resource is not a valid zip file: %s", err)
	}
	var files []file
	err = fs.WalkDir(zipReader, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return errors.Errorf("无法获取[%s]的信息, %s, 跳过...", path, err)
		}
		// Skip the archive root itself.
		if path == "." {
			return nil
		}
		var f file
		if d.IsDir() {
			// Directories are recorded with a trailing slash and no data.
			f.name = path + "/"
		} else {
			f.name = path
			rc, err := zipReader.Open(path)
			if err != nil {
				return errors.Errorf("无法打开文件[%s], %s, 跳过...", path, err)
			}
			defer rc.Close()
			data, err := io.ReadAll(rc)
			if err != nil {
				return errors.Errorf("无法读取文件[%s], %s, 跳过...", path, err)
			}
			f.data = string(data)
			hash := sha256.Sum256(data)
			// Stores the bitwise complement of the SHA-256 digest —
			// presumably to match the hash layout expected by the in-memory
			// FS implementation; confirm before relying on it.
			for i := range f.hash {
				f.hash[i] = ^hash[i]
			}
		}
		files = append(files, f)
		return nil
	})
	if err != nil {
		util.Log().Panic("初始化静态资源失败: %s", err)
	}
	// Order entries by directory, then by entry name.
	sort.Slice(files, func(i, j int) bool {
		fi, fj := files[i], files[j]
		di, ei, _ := split(fi.name)
		dj, ej, _ := split(fj.name)
		if di != dj {
			return di < dj
		}
		return ei < ej
	})
	var embedFS FS
	embedFS.files = &files
	return embedFS
}

@ -1,132 +0,0 @@
package bootstrap
import (
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/models/scripts"
"github.com/cloudreve/Cloudreve/v3/pkg/aria2"
"github.com/cloudreve/Cloudreve/v3/pkg/auth"
"github.com/cloudreve/Cloudreve/v3/pkg/cache"
"github.com/cloudreve/Cloudreve/v3/pkg/cluster"
"github.com/cloudreve/Cloudreve/v3/pkg/conf"
"github.com/cloudreve/Cloudreve/v3/pkg/crontab"
"github.com/cloudreve/Cloudreve/v3/pkg/email"
"github.com/cloudreve/Cloudreve/v3/pkg/mq"
"github.com/cloudreve/Cloudreve/v3/pkg/task"
"github.com/cloudreve/Cloudreve/v3/pkg/wopi"
"github.com/gin-gonic/gin"
"io/fs"
"path/filepath"
)
// Init performs v3 startup initialization: it prints the banner, loads the
// config at path, then runs each registered dependency factory whose mode
// matches the configured run mode ("master", "slave", or "both").
func Init(path string, statics fs.FS) {
	InitApplication()
	conf.Init(path)
	// Switch gin to release mode when debug is disabled.
	if !conf.SystemConfig.Debug {
		gin.SetMode(gin.ReleaseMode)
	}
	// Each entry pairs a run mode with an initialization function. Order is
	// significant — e.g. cache.Init runs before cache.Restore, and model.Init
	// before the settings lookup that feeds cache.Restore.
	dependencies := []struct {
		mode string
		factory func()
	}{
		{
			"both",
			func() {
				scripts.Init()
			},
		},
		{
			"both",
			func() {
				cache.Init()
			},
		},
		{
			"slave",
			func() {
				model.InitSlaveDefaults()
			},
		},
		{
			"slave",
			func() {
				cache.InitSlaveOverwrites()
			},
		},
		{
			"master",
			func() {
				model.Init()
			},
		},
		{
			"both",
			func() {
				cache.Restore(filepath.Join(model.GetSettingByName("temp_path"), cache.DefaultCacheFile))
			},
		},
		{
			"both",
			func() {
				task.Init()
			},
		},
		{
			"master",
			func() {
				cluster.Init()
			},
		},
		{
			"master",
			func() {
				aria2.Init(false, cluster.Default, mq.GlobalMQ)
			},
		},
		{
			"master",
			func() {
				email.Init()
			},
		},
		{
			"master",
			func() {
				crontab.Init()
			},
		},
		{
			"master",
			func() {
				InitStatic(statics)
			},
		},
		{
			"slave",
			func() {
				cluster.InitController()
			},
		},
		{
			"both",
			func() {
				auth.Init()
			},
		},
		{
			"master",
			func() {
				wopi.Init()
			},
		},
	}
	// Run only the factories applicable to the configured mode.
	for _, dependency := range dependencies {
		if dependency.mode == conf.SystemConfig.Mode || dependency.mode == "both" {
			dependency.factory()
		}
	}
}

@ -1,18 +0,0 @@
package bootstrap
import (
"context"
"github.com/cloudreve/Cloudreve/v3/models/scripts/invoker"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
)
// RunScript executes the named database script and logs whether it succeeded.
func RunScript(name string) {
	scriptCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	err := invoker.RunDBScript(name, scriptCtx)
	if err != nil {
		util.Log().Error("Failed to execute database script: %s", err)
		return
	}
	util.Log().Info("Finish executing database script %q.", name)
}

@ -1,136 +0,0 @@
package bootstrap
import (
"bufio"
"encoding/json"
"io"
"io/fs"
"net/http"
"path/filepath"
"github.com/pkg/errors"
"github.com/cloudreve/Cloudreve/v3/pkg/conf"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
"github.com/gin-contrib/static"
)
// StaticFolder is the on-disk folder name that overrides embedded assets when
// present next to the executable.
const StaticFolder = "statics"

// GinFS adapts an http.FileSystem so it satisfies gin-contrib/static's
// ServeFileSystem interface.
type GinFS struct {
	FS http.FileSystem
}

// staticVersion mirrors the schema of the version.json manifest bundled with
// the frontend build.
type staticVersion struct {
	Name string `json:"name"`
	Version string `json:"version"`
}

// StaticFS is the built-in static file resource served to clients.
var StaticFS static.ServeFileSystem
// Open opens the named file from the wrapped filesystem.
func (b *GinFS) Open(name string) (http.File, error) {
	return b.FS.Open(name)
}
// Exists reports whether filepath can be opened from the wrapped filesystem;
// the route prefix is ignored.
func (b *GinFS) Exists(prefix string, filepath string) bool {
	if _, err := b.FS.Open(filepath); err != nil {
		return false
	}
	return true
}
// InitStatic prepares the global StaticFS used to serve frontend assets,
// preferring a local "statics" folder over the embedded build, then warns if
// the bundled version.json does not match the required static version.
func InitStatic(statics fs.FS) {
	if util.Exists(util.RelativePath(StaticFolder)) {
		util.Log().Info("Folder with name \"statics\" already exists, it will be used to serve static files.")
		StaticFS = static.LocalFile(util.RelativePath("statics"), false)
	} else {
		// Fall back to the assets embedded into the binary.
		embedFS, err := fs.Sub(statics, "assets/build")
		if err != nil {
			util.Log().Panic("Failed to initialize static resources: %s", err)
		}
		StaticFS = &GinFS{
			FS: http.FS(embedFS),
		}
	}
	// Validate the version manifest shipped with the static assets. All
	// validation failures below are warnings only — startup continues.
	f, err := StaticFS.Open("version.json")
	if err != nil {
		util.Log().Warning("Missing version identifier file in static resources, please delete \"statics\" folder and rebuild it.")
		return
	}
	b, err := io.ReadAll(f)
	if err != nil {
		util.Log().Warning("Failed to read version identifier file in static resources, please delete \"statics\" folder and rebuild it.")
		return
	}
	var v staticVersion
	if err := json.Unmarshal(b, &v); err != nil {
		util.Log().Warning("Failed to parse version identifier file in static resources: %s", err)
		return
	}
	// Pro builds ship a differently named frontend bundle.
	staticName := "cloudreve-frontend"
	if conf.IsPro == "true" {
		staticName += "-pro"
	}
	if v.Name != staticName {
		util.Log().Warning("Static resource version mismatch, please delete \"statics\" folder and rebuild it.")
		return
	}
	if v.Version != conf.RequiredStaticVersion {
		util.Log().Warning("Static resource version mismatch [Current %s, Desired: %s]please delete \"statics\" folder and rebuild it.", v.Version, conf.RequiredStaticVersion)
		return
	}
}
// Eject extracts the embedded static resources under "assets/build" into the
// local "statics" folder next to the executable. Failures while walking are
// logged; the function does not return an error.
func Eject(statics fs.FS) {
	embedFS, err := fs.Sub(statics, "assets/build")
	if err != nil {
		util.Log().Panic("Failed to initialize static resources: %s", err)
	}

	walk := func(relPath string, d fs.DirEntry, err error) error {
		if err != nil {
			return errors.Errorf("Failed to read info of %q: %s, skipping...", relPath, err)
		}
		if d.IsDir() {
			return nil
		}

		out, err := util.CreatNestedFile(filepath.Join(util.RelativePath(""), StaticFolder, relPath))
		if err != nil {
			// Check the error before deferring Close: the original deferred
			// out.Close() first, panicking on a nil handle when creation failed.
			return errors.Errorf("Failed to create file %q: %s, skipping...", relPath, err)
		}
		defer out.Close()

		util.Log().Info("Ejecting %q...", relPath)
		obj, err := embedFS.Open(relPath)
		if err != nil {
			// The original discarded this error and would have crashed in io.Copy.
			return errors.Errorf("Cannot open embedded file %q: %s, skipping...", relPath, err)
		}
		defer obj.Close()

		if _, err := io.Copy(out, bufio.NewReader(obj)); err != nil {
			return errors.Errorf("Cannot write file %q: %s, skipping...", relPath, err)
		}
		return nil
	}

	if err := fs.WalkDir(embedFS, ".", walk); err != nil {
		util.Log().Error("Error occurs while ejecting static resources: %s", err)
		return
	}
	util.Log().Info("Finish ejecting static resources.")
}

@ -0,0 +1,30 @@
package cmd
import (
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/application/statics"
"github.com/spf13/cobra"
"os"
)
func init() {
	rootCmd.AddCommand(ejectCmd)
}

// ejectCmd writes all embedded static files to disk via statics.Eject,
// exiting non-zero on failure.
var ejectCmd = &cobra.Command{
	Use: "eject",
	Short: "Eject all embedded static files",
	Run: func(cmd *cobra.Command, args []string) {
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
			dependency.WithProFlag(constants.IsPro == "true"),
		)
		logger := dep.Logger()
		if err := statics.Eject(dep.Logger(), dep.Statics()); err != nil {
			logger.Error("Failed to eject static files: %s", err)
			os.Exit(1)
		}
	},
}

@ -0,0 +1,69 @@
package cmd
import (
"os"
"path/filepath"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/application/migrator"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/spf13/cobra"
)
var (
	// v3ConfPath points at the v3 conf.ini ("--v3-conf"); required by migrate.
	v3ConfPath string
	// forceReset discards any saved migration state ("--force-reset").
	forceReset bool
)

func init() {
	rootCmd.AddCommand(migrateCmd)
	migrateCmd.PersistentFlags().StringVar(&v3ConfPath, "v3-conf", "", "Path to the v3 config file")
	migrateCmd.PersistentFlags().BoolVar(&forceReset, "force-reset", false, "Force reset migration state and start from beginning")
}
// migrateCmd migrates data from a v3 deployment into the v4 database.
// Progress is persisted as migration_state.json next to the v3 config so an
// interrupted run can resume; --force-reset starts over from scratch.
var migrateCmd = &cobra.Command{
	Use: "migrate",
	Short: "Migrate from v3 to v4",
	Run: func(cmd *cobra.Command, args []string) {
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
			dependency.WithRequiredDbVersion(constants.BackendVersion),
			dependency.WithProFlag(constants.IsPro == "true"),
		)
		logger := dep.Logger()
		logger.Info("Migrating from v3 to v4...")
		if v3ConfPath == "" {
			// Message fix: the flag is registered as "--v3-conf"; the original
			// message referred to it with a single dash.
			logger.Error("v3 config file is required, please use --v3-conf to specify the path.")
			os.Exit(1)
		}
		// The state file lives next to the v3 config so reruns resume from
		// the last completed step.
		stateFilePath := filepath.Join(filepath.Dir(v3ConfPath), "migration_state.json")
		if util.Exists(stateFilePath) && !forceReset {
			logger.Info("Found existing migration state file at %s. Migration will resume from the last successful step.", stateFilePath)
			logger.Info("If you want to start migration from the beginning, please use --force-reset flag.")
		} else if forceReset && util.Exists(stateFilePath) {
			logger.Info("Force resetting migration state. Will start from the beginning.")
			if err := os.Remove(stateFilePath); err != nil {
				logger.Error("Failed to remove migration state file: %s", err)
				os.Exit(1)
			}
		}
		// Named "mig" so the local does not shadow the imported migrator package.
		mig, err := migrator.NewMigrator(dep, v3ConfPath)
		if err != nil {
			logger.Error("Failed to create migrator: %s", err)
			os.Exit(1)
		}
		if err := mig.Migrate(); err != nil {
			logger.Error("Failed to migrate: %s", err)
			logger.Info("Migration failed but state has been saved. You can retry with the same command to resume from the last successful step.")
			os.Exit(1)
		}
		logger.Info("Migration from v3 to v4 completed successfully.")
	},
}

@ -0,0 +1,42 @@
package cmd
import (
"fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"os"
)
var (
	// confPath is the config file location, settable via --conf/-c.
	confPath string
)

func init() {
	rootCmd.PersistentFlags().StringVarP(&confPath, "conf", "c", util.DataPath("conf.ini"), "Path to the config file")
	rootCmd.PersistentFlags().BoolVarP(&util.UseWorkingDir, "use-working-dir", "w", false, "Use working directory, instead of executable directory")
}

// rootCmd is the bare "cloudreve" command. Its Run is intentionally empty:
// Execute rewrites bare invocations to the "server" subcommand.
var rootCmd = &cobra.Command{
	Use: "cloudreve",
	Short: "Cloudreve is a server-side self-hosted cloud storage platform",
	Long: `Self-hosted file management and sharing system, supports multiple storage providers.
Complete documentation is available at https://docs.cloudreve.org/`,
	Run: func(cmd *cobra.Command, args []string) {
		// Do Stuff Here
	},
}
// Execute dispatches the CLI. When the invocation does not name a known
// subcommand (and is not a help request), the arguments are re-routed to the
// default "server" subcommand before cobra dispatches them.
func Execute() {
	cmd, _, err := rootCmd.Find(os.Args[1:])
	// redirect to default server cmd if no cmd is given
	// NOTE(review): Flags().Parse is invoked here purely for its error value;
	// any parse failure other than pflag.ErrHelp also triggers the redirect —
	// confirm that is intended.
	if err == nil && cmd.Use == rootCmd.Use && cmd.Flags().Parse(os.Args[1:]) != pflag.ErrHelp {
		args := append([]string{"server"}, os.Args[1:]...)
		rootCmd.SetArgs(args)
	}
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}

@ -0,0 +1,60 @@
package cmd
import (
"os"
"os/signal"
"syscall"
"github.com/cloudreve/Cloudreve/v4/application"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/spf13/cobra"
)
var (
	// licenseKey optionally carries a Cloudreve Pro license ("--license-key"/-l).
	licenseKey string
)

func init() {
	rootCmd.AddCommand(serverCmd)
	serverCmd.PersistentFlags().StringVarP(&licenseKey, "license-key", "l", "", "License key of your Cloudreve Pro")
}
// serverCmd starts the Cloudreve server and shuts it down gracefully when an
// interrupt/TERM/HUP/QUIT signal arrives.
var serverCmd = &cobra.Command{
	Use: "server",
	Short: "Start a Cloudreve server with the given config file",
	Run: func(cmd *cobra.Command, args []string) {
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
			dependency.WithProFlag(constants.IsProBool),
			dependency.WithRequiredDbVersion(constants.BackendVersion),
			dependency.WithLicenseKey(licenseKey),
		)
		server := application.NewServer(dep)
		logger := dep.Logger()
		server.PrintBanner()
		// Graceful shutdown after received signal.
		sigChan := make(chan os.Signal, 1)
		signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT)
		go shutdown(sigChan, logger, server)
		if err := server.Start(); err != nil {
			logger.Error("Failed to start server: %s", err)
			os.Exit(1)
		}
		// Wait for the shutdown goroutine to finish: shutdown closes sigChan
		// after server.Close(), which unblocks this receive.
		defer func() {
			<-sigChan
		}()
	},
}
// shutdown blocks until a termination signal arrives, then closes the server
// and the signal channel so the main goroutine can finish.
func shutdown(sigChan chan os.Signal, logger logging.Logger, server application.Server) {
	received := <-sigChan
	logger.Info("Signal %s received, shutting down server...", received)
	server.Close()
	close(sigChan)
}

@ -1,45 +1,40 @@
version: "3.8"
services:
redis:
container_name: redis
image: bitnami/redis:latest
restart: unless-stopped
pro:
image: cloudreve.azurecr.io/cloudreve/pro:latest
container_name: cloudreve-pro-backend
depends_on:
- postgresql
- redis
restart: always
ports:
- 5212:5212
environment:
- ALLOW_EMPTY_PASSWORD=yes
- CR_CONF_Database.Type=postgres
- CR_CONF_Database.Host=postgresql
- CR_CONF_Database.User=cloudreve
- CR_CONF_Database.Name=cloudreve
- CR_CONF_Database.Port=5432
- CR_CONF_Redis.Server=redis:6379
- CR_LICENSE_KEY=${CR_LICENSE_KEY}
volumes:
- redis_data:/bitnami/redis/data
- backend_data:/cloudreve/data
cloudreve:
container_name: cloudreve
image: cloudreve/cloudreve:latest
restart: unless-stopped
ports:
- "5212:5212"
postgresql:
image: postgres:latest
container_name: postgresql
environment:
- POSTGRES_USER=cloudreve
- POSTGRES_DB=cloudreve
- POSTGRES_HOST_AUTH_METHOD=trust
volumes:
- temp_data:/data
- ./cloudreve/uploads:/cloudreve/uploads
- ./cloudreve/conf.ini:/cloudreve/conf.ini
- ./cloudreve/cloudreve.db:/cloudreve/cloudreve.db
- ./cloudreve/avatar:/cloudreve/avatar
depends_on:
- aria2
- database_postgres:/var/lib/postgresql/data
aria2:
container_name: aria2
image: p3terx/aria2-pro # third party image, please keep notice what you are doing
restart: unless-stopped
environment:
- RPC_SECRET=your_aria_rpc_token # aria rpc token, customize your own
- RPC_PORT=6800
redis:
image: redis:latest
container_name: redis
volumes:
- ./aria2/config:/config
- temp_data:/data
- backend_data:/data
volumes:
redis_data:
driver: local
temp_data:
driver: local
driver_opts:
type: none
device: $PWD/data
o: bind
backend_data:
database_postgres:

File diff suppressed because it is too large Load Diff

@ -0,0 +1,242 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"encoding/json"
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// DavAccount is the model entity for the DavAccount schema.
type DavAccount struct {
config `json:"-"`
// ID of the ent.
ID int `json:"id,omitempty"`
// CreatedAt holds the value of the "created_at" field.
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// DeletedAt holds the value of the "deleted_at" field.
DeletedAt *time.Time `json:"deleted_at,omitempty"`
// Name holds the value of the "name" field.
Name string `json:"name,omitempty"`
// URI holds the value of the "uri" field.
URI string `json:"uri,omitempty"`
// Password holds the value of the "password" field.
Password string `json:"-"`
// Options holds the value of the "options" field.
Options *boolset.BooleanSet `json:"options,omitempty"`
// Props holds the value of the "props" field.
Props *types.DavAccountProps `json:"props,omitempty"`
// OwnerID holds the value of the "owner_id" field.
OwnerID int `json:"owner_id,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the DavAccountQuery when eager-loading is set.
Edges DavAccountEdges `json:"edges"`
selectValues sql.SelectValues
}
// DavAccountEdges holds the relations/edges for other nodes in the graph.
type DavAccountEdges struct {
// Owner holds the value of the owner edge.
Owner *User `json:"owner,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [1]bool
}
// OwnerOrErr returns the Owner value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e DavAccountEdges) OwnerOrErr() (*User, error) {
if e.loadedTypes[0] {
if e.Owner == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: user.Label}
}
return e.Owner, nil
}
return nil, &NotLoadedError{edge: "owner"}
}
// scanValues returns the types for scanning values from sql.Rows.
func (*DavAccount) scanValues(columns []string) ([]any, error) {
values := make([]any, len(columns))
for i := range columns {
switch columns[i] {
case davaccount.FieldProps:
values[i] = new([]byte)
case davaccount.FieldOptions:
values[i] = new(boolset.BooleanSet)
case davaccount.FieldID, davaccount.FieldOwnerID:
values[i] = new(sql.NullInt64)
case davaccount.FieldName, davaccount.FieldURI, davaccount.FieldPassword:
values[i] = new(sql.NullString)
case davaccount.FieldCreatedAt, davaccount.FieldUpdatedAt, davaccount.FieldDeletedAt:
values[i] = new(sql.NullTime)
default:
values[i] = new(sql.UnknownType)
}
}
return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DavAccount fields.
func (da *DavAccount) assignValues(columns []string, values []any) error {
if m, n := len(values), len(columns); m < n {
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
}
for i := range columns {
switch columns[i] {
case davaccount.FieldID:
value, ok := values[i].(*sql.NullInt64)
if !ok {
return fmt.Errorf("unexpected type %T for field id", value)
}
da.ID = int(value.Int64)
case davaccount.FieldCreatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field created_at", values[i])
} else if value.Valid {
da.CreatedAt = value.Time
}
case davaccount.FieldUpdatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
} else if value.Valid {
da.UpdatedAt = value.Time
}
case davaccount.FieldDeletedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
} else if value.Valid {
da.DeletedAt = new(time.Time)
*da.DeletedAt = value.Time
}
case davaccount.FieldName:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field name", values[i])
} else if value.Valid {
da.Name = value.String
}
case davaccount.FieldURI:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field uri", values[i])
} else if value.Valid {
da.URI = value.String
}
case davaccount.FieldPassword:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field password", values[i])
} else if value.Valid {
da.Password = value.String
}
case davaccount.FieldOptions:
if value, ok := values[i].(*boolset.BooleanSet); !ok {
return fmt.Errorf("unexpected type %T for field options", values[i])
} else if value != nil {
da.Options = value
}
case davaccount.FieldProps:
if value, ok := values[i].(*[]byte); !ok {
return fmt.Errorf("unexpected type %T for field props", values[i])
} else if value != nil && len(*value) > 0 {
if err := json.Unmarshal(*value, &da.Props); err != nil {
return fmt.Errorf("unmarshal field props: %w", err)
}
}
case davaccount.FieldOwnerID:
if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field owner_id", values[i])
} else if value.Valid {
da.OwnerID = int(value.Int64)
}
default:
da.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the DavAccount.
// This includes values selected through modifiers, order, etc.
func (da *DavAccount) Value(name string) (ent.Value, error) {
return da.selectValues.Get(name)
}
// QueryOwner queries the "owner" edge of the DavAccount entity.
func (da *DavAccount) QueryOwner() *UserQuery {
return NewDavAccountClient(da.config).QueryOwner(da)
}
// Update returns a builder for updating this DavAccount.
// Note that you need to call DavAccount.Unwrap() before calling this method if this DavAccount
// was returned from a transaction, and the transaction was committed or rolled back.
func (da *DavAccount) Update() *DavAccountUpdateOne {
return NewDavAccountClient(da.config).UpdateOne(da)
}
// Unwrap unwraps the DavAccount entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (da *DavAccount) Unwrap() *DavAccount {
_tx, ok := da.config.driver.(*txDriver)
if !ok {
panic("ent: DavAccount is not a transactional entity")
}
da.config.driver = _tx.drv
return da
}
// String implements the fmt.Stringer.
func (da *DavAccount) String() string {
var builder strings.Builder
builder.WriteString("DavAccount(")
builder.WriteString(fmt.Sprintf("id=%v, ", da.ID))
builder.WriteString("created_at=")
builder.WriteString(da.CreatedAt.Format(time.ANSIC))
builder.WriteString(", ")
builder.WriteString("updated_at=")
builder.WriteString(da.UpdatedAt.Format(time.ANSIC))
builder.WriteString(", ")
if v := da.DeletedAt; v != nil {
builder.WriteString("deleted_at=")
builder.WriteString(v.Format(time.ANSIC))
}
builder.WriteString(", ")
builder.WriteString("name=")
builder.WriteString(da.Name)
builder.WriteString(", ")
builder.WriteString("uri=")
builder.WriteString(da.URI)
builder.WriteString(", ")
builder.WriteString("password=<sensitive>")
builder.WriteString(", ")
builder.WriteString("options=")
builder.WriteString(fmt.Sprintf("%v", da.Options))
builder.WriteString(", ")
builder.WriteString("props=")
builder.WriteString(fmt.Sprintf("%v", da.Props))
builder.WriteString(", ")
builder.WriteString("owner_id=")
builder.WriteString(fmt.Sprintf("%v", da.OwnerID))
builder.WriteByte(')')
return builder.String()
}
// SetOwner manually sets the "owner" edge as loaded state, storing v on the
// entity so edge accessors treat the owner as already fetched.
func (da *DavAccount) SetOwner(v *User) {
	da.Edges.Owner = v
	// Index 0 corresponds to the owner edge in loadedTypes.
	da.Edges.loadedTypes[0] = true
}
// DavAccounts is a parsable slice of DavAccount entities.
type DavAccounts []*DavAccount

@ -0,0 +1,144 @@
// Code generated by ent, DO NOT EDIT.
package davaccount
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
	// Label holds the string label denoting the davaccount type in the database.
	Label = "dav_account"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldURI holds the string denoting the uri field in the database.
	FieldURI = "uri"
	// FieldPassword holds the string denoting the password field in the database.
	FieldPassword = "password"
	// FieldOptions holds the string denoting the options field in the database.
	FieldOptions = "options"
	// FieldProps holds the string denoting the props field in the database.
	FieldProps = "props"
	// FieldOwnerID holds the string denoting the owner_id field in the database.
	FieldOwnerID = "owner_id"
	// EdgeOwner holds the string denoting the owner edge name in mutations.
	EdgeOwner = "owner"
	// Table holds the table name of the davaccount in the database.
	Table = "dav_accounts"
	// OwnerTable is the table that holds the owner relation/edge.
	OwnerTable = "dav_accounts"
	// OwnerInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	OwnerInverseTable = "users"
	// OwnerColumn is the table column denoting the owner relation/edge.
	OwnerColumn = "owner_id"
)

// Columns holds all SQL columns for davaccount fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldName,
	FieldURI,
	FieldPassword,
	FieldOptions,
	FieldProps,
	FieldOwnerID,
}

// ValidColumn reports whether the given column name belongs to the
// davaccount table (i.e. it appears in Columns).
func ValidColumn(column string) bool {
	for _, known := range Columns {
		if known == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks holds the mutation hooks for the DavAccount type
	// (populated by the ent/runtime package; see note above).
	Hooks [1]ent.Hook
	// Interceptors holds the query interceptors for the DavAccount type
	// (populated by the ent/runtime package; see note above).
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
// OrderOption defines the ordering options for the DavAccount queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByURI orders the results by the uri field.
func ByURI(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldURI, opts...).ToFunc()
}

// ByPassword orders the results by the password field.
func ByPassword(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPassword, opts...).ToFunc()
}

// ByOwnerID orders the results by the owner_id field.
// Note: no ordering helpers are generated for the options/props fields.
func ByOwnerID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOwnerID, opts...).ToFunc()
}

// ByOwnerField orders the results by owner field.
// Unlike the By* helpers above, this sorts by a column of the joined owner
// (User) row, using the owner edge step to build the join.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
	}
}

// newOwnerStep builds the graph step describing the owner edge:
// dav_accounts.owner_id -> users.id (M2O, inverse side).
func newOwnerStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(OwnerInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
	)
}

@ -0,0 +1,530 @@
// Code generated by ent, DO NOT EDIT.
package davaccount
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// ID filters vertices based on their ID field.
// It is shorthand for IDEQ.
func ID(id int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldID, id))
}
// The helpers below are bare-name equality shorthands: Field(v) == FieldEQ(v).

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldUpdatedAt, v))
}

// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldDeletedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldName, v))
}

// URI applies equality check predicate on the "uri" field. It's identical to URIEQ.
func URI(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldURI, v))
}

// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ.
func Password(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldPassword, v))
}

// Options applies equality check predicate on the "options" field. It's identical to OptionsEQ.
func Options(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldOptions, v))
}

// OwnerID applies equality check predicate on the "owner_id" field. It's identical to OwnerIDEQ.
func OwnerID(v int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldOwnerID, v))
}
// Equality and range predicates for the timestamp fields. deleted_at is
// nillable, so it additionally gets IsNil/NotNil predicates at the end of
// this section.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldUpdatedAt, v))
}

// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldDeletedAt, v))
}

// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldDeletedAt, v))
}

// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldDeletedAt, vs...))
}

// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldDeletedAt, vs...))
}

// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldDeletedAt, v))
}

// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldDeletedAt, v))
}

// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldDeletedAt, v))
}

// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldDeletedAt, v))
}

// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIsNull(FieldDeletedAt))
}

// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotNull(FieldDeletedAt))
}
// String predicates for the "name" and "uri" fields: equality, ordering,
// substring (Contains/HasPrefix/HasSuffix) and case-insensitive
// (EqualFold/ContainsFold) matching.

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContainsFold(FieldName, v))
}

// URIEQ applies the EQ predicate on the "uri" field.
func URIEQ(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldURI, v))
}

// URINEQ applies the NEQ predicate on the "uri" field.
func URINEQ(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldURI, v))
}

// URIIn applies the In predicate on the "uri" field.
func URIIn(vs ...string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldURI, vs...))
}

// URINotIn applies the NotIn predicate on the "uri" field.
func URINotIn(vs ...string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldURI, vs...))
}

// URIGT applies the GT predicate on the "uri" field.
func URIGT(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldURI, v))
}

// URIGTE applies the GTE predicate on the "uri" field.
func URIGTE(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldURI, v))
}

// URILT applies the LT predicate on the "uri" field.
func URILT(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldURI, v))
}

// URILTE applies the LTE predicate on the "uri" field.
func URILTE(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldURI, v))
}

// URIContains applies the Contains predicate on the "uri" field.
func URIContains(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContains(FieldURI, v))
}

// URIHasPrefix applies the HasPrefix predicate on the "uri" field.
func URIHasPrefix(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldHasPrefix(FieldURI, v))
}

// URIHasSuffix applies the HasSuffix predicate on the "uri" field.
func URIHasSuffix(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldHasSuffix(FieldURI, v))
}

// URIEqualFold applies the EqualFold predicate on the "uri" field.
func URIEqualFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEqualFold(FieldURI, v))
}

// URIContainsFold applies the ContainsFold predicate on the "uri" field.
func URIContainsFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContainsFold(FieldURI, v))
}
// Predicates for the "password" field.
// NOTE(review): these compare the stored column value directly; whether that
// value is hashed or plaintext is not visible from this file — confirm at the
// call sites before using substring/fold matching on it.

// PasswordEQ applies the EQ predicate on the "password" field.
func PasswordEQ(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldPassword, v))
}

// PasswordNEQ applies the NEQ predicate on the "password" field.
func PasswordNEQ(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldPassword, v))
}

// PasswordIn applies the In predicate on the "password" field.
func PasswordIn(vs ...string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldPassword, vs...))
}

// PasswordNotIn applies the NotIn predicate on the "password" field.
func PasswordNotIn(vs ...string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldPassword, vs...))
}

// PasswordGT applies the GT predicate on the "password" field.
func PasswordGT(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldPassword, v))
}

// PasswordGTE applies the GTE predicate on the "password" field.
func PasswordGTE(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldPassword, v))
}

// PasswordLT applies the LT predicate on the "password" field.
func PasswordLT(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldPassword, v))
}

// PasswordLTE applies the LTE predicate on the "password" field.
func PasswordLTE(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldPassword, v))
}

// PasswordContains applies the Contains predicate on the "password" field.
func PasswordContains(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContains(FieldPassword, v))
}

// PasswordHasPrefix applies the HasPrefix predicate on the "password" field.
func PasswordHasPrefix(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldHasPrefix(FieldPassword, v))
}

// PasswordHasSuffix applies the HasSuffix predicate on the "password" field.
func PasswordHasSuffix(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldHasSuffix(FieldPassword, v))
}

// PasswordEqualFold applies the EqualFold predicate on the "password" field.
func PasswordEqualFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEqualFold(FieldPassword, v))
}

// PasswordContainsFold applies the ContainsFold predicate on the "password" field.
func PasswordContainsFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContainsFold(FieldPassword, v))
}
// Predicates for "options" (a *boolset.BooleanSet, compared as a whole value)
// and "props" (which only supports nil checks here).

// OptionsEQ applies the EQ predicate on the "options" field.
func OptionsEQ(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldOptions, v))
}

// OptionsNEQ applies the NEQ predicate on the "options" field.
func OptionsNEQ(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldOptions, v))
}

// OptionsIn applies the In predicate on the "options" field.
func OptionsIn(vs ...*boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldOptions, vs...))
}

// OptionsNotIn applies the NotIn predicate on the "options" field.
func OptionsNotIn(vs ...*boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldOptions, vs...))
}

// OptionsGT applies the GT predicate on the "options" field.
func OptionsGT(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldOptions, v))
}

// OptionsGTE applies the GTE predicate on the "options" field.
func OptionsGTE(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldOptions, v))
}

// OptionsLT applies the LT predicate on the "options" field.
func OptionsLT(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldOptions, v))
}

// OptionsLTE applies the LTE predicate on the "options" field.
func OptionsLTE(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldOptions, v))
}

// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIsNull(FieldProps))
}

// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotNull(FieldProps))
}
// Predicates for "owner_id". As the column backing the owner edge, only
// equality and membership predicates are provided for it.

// OwnerIDEQ applies the EQ predicate on the "owner_id" field.
func OwnerIDEQ(v int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldOwnerID, v))
}

// OwnerIDNEQ applies the NEQ predicate on the "owner_id" field.
func OwnerIDNEQ(v int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldOwnerID, v))
}

// OwnerIDIn applies the In predicate on the "owner_id" field.
func OwnerIDIn(vs ...int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldOwnerID, vs...))
}

// OwnerIDNotIn applies the NotIn predicate on the "owner_id" field.
func OwnerIDNotIn(vs ...int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldOwnerID, vs...))
}
// HasOwner applies the HasEdge predicate on the "owner" edge.
// It matches accounts that have any neighbor through the owner edge.
func HasOwner() predicate.DavAccount {
	return predicate.DavAccount(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates).
// The given User predicates are applied inside the neighbor sub-query.
func HasOwnerWith(preds ...predicate.User) predicate.DavAccount {
	return predicate.DavAccount(func(s *sql.Selector) {
		step := newOwnerStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.DavAccount) predicate.DavAccount {
	return predicate.DavAccount(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.DavAccount) predicate.DavAccount {
	return predicate.DavAccount(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.DavAccount) predicate.DavAccount {
	return predicate.DavAccount(sql.NotPredicates(p))
}

@ -0,0 +1,968 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// DavAccountCreate is the builder for creating a DavAccount entity.
type DavAccountCreate struct {
	config
	mutation *DavAccountMutation  // staged field/edge values for the new row
	hooks    []Hook               // hooks applied around Save
	conflict []sql.ConflictOption // optional ON CONFLICT / upsert clauses
}
// Field and edge setters for DavAccountCreate. Each stages a value on the
// builder's mutation; the SetNillableX variants are no-ops when given nil,
// which makes them convenient with optional inputs.

// SetCreatedAt sets the "created_at" field.
func (dac *DavAccountCreate) SetCreatedAt(t time.Time) *DavAccountCreate {
	dac.mutation.SetCreatedAt(t)
	return dac
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (dac *DavAccountCreate) SetNillableCreatedAt(t *time.Time) *DavAccountCreate {
	if t != nil {
		dac.SetCreatedAt(*t)
	}
	return dac
}

// SetUpdatedAt sets the "updated_at" field.
func (dac *DavAccountCreate) SetUpdatedAt(t time.Time) *DavAccountCreate {
	dac.mutation.SetUpdatedAt(t)
	return dac
}

// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (dac *DavAccountCreate) SetNillableUpdatedAt(t *time.Time) *DavAccountCreate {
	if t != nil {
		dac.SetUpdatedAt(*t)
	}
	return dac
}

// SetDeletedAt sets the "deleted_at" field.
func (dac *DavAccountCreate) SetDeletedAt(t time.Time) *DavAccountCreate {
	dac.mutation.SetDeletedAt(t)
	return dac
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dac *DavAccountCreate) SetNillableDeletedAt(t *time.Time) *DavAccountCreate {
	if t != nil {
		dac.SetDeletedAt(*t)
	}
	return dac
}

// SetName sets the "name" field.
func (dac *DavAccountCreate) SetName(s string) *DavAccountCreate {
	dac.mutation.SetName(s)
	return dac
}

// SetURI sets the "uri" field.
func (dac *DavAccountCreate) SetURI(s string) *DavAccountCreate {
	dac.mutation.SetURI(s)
	return dac
}

// SetPassword sets the "password" field.
func (dac *DavAccountCreate) SetPassword(s string) *DavAccountCreate {
	dac.mutation.SetPassword(s)
	return dac
}

// SetOptions sets the "options" field.
func (dac *DavAccountCreate) SetOptions(bs *boolset.BooleanSet) *DavAccountCreate {
	dac.mutation.SetOptions(bs)
	return dac
}

// SetProps sets the "props" field.
func (dac *DavAccountCreate) SetProps(tap *types.DavAccountProps) *DavAccountCreate {
	dac.mutation.SetProps(tap)
	return dac
}

// SetOwnerID sets the "owner_id" field.
func (dac *DavAccountCreate) SetOwnerID(i int) *DavAccountCreate {
	dac.mutation.SetOwnerID(i)
	return dac
}

// SetOwner sets the "owner" edge to the User entity.
// It is a convenience wrapper around SetOwnerID using the entity's ID.
func (dac *DavAccountCreate) SetOwner(u *User) *DavAccountCreate {
	return dac.SetOwnerID(u.ID)
}

// Mutation returns the DavAccountMutation object of the builder.
func (dac *DavAccountCreate) Mutation() *DavAccountMutation {
	return dac.mutation
}
// Save creates the DavAccount in the database.
// It applies the schema defaults first, then runs sqlSave through the
// registered hooks chain.
func (dac *DavAccountCreate) Save(ctx context.Context) (*DavAccount, error) {
	if err := dac.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, dac.sqlSave, dac.mutation, dac.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (dac *DavAccountCreate) SaveX(ctx context.Context) *DavAccount {
	v, err := dac.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
// It is Save with the created entity discarded.
func (dac *DavAccountCreate) Exec(ctx context.Context) error {
	_, err := dac.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dac *DavAccountCreate) ExecX(ctx context.Context) {
	if err := dac.Exec(ctx); err != nil {
		panic(err)
	}
}
// defaults sets the default values of the builder before save.
// It fills created_at/updated_at from the schema default functions when the
// caller did not set them, and errors if those functions were never wired up
// (i.e. the ent/runtime package was not imported).
func (dac *DavAccountCreate) defaults() error {
	if _, ok := dac.mutation.CreatedAt(); !ok {
		if davaccount.DefaultCreatedAt == nil {
			return fmt.Errorf("ent: uninitialized davaccount.DefaultCreatedAt (forgotten import ent/runtime?)")
		}
		v := davaccount.DefaultCreatedAt()
		dac.mutation.SetCreatedAt(v)
	}
	if _, ok := dac.mutation.UpdatedAt(); !ok {
		if davaccount.DefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized davaccount.DefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := davaccount.DefaultUpdatedAt()
		dac.mutation.SetUpdatedAt(v)
	}
	return nil
}
// check runs all checks and user-defined validators on the builder.
// It verifies that every required field has been staged on the mutation.
func (dac *DavAccountCreate) check() error {
	if _, ok := dac.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DavAccount.created_at"`)}
	}
	if _, ok := dac.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DavAccount.updated_at"`)}
	}
	if _, ok := dac.mutation.Name(); !ok {
		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "DavAccount.name"`)}
	}
	if _, ok := dac.mutation.URI(); !ok {
		return &ValidationError{Name: "uri", err: errors.New(`ent: missing required field "DavAccount.uri"`)}
	}
	if _, ok := dac.mutation.Password(); !ok {
		return &ValidationError{Name: "password", err: errors.New(`ent: missing required field "DavAccount.password"`)}
	}
	if _, ok := dac.mutation.Options(); !ok {
		return &ValidationError{Name: "options", err: errors.New(`ent: missing required field "DavAccount.options"`)}
	}
	if _, ok := dac.mutation.OwnerID(); !ok {
		return &ValidationError{Name: "owner_id", err: errors.New(`ent: missing required field "DavAccount.owner_id"`)}
	}
	// The owner edge is backed by the owner_id column, so the same missing
	// value is checked twice: once as a field, once as a required edge.
	if _, ok := dac.mutation.OwnerID(); !ok {
		return &ValidationError{Name: "owner", err: errors.New(`ent: missing required edge "DavAccount.owner"`)}
	}
	return nil
}
// sqlSave validates the builder, inserts the row, and returns the created
// entity with its database-assigned ID. Constraint violations are wrapped
// into *ConstraintError.
func (dac *DavAccountCreate) sqlSave(ctx context.Context) (*DavAccount, error) {
	if err := dac.check(); err != nil {
		return nil, err
	}
	_node, _spec := dac.createSpec()
	if err := sqlgraph.CreateNode(ctx, dac.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Record the assigned ID on the node and mark the mutation as done.
	id := _spec.ID.Value.(int64)
	_node.ID = int(id)
	dac.mutation.id = &_node.ID
	dac.mutation.done = true
	return _node, nil
}
// createSpec translates the staged mutation into a sqlgraph.CreateSpec and
// the in-memory node that mirrors the values to be inserted.
func (dac *DavAccountCreate) createSpec() (*DavAccount, *sqlgraph.CreateSpec) {
	var (
		_node = &DavAccount{config: dac.config}
		_spec = sqlgraph.NewCreateSpec(davaccount.Table, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	)
	// Honor a manually assigned ID when the caller staged one.
	if id, ok := dac.mutation.ID(); ok {
		_node.ID = id
		id64 := int64(id)
		_spec.ID.Value = id64
	}
	_spec.OnConflict = dac.conflict
	if value, ok := dac.mutation.CreatedAt(); ok {
		_spec.SetField(davaccount.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := dac.mutation.UpdatedAt(); ok {
		_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := dac.mutation.DeletedAt(); ok {
		_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
		// DeletedAt is nillable on the entity, hence stored as a pointer.
		_node.DeletedAt = &value
	}
	if value, ok := dac.mutation.Name(); ok {
		_spec.SetField(davaccount.FieldName, field.TypeString, value)
		_node.Name = value
	}
	if value, ok := dac.mutation.URI(); ok {
		_spec.SetField(davaccount.FieldURI, field.TypeString, value)
		_node.URI = value
	}
	if value, ok := dac.mutation.Password(); ok {
		_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
		_node.Password = value
	}
	if value, ok := dac.mutation.Options(); ok {
		// Options is persisted as a bytes column.
		_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
		_node.Options = value
	}
	if value, ok := dac.mutation.Props(); ok {
		// Props is persisted as a JSON column.
		_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
		_node.Props = value
	}
	// Wire the owner edge: an M2O inverse edge writes the owner_id FK column.
	if nodes := dac.mutation.OwnerIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O: a single owner; mirror it onto the node's owner_id field.
		_node.OwnerID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.DavAccount.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// that was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.DavAccountUpsert) {
//			u.SetCreatedAt(v + v)
//		}).
//		Exec(ctx)
func (dac *DavAccountCreate) OnConflict(opts ...sql.ConflictOption) *DavAccountUpsertOne {
	dac.conflict = opts
	return &DavAccountUpsertOne{
		create: dac,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (dac *DavAccountCreate) OnConflictColumns(columns ...string) *DavAccountUpsertOne {
	dac.conflict = append(dac.conflict, sql.ConflictColumns(columns...))
	return &DavAccountUpsertOne{
		create: dac,
	}
}
type (
	// DavAccountUpsertOne is the builder for "upsert"-ing
	// one DavAccount node.
	DavAccountUpsertOne struct {
		create *DavAccountCreate
	}

	// DavAccountUpsert is the "OnConflict" setter.
	// It embeds *sql.UpdateSet, which holds the SET clause applied when the
	// insert hits a conflicting row.
	DavAccountUpsert struct {
		*sql.UpdateSet
	}
)
// SetUpdatedAt sets the "updated_at" field.
func (u *DavAccountUpsert) SetUpdatedAt(v time.Time) *DavAccountUpsert {
	u.Set(davaccount.FieldUpdatedAt, v)
	return u
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
// SetExcluded references the conflicting row proposed for insertion
// (the SQL "excluded" / VALUES() pseudo-row).
func (u *DavAccountUpsert) UpdateUpdatedAt() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldUpdatedAt)
	return u
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DavAccountUpsert) SetDeletedAt(v time.Time) *DavAccountUpsert {
	u.Set(davaccount.FieldDeletedAt, v)
	return u
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateDeletedAt() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldDeletedAt)
	return u
}

// ClearDeletedAt clears the value of the "deleted_at" field (sets it to NULL).
func (u *DavAccountUpsert) ClearDeletedAt() *DavAccountUpsert {
	u.SetNull(davaccount.FieldDeletedAt)
	return u
}

// SetName sets the "name" field.
func (u *DavAccountUpsert) SetName(v string) *DavAccountUpsert {
	u.Set(davaccount.FieldName, v)
	return u
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateName() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldName)
	return u
}

// SetURI sets the "uri" field.
func (u *DavAccountUpsert) SetURI(v string) *DavAccountUpsert {
	u.Set(davaccount.FieldURI, v)
	return u
}

// UpdateURI sets the "uri" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateURI() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldURI)
	return u
}

// SetPassword sets the "password" field.
func (u *DavAccountUpsert) SetPassword(v string) *DavAccountUpsert {
	u.Set(davaccount.FieldPassword, v)
	return u
}

// UpdatePassword sets the "password" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdatePassword() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldPassword)
	return u
}

// SetOptions sets the "options" field.
func (u *DavAccountUpsert) SetOptions(v *boolset.BooleanSet) *DavAccountUpsert {
	u.Set(davaccount.FieldOptions, v)
	return u
}

// UpdateOptions sets the "options" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateOptions() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldOptions)
	return u
}

// SetProps sets the "props" field.
func (u *DavAccountUpsert) SetProps(v *types.DavAccountProps) *DavAccountUpsert {
	u.Set(davaccount.FieldProps, v)
	return u
}

// UpdateProps sets the "props" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateProps() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldProps)
	return u
}

// ClearProps clears the value of the "props" field (sets it to NULL).
func (u *DavAccountUpsert) ClearProps() *DavAccountUpsert {
	u.SetNull(davaccount.FieldProps)
	return u
}

// SetOwnerID sets the "owner_id" field.
func (u *DavAccountUpsert) SetOwnerID(v int) *DavAccountUpsert {
	u.Set(davaccount.FieldOwnerID, v)
	return u
}

// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateOwnerID() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldOwnerID)
	return u
}
// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *DavAccountUpsertOne) UpdateNewValues() *DavAccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// If "created_at" was explicitly set on create, keep the stored value
		// on conflict instead of overwriting it with the proposed one.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(davaccount.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *DavAccountUpsertOne) Ignore() *DavAccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *DavAccountUpsertOne) DoNothing() *DavAccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the DavAccountCreate.OnConflict
// documentation for more info. Options appended here run after any resolvers
// registered earlier, so explicit overrides win.
func (u *DavAccountUpsertOne) Update(set func(*DavAccountUpsert)) *DavAccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&DavAccountUpsert{UpdateSet: update})
	}))
	return u
}
// The helpers below mirror the DavAccountUpsert field setters and register
// them on the one-row upsert builder via Update.

// SetUpdatedAt sets the "updated_at" field.
func (u *DavAccountUpsertOne) SetUpdatedAt(v time.Time) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.SetUpdatedAt(v) })
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateUpdatedAt() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateUpdatedAt() })
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DavAccountUpsertOne) SetDeletedAt(v time.Time) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.SetDeletedAt(v) })
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateDeletedAt() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateDeletedAt() })
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DavAccountUpsertOne) ClearDeletedAt() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.ClearDeletedAt() })
}

// SetName sets the "name" field.
func (u *DavAccountUpsertOne) SetName(v string) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.SetName(v) })
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateName() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateName() })
}

// SetURI sets the "uri" field.
func (u *DavAccountUpsertOne) SetURI(v string) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.SetURI(v) })
}

// UpdateURI sets the "uri" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateURI() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateURI() })
}

// SetPassword sets the "password" field.
func (u *DavAccountUpsertOne) SetPassword(v string) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.SetPassword(v) })
}

// UpdatePassword sets the "password" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdatePassword() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.UpdatePassword() })
}

// SetOptions sets the "options" field.
func (u *DavAccountUpsertOne) SetOptions(v *boolset.BooleanSet) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.SetOptions(v) })
}

// UpdateOptions sets the "options" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateOptions() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateOptions() })
}

// SetProps sets the "props" field.
func (u *DavAccountUpsertOne) SetProps(v *types.DavAccountProps) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.SetProps(v) })
}

// UpdateProps sets the "props" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateProps() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateProps() })
}

// ClearProps clears the value of the "props" field.
func (u *DavAccountUpsertOne) ClearProps() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.ClearProps() })
}

// SetOwnerID sets the "owner_id" field.
func (u *DavAccountUpsertOne) SetOwnerID(v int) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.SetOwnerID(v) })
}

// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateOwnerID() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateOwnerID() })
}
// Exec executes the upsert query, rejecting builders on which OnConflict
// was never configured.
func (u *DavAccountUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) > 0 {
		return u.create.Exec(ctx)
	}
	return errors.New("ent: missing options for DavAccountCreate.OnConflict")
}
// ExecX is like Exec, but panics if an error occurs.
func (u *DavAccountUpsertOne) ExecX(ctx context.Context) {
	// Delegate to u.Exec (not u.create.Exec) so the missing-OnConflict
	// validation performed by Exec also applies on the panicking path;
	// otherwise a builder without conflict options would silently run a
	// plain INSERT instead of failing fast.
	if err := u.Exec(ctx); err != nil {
		panic(err)
	}
}
// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *DavAccountUpsertOne) ID(ctx context.Context) (id int, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *DavAccountUpsertOne) IDX(ctx context.Context) int {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// SetRawID sets the raw id on the underlying mutation.
// NOTE(review): this looks like a Cloudreve extension over stock ent output,
// presumably for preserving explicit IDs (e.g. data import) — confirm with callers.
func (m *DavAccountCreate) SetRawID(t int) *DavAccountCreate {
	m.mutation.SetRawID(t)
	return m
}

// DavAccountCreateBulk is the builder for creating many DavAccount entities in bulk.
type DavAccountCreateBulk struct {
	config
	err      error
	builders []*DavAccountCreate
	conflict []sql.ConflictOption
}
// Save creates the DavAccount entities in the database.
// Each builder's hooks are composed into a chain of mutators; mutator i
// triggers mutator i+1, and the last one performs the single batch INSERT.
func (dacb *DavAccountCreateBulk) Save(ctx context.Context) ([]*DavAccount, error) {
	if dacb.err != nil {
		return nil, dacb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(dacb.builders))
	nodes := make([]*DavAccount, len(dacb.builders))
	mutators := make([]Mutator, len(dacb.builders))
	for i := range dacb.builders {
		func(i int, root context.Context) {
			builder := dacb.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*DavAccountMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: hand off to the next mutator in the chain.
					_, err = mutators[i+1].Mutate(root, dacb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = dacb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, dacb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				// Backfill the database-assigned ID (driver reports it as int64).
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap the mutator with this builder's hooks, innermost last.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain from the first mutator.
		if _, err := mutators[0].Mutate(ctx, dacb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (dacb *DavAccountCreateBulk) SaveX(ctx context.Context) []*DavAccount {
	nodes, err := dacb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// Exec executes the bulk-creation query, discarding the created entities.
func (dacb *DavAccountCreateBulk) Exec(ctx context.Context) error {
	if _, err := dacb.Save(ctx); err != nil {
		return err
	}
	return nil
}

// ExecX is like Exec, but panics if an error occurs.
func (dacb *DavAccountCreateBulk) ExecX(ctx context.Context) {
	if err := dacb.Exec(ctx); err != nil {
		panic(err)
	}
}
// OnConflict configures the `ON CONFLICT` / `ON DUPLICATE KEY` clause of the
// bulk `INSERT` statement and returns an upsert builder for the batch.
//
// Example:
//
//	client.DavAccount.CreateBulk(builders...).
//		OnConflict(
//			// Resolve the conflict with the values proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override selected fields with custom update values.
//		Update(func(u *ent.DavAccountUpsert) {
//			u.SetCreatedAt(v + v)
//		}).
//		Exec(ctx)
func (dacb *DavAccountCreateBulk) OnConflict(opts ...sql.ConflictOption) *DavAccountUpsertBulk {
	dacb.conflict = opts
	return &DavAccountUpsertBulk{create: dacb}
}

// OnConflictColumns names the columns used as the conflict target.
// It is shorthand for:
//
//	client.DavAccount.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (dacb *DavAccountCreateBulk) OnConflictColumns(columns ...string) *DavAccountUpsertBulk {
	dacb.conflict = append(dacb.conflict, sql.ConflictColumns(columns...))
	return &DavAccountUpsertBulk{create: dacb}
}
// DavAccountUpsertBulk is the builder for "upsert"-ing
// a bulk of DavAccount nodes.
type DavAccountUpsertBulk struct {
	create *DavAccountCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *DavAccountUpsertBulk) UpdateNewValues() *DavAccountUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// Keep the stored "created_at" on conflict if any builder in the
		// batch set it explicitly (the ignore applies to the whole batch).
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(davaccount.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *DavAccountUpsertBulk) Ignore() *DavAccountUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *DavAccountUpsertBulk) DoNothing() *DavAccountUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the DavAccountCreateBulk.OnConflict
// documentation for more info.
func (u *DavAccountUpsertBulk) Update(set func(*DavAccountUpsert)) *DavAccountUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&DavAccountUpsert{UpdateSet: update})
	}))
	return u
}
// The helpers below mirror the DavAccountUpsert field setters and register
// them on the bulk upsert builder via Update.

// SetUpdatedAt sets the "updated_at" field.
func (u *DavAccountUpsertBulk) SetUpdatedAt(v time.Time) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.SetUpdatedAt(v) })
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateUpdatedAt() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateUpdatedAt() })
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DavAccountUpsertBulk) SetDeletedAt(v time.Time) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.SetDeletedAt(v) })
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateDeletedAt() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateDeletedAt() })
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DavAccountUpsertBulk) ClearDeletedAt() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.ClearDeletedAt() })
}

// SetName sets the "name" field.
func (u *DavAccountUpsertBulk) SetName(v string) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.SetName(v) })
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateName() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateName() })
}

// SetURI sets the "uri" field.
func (u *DavAccountUpsertBulk) SetURI(v string) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.SetURI(v) })
}

// UpdateURI sets the "uri" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateURI() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateURI() })
}

// SetPassword sets the "password" field.
func (u *DavAccountUpsertBulk) SetPassword(v string) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.SetPassword(v) })
}

// UpdatePassword sets the "password" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdatePassword() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.UpdatePassword() })
}

// SetOptions sets the "options" field.
func (u *DavAccountUpsertBulk) SetOptions(v *boolset.BooleanSet) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.SetOptions(v) })
}

// UpdateOptions sets the "options" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateOptions() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateOptions() })
}

// SetProps sets the "props" field.
func (u *DavAccountUpsertBulk) SetProps(v *types.DavAccountProps) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.SetProps(v) })
}

// UpdateProps sets the "props" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateProps() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateProps() })
}

// ClearProps clears the value of the "props" field.
func (u *DavAccountUpsertBulk) ClearProps() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.ClearProps() })
}

// SetOwnerID sets the "owner_id" field.
func (u *DavAccountUpsertBulk) SetOwnerID(v int) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.SetOwnerID(v) })
}

// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateOwnerID() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) { s.UpdateOwnerID() })
}
// Exec executes the query. It validates that conflict options were set on
// the bulk builder (not on individual builders) before running the upsert.
func (u *DavAccountUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	// Conflict options must be configured once, on the bulk builder.
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the DavAccountCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for DavAccountCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}
// ExecX is like Exec, but panics if an error occurs.
func (u *DavAccountUpsertBulk) ExecX(ctx context.Context) {
	// Delegate to u.Exec (not u.create.Exec) so the per-builder OnConflict
	// checks and the missing-options validation performed by Exec also apply
	// on the panicking path, instead of being silently skipped.
	if err := u.Exec(ctx); err != nil {
		panic(err)
	}
}

@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// DavAccountDelete is the builder for deleting a DavAccount entity.
type DavAccountDelete struct {
	config
	hooks    []Hook
	mutation *DavAccountMutation
}

// Where appends a list of predicates to the DavAccountDelete builder.
func (dad *DavAccountDelete) Where(ps ...predicate.DavAccount) *DavAccountDelete {
	dad.mutation.Where(ps...)
	return dad
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (dad *DavAccountDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, dad.sqlExec, dad.mutation, dad.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (dad *DavAccountDelete) ExecX(ctx context.Context) int {
	n, err := dad.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds and runs the DELETE statement, applying all predicates.
func (dad *DavAccountDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(davaccount.Table, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	if ps := dad.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, dad.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	dad.mutation.done = true
	return affected, err
}
// DavAccountDeleteOne is the builder for deleting a single DavAccount entity.
type DavAccountDeleteOne struct {
	dad *DavAccountDelete
}

// Where appends a list of predicates to the DavAccountDelete builder.
func (dado *DavAccountDeleteOne) Where(ps ...predicate.DavAccount) *DavAccountDeleteOne {
	dado.dad.mutation.Where(ps...)
	return dado
}

// Exec executes the deletion query.
// Returns a *NotFoundError when no row matched the predicates.
func (dado *DavAccountDeleteOne) Exec(ctx context.Context) error {
	n, err := dado.dad.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{davaccount.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (dado *DavAccountDeleteOne) ExecX(ctx context.Context) {
	if err := dado.Exec(ctx); err != nil {
		panic(err)
	}
}

@ -0,0 +1,605 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// DavAccountQuery is the builder for querying DavAccount entities.
type DavAccountQuery struct {
	config
	ctx        *QueryContext
	order      []davaccount.OrderOption
	inters     []Interceptor
	predicates []predicate.DavAccount
	withOwner  *UserQuery // eager-load config for the "owner" edge
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the DavAccountQuery builder.
func (daq *DavAccountQuery) Where(ps ...predicate.DavAccount) *DavAccountQuery {
	daq.predicates = append(daq.predicates, ps...)
	return daq
}

// Limit the number of records to be returned by this query.
func (daq *DavAccountQuery) Limit(limit int) *DavAccountQuery {
	daq.ctx.Limit = &limit
	return daq
}

// Offset to start from.
func (daq *DavAccountQuery) Offset(offset int) *DavAccountQuery {
	daq.ctx.Offset = &offset
	return daq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (daq *DavAccountQuery) Unique(unique bool) *DavAccountQuery {
	daq.ctx.Unique = &unique
	return daq
}

// Order specifies how the records should be ordered.
func (daq *DavAccountQuery) Order(o ...davaccount.OrderOption) *DavAccountQuery {
	daq.order = append(daq.order, o...)
	return daq
}
// QueryOwner chains the current query on the "owner" edge
// (a many-to-one edge to the User table).
func (daq *DavAccountQuery) QueryOwner() *UserQuery {
	query := (&UserClient{config: daq.config}).Query()
	// The traversal is resolved lazily: the step is built when the returned
	// query executes, from this query's selector at that moment.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := daq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := daq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(davaccount.Table, davaccount.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, davaccount.OwnerTable, davaccount.OwnerColumn),
		)
		fromU = sqlgraph.SetNeighbors(daq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// First returns the first DavAccount entity from the query.
// Returns a *NotFoundError when no DavAccount was found.
// Note: Limit(1) mutates this builder's query context.
func (daq *DavAccountQuery) First(ctx context.Context) (*DavAccount, error) {
	nodes, err := daq.Limit(1).All(setContextOp(ctx, daq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{davaccount.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil instead of panicking.
func (daq *DavAccountQuery) FirstX(ctx context.Context) *DavAccount {
	node, err := daq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first DavAccount ID from the query.
// Returns a *NotFoundError when no DavAccount ID was found.
func (daq *DavAccountQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = daq.Limit(1).IDs(setContextOp(ctx, daq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{davaccount.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result returns the zero ID instead of panicking.
func (daq *DavAccountQuery) FirstIDX(ctx context.Context) int {
	id, err := daq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
// Only returns a single DavAccount entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one DavAccount entity is found.
// Returns a *NotFoundError when no DavAccount entities are found.
// Note: Limit(2) mutates this builder's query context.
func (daq *DavAccountQuery) Only(ctx context.Context) (*DavAccount, error) {
	nodes, err := daq.Limit(2).All(setContextOp(ctx, daq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{davaccount.Label}
	default:
		return nil, &NotSingularError{davaccount.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (daq *DavAccountQuery) OnlyX(ctx context.Context) *DavAccount {
	node, err := daq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only DavAccount ID in the query.
// Returns a *NotSingularError when more than one DavAccount ID is found.
// Returns a *NotFoundError when no entities are found.
func (daq *DavAccountQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = daq.Limit(2).IDs(setContextOp(ctx, daq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{davaccount.Label}
	default:
		err = &NotSingularError{davaccount.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (daq *DavAccountQuery) OnlyIDX(ctx context.Context) int {
	id, err := daq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of DavAccounts.
func (daq *DavAccountQuery) All(ctx context.Context) ([]*DavAccount, error) {
	ctx = setContextOp(ctx, daq.ctx, "All")
	if err := daq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*DavAccount, *DavAccountQuery]()
	return withInterceptors[[]*DavAccount](ctx, daq, qr, daq.inters)
}

// AllX is like All, but panics if an error occurs.
func (daq *DavAccountQuery) AllX(ctx context.Context) []*DavAccount {
	nodes, err := daq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of DavAccount IDs.
func (daq *DavAccountQuery) IDs(ctx context.Context) (ids []int, err error) {
	// Default to unique results for traversal queries unless explicitly set.
	if daq.ctx.Unique == nil && daq.path != nil {
		daq.Unique(true)
	}
	ctx = setContextOp(ctx, daq.ctx, "IDs")
	if err = daq.Select(davaccount.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (daq *DavAccountQuery) IDsX(ctx context.Context) []int {
	ids, err := daq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
// Count returns the count of the given query.
func (daq *DavAccountQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, daq.ctx, "Count")
	if err := daq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, daq, querierCount[*DavAccountQuery](), daq.inters)
}

// CountX is like Count, but panics if an error occurs.
func (daq *DavAccountQuery) CountX(ctx context.Context) int {
	count, err := daq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
// Implemented via FirstID, so only a single row is fetched.
func (daq *DavAccountQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, daq.ctx, "Exist")
	switch _, err := daq.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (daq *DavAccountQuery) ExistX(ctx context.Context) bool {
	exist, err := daq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
// Clone returns a duplicate of the DavAccountQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// A nil receiver yields a nil clone.
func (daq *DavAccountQuery) Clone() *DavAccountQuery {
	if daq == nil {
		return nil
	}
	return &DavAccountQuery{
		config:     daq.config,
		ctx:        daq.ctx.Clone(),
		order:      append([]davaccount.OrderOption{}, daq.order...),
		inters:     append([]Interceptor{}, daq.inters...),
		predicates: append([]predicate.DavAccount{}, daq.predicates...),
		withOwner:  daq.withOwner.Clone(),
		// clone intermediate query.
		sql:  daq.sql.Clone(),
		path: daq.path,
	}
}

// WithOwner tells the query-builder to eager-load the nodes that are connected to
// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
func (daq *DavAccountQuery) WithOwner(opts ...func(*UserQuery)) *DavAccountQuery {
	query := (&UserClient{config: daq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	daq.withOwner = query
	return daq
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count     int       `json:"count,omitempty"`
//	}
//
//	client.DavAccount.Query().
//		GroupBy(davaccount.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (daq *DavAccountQuery) GroupBy(field string, fields ...string) *DavAccountGroupBy {
	daq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &DavAccountGroupBy{build: daq}
	grbuild.flds = &daq.ctx.Fields
	grbuild.label = davaccount.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.DavAccount.Query().
//		Select(davaccount.FieldCreatedAt).
//		Scan(ctx, &v)
func (daq *DavAccountQuery) Select(fields ...string) *DavAccountSelect {
	daq.ctx.Fields = append(daq.ctx.Fields, fields...)
	sbuild := &DavAccountSelect{DavAccountQuery: daq}
	sbuild.label = davaccount.Label
	sbuild.flds, sbuild.scan = &daq.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a DavAccountSelect configured with the given aggregations.
func (daq *DavAccountQuery) Aggregate(fns ...AggregateFunc) *DavAccountSelect {
	return daq.Select().Aggregate(fns...)
}
// prepareQuery runs interceptors, validates selected columns, and resolves
// the lazy traversal path (if any) into this builder's selector.
func (daq *DavAccountQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range daq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, daq); err != nil {
				return err
			}
		}
	}
	for _, f := range daq.ctx.Fields {
		if !davaccount.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if daq.path != nil {
		prev, err := daq.path(ctx)
		if err != nil {
			return err
		}
		daq.sql = prev
	}
	return nil
}
// sqlAll executes the query, scans every row into a DavAccount entity,
// applies any query hooks, and eager-loads the requested edges.
func (daq *DavAccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DavAccount, error) {
	var (
		nodes = []*DavAccount{}
		_spec = daq.querySpec()
		// loadedTypes records which edges were requested for eager-loading,
		// so edge accessors can distinguish "not loaded" from "not found".
		loadedTypes = [1]bool{
			daq.withOwner != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*DavAccount).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &DavAccount{config: daq.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, daq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	// Batch-load the "owner" edge for all fetched nodes at once.
	if query := daq.withOwner; query != nil {
		if err := daq.loadOwner(ctx, query, nodes, nil,
			func(n *DavAccount, e *User) { n.Edges.Owner = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// loadOwner eager-loads the M2O "owner" edge for the given nodes with a
// single IN query over the owner_id foreign keys, then assigns each loaded
// User back onto every DavAccount that references it.
func (daq *DavAccountQuery) loadOwner(ctx context.Context, query *UserQuery, nodes []*DavAccount, init func(*DavAccount), assign func(*DavAccount, *User)) error {
	// Collect the distinct FK values and index nodes by their FK so each
	// neighbor can be assigned to all of its referencing accounts.
	ids := make([]int, 0, len(nodes))
	nodeids := make(map[int][]*DavAccount)
	for i := range nodes {
		fk := nodes[i].OwnerID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			// A returned User that no node points to indicates a broken FK.
			return fmt.Errorf(`unexpected foreign-key "owner_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
// sqlCount returns the number of DavAccount rows matching the query.
func (daq *DavAccountQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := daq.querySpec()
	_spec.Node.Columns = daq.ctx.Fields
	// When specific fields are selected, honor DISTINCT only if the caller
	// explicitly requested uniqueness.
	if len(daq.ctx.Fields) > 0 {
		_spec.Unique = daq.ctx.Unique != nil && *daq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, daq.driver, _spec)
}
// querySpec translates the builder's state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec for execution.
func (daq *DavAccountQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	_spec.From = daq.sql
	if unique := daq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if daq.path != nil {
		// Queries composed from another builder default to DISTINCT.
		_spec.Unique = true
	}
	if fields := daq.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first; user-selected fields follow.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, davaccount.FieldID)
		for i := range fields {
			if fields[i] != davaccount.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// Eager-loading the owner edge requires the FK column to be present.
		if daq.withOwner != nil {
			_spec.Node.AddColumnOnce(davaccount.FieldOwnerID)
		}
	}
	if ps := daq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := daq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := daq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := daq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
// sqlQuery builds the raw SQL selector for the query, used by the group-by
// and select scanners instead of the sqlgraph execution path.
func (daq *DavAccountQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(daq.driver.Dialect())
	t1 := builder.Table(davaccount.Table)
	columns := daq.ctx.Fields
	if len(columns) == 0 {
		columns = davaccount.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	// If a composed selector exists, re-select the columns on top of it.
	if daq.sql != nil {
		selector = daq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if daq.ctx.Unique != nil && *daq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range daq.predicates {
		p(selector)
	}
	for _, p := range daq.order {
		p(selector)
	}
	if offset := daq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := daq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// DavAccountGroupBy is the group-by builder for DavAccount entities.
type DavAccountGroupBy struct {
selector
build *DavAccountQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (dagb *DavAccountGroupBy) Aggregate(fns ...AggregateFunc) *DavAccountGroupBy {
dagb.fns = append(dagb.fns, fns...)
return dagb
}
// Scan applies the selector query and scans the result into the given value.
func (dagb *DavAccountGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, dagb.build.ctx, "GroupBy")
if err := dagb.build.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*DavAccountQuery, *DavAccountGroupBy](ctx, dagb.build, dagb, dagb.build.inters, v)
}
// sqlScan builds and executes the GROUP BY query, scanning the grouped
// columns plus any aggregations into v.
func (dagb *DavAccountGroupBy) sqlScan(ctx context.Context, root *DavAccountQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(dagb.fns))
	for _, fn := range dagb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Unless columns were selected explicitly, select the grouped fields
	// followed by the aggregation expressions.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*dagb.flds)+len(dagb.fns))
		for _, f := range *dagb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*dagb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := dagb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
// DavAccountSelect is the builder for selecting fields of DavAccount entities.
type DavAccountSelect struct {
*DavAccountQuery
selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (das *DavAccountSelect) Aggregate(fns ...AggregateFunc) *DavAccountSelect {
das.fns = append(das.fns, fns...)
return das
}
// Scan applies the selector query and scans the result into the given value.
func (das *DavAccountSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, das.ctx, "Select")
if err := das.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*DavAccountQuery, *DavAccountSelect](ctx, das.DavAccountQuery, das, das.inters, v)
}
// sqlScan executes the SELECT query and scans the chosen columns (and any
// aggregations) into v.
func (das *DavAccountSelect) sqlScan(ctx context.Context, root *DavAccountQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(das.fns))
	for _, fn := range das.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations either replace the selection (no fields chosen) or are
	// appended after the explicitly selected fields.
	switch n := len(*das.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := das.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

@ -0,0 +1,565 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// DavAccountUpdate is the builder for updating DavAccount entities.
type DavAccountUpdate struct {
config
hooks []Hook
mutation *DavAccountMutation
}
// Where appends a list predicates to the DavAccountUpdate builder.
func (dau *DavAccountUpdate) Where(ps ...predicate.DavAccount) *DavAccountUpdate {
dau.mutation.Where(ps...)
return dau
}
// SetUpdatedAt sets the "updated_at" field.
func (dau *DavAccountUpdate) SetUpdatedAt(t time.Time) *DavAccountUpdate {
dau.mutation.SetUpdatedAt(t)
return dau
}
// SetDeletedAt sets the "deleted_at" field.
func (dau *DavAccountUpdate) SetDeletedAt(t time.Time) *DavAccountUpdate {
dau.mutation.SetDeletedAt(t)
return dau
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillableDeletedAt(t *time.Time) *DavAccountUpdate {
if t != nil {
dau.SetDeletedAt(*t)
}
return dau
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (dau *DavAccountUpdate) ClearDeletedAt() *DavAccountUpdate {
dau.mutation.ClearDeletedAt()
return dau
}
// SetName sets the "name" field.
func (dau *DavAccountUpdate) SetName(s string) *DavAccountUpdate {
dau.mutation.SetName(s)
return dau
}
// SetNillableName sets the "name" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillableName(s *string) *DavAccountUpdate {
if s != nil {
dau.SetName(*s)
}
return dau
}
// SetURI sets the "uri" field.
func (dau *DavAccountUpdate) SetURI(s string) *DavAccountUpdate {
dau.mutation.SetURI(s)
return dau
}
// SetNillableURI sets the "uri" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillableURI(s *string) *DavAccountUpdate {
if s != nil {
dau.SetURI(*s)
}
return dau
}
// SetPassword sets the "password" field.
func (dau *DavAccountUpdate) SetPassword(s string) *DavAccountUpdate {
dau.mutation.SetPassword(s)
return dau
}
// SetNillablePassword sets the "password" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillablePassword(s *string) *DavAccountUpdate {
if s != nil {
dau.SetPassword(*s)
}
return dau
}
// SetOptions sets the "options" field.
func (dau *DavAccountUpdate) SetOptions(bs *boolset.BooleanSet) *DavAccountUpdate {
dau.mutation.SetOptions(bs)
return dau
}
// SetProps sets the "props" field.
func (dau *DavAccountUpdate) SetProps(tap *types.DavAccountProps) *DavAccountUpdate {
dau.mutation.SetProps(tap)
return dau
}
// ClearProps clears the value of the "props" field.
func (dau *DavAccountUpdate) ClearProps() *DavAccountUpdate {
dau.mutation.ClearProps()
return dau
}
// SetOwnerID sets the "owner_id" field.
func (dau *DavAccountUpdate) SetOwnerID(i int) *DavAccountUpdate {
dau.mutation.SetOwnerID(i)
return dau
}
// SetNillableOwnerID sets the "owner_id" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillableOwnerID(i *int) *DavAccountUpdate {
if i != nil {
dau.SetOwnerID(*i)
}
return dau
}
// SetOwner sets the "owner" edge to the User entity.
func (dau *DavAccountUpdate) SetOwner(u *User) *DavAccountUpdate {
return dau.SetOwnerID(u.ID)
}
// Mutation returns the DavAccountMutation object of the builder.
func (dau *DavAccountUpdate) Mutation() *DavAccountMutation {
return dau.mutation
}
// ClearOwner clears the "owner" edge to the User entity.
func (dau *DavAccountUpdate) ClearOwner() *DavAccountUpdate {
dau.mutation.ClearOwner()
return dau
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (dau *DavAccountUpdate) Save(ctx context.Context) (int, error) {
if err := dau.defaults(); err != nil {
return 0, err
}
return withHooks(ctx, dau.sqlSave, dau.mutation, dau.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (dau *DavAccountUpdate) SaveX(ctx context.Context) int {
affected, err := dau.Save(ctx)
if err != nil {
panic(err)
}
return affected
}
// Exec executes the query.
func (dau *DavAccountUpdate) Exec(ctx context.Context) error {
_, err := dau.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dau *DavAccountUpdate) ExecX(ctx context.Context) {
if err := dau.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (dau *DavAccountUpdate) defaults() error {
	// Populate updated_at from the schema's update-default hook unless the
	// caller set it explicitly; the hook is wired in by the ent/runtime package.
	if _, ok := dau.mutation.UpdatedAt(); !ok {
		if davaccount.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized davaccount.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := davaccount.UpdateDefaultUpdatedAt()
		dau.mutation.SetUpdatedAt(v)
	}
	return nil
}
// check runs all checks and user-defined validators on the builder.
func (dau *DavAccountUpdate) check() error {
if _, ok := dau.mutation.OwnerID(); dau.mutation.OwnerCleared() && !ok {
return errors.New(`ent: clearing a required unique edge "DavAccount.owner"`)
}
return nil
}
// sqlSave translates the mutation into an sqlgraph update spec and executes
// it, returning the number of affected rows.
func (dau *DavAccountUpdate) sqlSave(ctx context.Context) (n int, err error) {
	if err := dau.check(); err != nil {
		return n, err
	}
	_spec := sqlgraph.NewUpdateSpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	// Apply Where predicates so only matching rows are updated.
	if ps := dau.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Map each set/cleared field on the mutation to the update spec.
	if value, ok := dau.mutation.UpdatedAt(); ok {
		_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := dau.mutation.DeletedAt(); ok {
		_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
	}
	if dau.mutation.DeletedAtCleared() {
		_spec.ClearField(davaccount.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := dau.mutation.Name(); ok {
		_spec.SetField(davaccount.FieldName, field.TypeString, value)
	}
	if value, ok := dau.mutation.URI(); ok {
		_spec.SetField(davaccount.FieldURI, field.TypeString, value)
	}
	if value, ok := dau.mutation.Password(); ok {
		_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
	}
	if value, ok := dau.mutation.Options(); ok {
		_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
	}
	if value, ok := dau.mutation.Props(); ok {
		_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
	}
	if dau.mutation.PropsCleared() {
		_spec.ClearField(davaccount.FieldProps, field.TypeJSON)
	}
	// The "owner" edge is an M2O relation stored on this table; clearing and
	// re-adding it is expressed as edge specs on the update.
	if dau.mutation.OwnerCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := dau.mutation.OwnerIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Translate low-level sqlgraph errors into ent's public error types.
	if n, err = sqlgraph.UpdateNodes(ctx, dau.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{davaccount.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	dau.mutation.done = true
	return n, nil
}
// DavAccountUpdateOne is the builder for updating a single DavAccount entity.
type DavAccountUpdateOne struct {
config
fields []string
hooks []Hook
mutation *DavAccountMutation
}
// SetUpdatedAt sets the "updated_at" field.
func (dauo *DavAccountUpdateOne) SetUpdatedAt(t time.Time) *DavAccountUpdateOne {
dauo.mutation.SetUpdatedAt(t)
return dauo
}
// SetDeletedAt sets the "deleted_at" field.
func (dauo *DavAccountUpdateOne) SetDeletedAt(t time.Time) *DavAccountUpdateOne {
dauo.mutation.SetDeletedAt(t)
return dauo
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillableDeletedAt(t *time.Time) *DavAccountUpdateOne {
if t != nil {
dauo.SetDeletedAt(*t)
}
return dauo
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (dauo *DavAccountUpdateOne) ClearDeletedAt() *DavAccountUpdateOne {
dauo.mutation.ClearDeletedAt()
return dauo
}
// SetName sets the "name" field.
func (dauo *DavAccountUpdateOne) SetName(s string) *DavAccountUpdateOne {
dauo.mutation.SetName(s)
return dauo
}
// SetNillableName sets the "name" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillableName(s *string) *DavAccountUpdateOne {
if s != nil {
dauo.SetName(*s)
}
return dauo
}
// SetURI sets the "uri" field.
func (dauo *DavAccountUpdateOne) SetURI(s string) *DavAccountUpdateOne {
dauo.mutation.SetURI(s)
return dauo
}
// SetNillableURI sets the "uri" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillableURI(s *string) *DavAccountUpdateOne {
if s != nil {
dauo.SetURI(*s)
}
return dauo
}
// SetPassword sets the "password" field.
func (dauo *DavAccountUpdateOne) SetPassword(s string) *DavAccountUpdateOne {
dauo.mutation.SetPassword(s)
return dauo
}
// SetNillablePassword sets the "password" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillablePassword(s *string) *DavAccountUpdateOne {
if s != nil {
dauo.SetPassword(*s)
}
return dauo
}
// SetOptions sets the "options" field.
func (dauo *DavAccountUpdateOne) SetOptions(bs *boolset.BooleanSet) *DavAccountUpdateOne {
dauo.mutation.SetOptions(bs)
return dauo
}
// SetProps sets the "props" field.
func (dauo *DavAccountUpdateOne) SetProps(tap *types.DavAccountProps) *DavAccountUpdateOne {
dauo.mutation.SetProps(tap)
return dauo
}
// ClearProps clears the value of the "props" field.
func (dauo *DavAccountUpdateOne) ClearProps() *DavAccountUpdateOne {
dauo.mutation.ClearProps()
return dauo
}
// SetOwnerID sets the "owner_id" field.
func (dauo *DavAccountUpdateOne) SetOwnerID(i int) *DavAccountUpdateOne {
dauo.mutation.SetOwnerID(i)
return dauo
}
// SetNillableOwnerID sets the "owner_id" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillableOwnerID(i *int) *DavAccountUpdateOne {
if i != nil {
dauo.SetOwnerID(*i)
}
return dauo
}
// SetOwner sets the "owner" edge to the User entity.
func (dauo *DavAccountUpdateOne) SetOwner(u *User) *DavAccountUpdateOne {
return dauo.SetOwnerID(u.ID)
}
// Mutation returns the DavAccountMutation object of the builder.
func (dauo *DavAccountUpdateOne) Mutation() *DavAccountMutation {
return dauo.mutation
}
// ClearOwner clears the "owner" edge to the User entity.
func (dauo *DavAccountUpdateOne) ClearOwner() *DavAccountUpdateOne {
dauo.mutation.ClearOwner()
return dauo
}
// Where appends a list predicates to the DavAccountUpdate builder.
func (dauo *DavAccountUpdateOne) Where(ps ...predicate.DavAccount) *DavAccountUpdateOne {
dauo.mutation.Where(ps...)
return dauo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (dauo *DavAccountUpdateOne) Select(field string, fields ...string) *DavAccountUpdateOne {
dauo.fields = append([]string{field}, fields...)
return dauo
}
// Save executes the query and returns the updated DavAccount entity.
func (dauo *DavAccountUpdateOne) Save(ctx context.Context) (*DavAccount, error) {
if err := dauo.defaults(); err != nil {
return nil, err
}
return withHooks(ctx, dauo.sqlSave, dauo.mutation, dauo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (dauo *DavAccountUpdateOne) SaveX(ctx context.Context) *DavAccount {
node, err := dauo.Save(ctx)
if err != nil {
panic(err)
}
return node
}
// Exec executes the query on the entity.
func (dauo *DavAccountUpdateOne) Exec(ctx context.Context) error {
_, err := dauo.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dauo *DavAccountUpdateOne) ExecX(ctx context.Context) {
if err := dauo.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (dauo *DavAccountUpdateOne) defaults() error {
if _, ok := dauo.mutation.UpdatedAt(); !ok {
if davaccount.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized davaccount.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := davaccount.UpdateDefaultUpdatedAt()
dauo.mutation.SetUpdatedAt(v)
}
return nil
}
// check runs all checks and user-defined validators on the builder.
func (dauo *DavAccountUpdateOne) check() error {
if _, ok := dauo.mutation.OwnerID(); dauo.mutation.OwnerCleared() && !ok {
return errors.New(`ent: clearing a required unique edge "DavAccount.owner"`)
}
return nil
}
// sqlSave translates the mutation into an sqlgraph update spec targeting a
// single entity (by its required ID) and returns the updated DavAccount.
func (dauo *DavAccountUpdateOne) sqlSave(ctx context.Context) (_node *DavAccount, err error) {
	if err := dauo.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	// UpdateOne requires the mutation to carry the target entity's ID.
	id, ok := dauo.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DavAccount.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// If Select was used, return only the chosen columns (ID always included).
	if fields := dauo.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, davaccount.FieldID)
		for _, f := range fields {
			if !davaccount.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != davaccount.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := dauo.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Map each set/cleared field on the mutation to the update spec.
	if value, ok := dauo.mutation.UpdatedAt(); ok {
		_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := dauo.mutation.DeletedAt(); ok {
		_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
	}
	if dauo.mutation.DeletedAtCleared() {
		_spec.ClearField(davaccount.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := dauo.mutation.Name(); ok {
		_spec.SetField(davaccount.FieldName, field.TypeString, value)
	}
	if value, ok := dauo.mutation.URI(); ok {
		_spec.SetField(davaccount.FieldURI, field.TypeString, value)
	}
	if value, ok := dauo.mutation.Password(); ok {
		_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
	}
	if value, ok := dauo.mutation.Options(); ok {
		_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
	}
	if value, ok := dauo.mutation.Props(); ok {
		_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
	}
	if dauo.mutation.PropsCleared() {
		_spec.ClearField(davaccount.FieldProps, field.TypeJSON)
	}
	// The "owner" edge is an M2O relation stored on this table.
	if dauo.mutation.OwnerCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := dauo.mutation.OwnerIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// The updated row is scanned back into _node via assignValues.
	_node = &DavAccount{config: dauo.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, dauo.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{davaccount.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	dauo.mutation.done = true
	return _node, nil
}

@ -0,0 +1,212 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/file"
)
// DirectLink is the model entity for the DirectLink schema.
type DirectLink struct {
config `json:"-"`
// ID of the ent.
ID int `json:"id,omitempty"`
// CreatedAt holds the value of the "created_at" field.
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// DeletedAt holds the value of the "deleted_at" field.
DeletedAt *time.Time `json:"deleted_at,omitempty"`
// Name holds the value of the "name" field.
Name string `json:"name,omitempty"`
// Downloads holds the value of the "downloads" field.
Downloads int `json:"downloads,omitempty"`
// FileID holds the value of the "file_id" field.
FileID int `json:"file_id,omitempty"`
// Speed holds the value of the "speed" field.
Speed int `json:"speed,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the DirectLinkQuery when eager-loading is set.
Edges DirectLinkEdges `json:"edges"`
selectValues sql.SelectValues
}
// DirectLinkEdges holds the relations/edges for other nodes in the graph.
type DirectLinkEdges struct {
// File holds the value of the file edge.
File *File `json:"file,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [1]bool
}
// FileOrErr returns the File value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e DirectLinkEdges) FileOrErr() (*File, error) {
	// loadedTypes[0] distinguishes "edge was requested for eager-loading"
	// from "edge was never loaded" (NotLoadedError).
	if e.loadedTypes[0] {
		if e.File == nil {
			// Edge was loaded but was not found.
			return nil, &NotFoundError{label: file.Label}
		}
		return e.File, nil
	}
	return nil, &NotLoadedError{edge: "file"}
}
// scanValues returns the types for scanning values from sql.Rows.
// Each column name is mapped to the sql.Null* wrapper matching its type;
// unrecognized columns (e.g. selected via modifiers) use sql.UnknownType
// and are later stored in selectValues by assignValues.
func (*DirectLink) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case directlink.FieldID, directlink.FieldDownloads, directlink.FieldFileID, directlink.FieldSpeed:
			values[i] = new(sql.NullInt64)
		case directlink.FieldName:
			values[i] = new(sql.NullString)
		case directlink.FieldCreatedAt, directlink.FieldUpdatedAt, directlink.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DirectLink fields.
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DirectLink fields. The values slice is expected to be positionally
// aligned with columns, as produced by scanValues.
func (dl *DirectLink) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case directlink.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			dl.ID = int(value.Int64)
		case directlink.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				dl.CreatedAt = value.Time
			}
		case directlink.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				dl.UpdatedAt = value.Time
			}
		case directlink.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// deleted_at is nullable, hence stored as *time.Time.
				dl.DeletedAt = new(time.Time)
				*dl.DeletedAt = value.Time
			}
		case directlink.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				dl.Name = value.String
			}
		case directlink.FieldDownloads:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field downloads", values[i])
			} else if value.Valid {
				dl.Downloads = int(value.Int64)
			}
		case directlink.FieldFileID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field file_id", values[i])
			} else if value.Valid {
				dl.FileID = int(value.Int64)
			}
		case directlink.FieldSpeed:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field speed", values[i])
			} else if value.Valid {
				dl.Speed = int(value.Int64)
			}
		default:
			// Columns not part of the schema (e.g. modifier-selected) are
			// retained for retrieval through Value.
			dl.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the DirectLink.
// This includes values selected through modifiers, order, etc.
func (dl *DirectLink) Value(name string) (ent.Value, error) {
return dl.selectValues.Get(name)
}
// QueryFile queries the "file" edge of the DirectLink entity.
func (dl *DirectLink) QueryFile() *FileQuery {
return NewDirectLinkClient(dl.config).QueryFile(dl)
}
// Update returns a builder for updating this DirectLink.
// Note that you need to call DirectLink.Unwrap() before calling this method if this DirectLink
// was returned from a transaction, and the transaction was committed or rolled back.
func (dl *DirectLink) Update() *DirectLinkUpdateOne {
return NewDirectLinkClient(dl.config).UpdateOne(dl)
}
// Unwrap unwraps the DirectLink entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (dl *DirectLink) Unwrap() *DirectLink {
_tx, ok := dl.config.driver.(*txDriver)
if !ok {
panic("ent: DirectLink is not a transactional entity")
}
dl.config.driver = _tx.drv
return dl
}
// String implements the fmt.Stringer.
func (dl *DirectLink) String() string {
	var builder strings.Builder
	builder.WriteString("DirectLink(")
	builder.WriteString(fmt.Sprintf("id=%v, ", dl.ID))
	builder.WriteString("created_at=")
	builder.WriteString(dl.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(dl.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	// deleted_at is nullable; printed only when set.
	if v := dl.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(dl.Name)
	builder.WriteString(", ")
	builder.WriteString("downloads=")
	builder.WriteString(fmt.Sprintf("%v", dl.Downloads))
	builder.WriteString(", ")
	builder.WriteString("file_id=")
	builder.WriteString(fmt.Sprintf("%v", dl.FileID))
	builder.WriteString(", ")
	builder.WriteString("speed=")
	builder.WriteString(fmt.Sprintf("%v", dl.Speed))
	builder.WriteByte(')')
	return builder.String()
}
// SetFile manually sets the "file" edge and marks it as loaded, so that
// FileOrErr returns v instead of a NotLoadedError.
func (dl *DirectLink) SetFile(v *File) {
	// Receiver renamed from "e" to "dl" for consistency with the other
	// *DirectLink methods in this file.
	dl.Edges.File = v
	dl.Edges.loadedTypes[0] = true
}
// DirectLinks is a parsable slice of DirectLink.
type DirectLinks []*DirectLink

@ -0,0 +1,138 @@
// Code generated by ent, DO NOT EDIT.
package directlink
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
// Label holds the string label denoting the directlink type in the database.
Label = "direct_link"
// FieldID holds the string denoting the id field in the database.
FieldID = "id"
// FieldCreatedAt holds the string denoting the created_at field in the database.
FieldCreatedAt = "created_at"
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
FieldUpdatedAt = "updated_at"
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
FieldDeletedAt = "deleted_at"
// FieldName holds the string denoting the name field in the database.
FieldName = "name"
// FieldDownloads holds the string denoting the downloads field in the database.
FieldDownloads = "downloads"
// FieldFileID holds the string denoting the file_id field in the database.
FieldFileID = "file_id"
// FieldSpeed holds the string denoting the speed field in the database.
FieldSpeed = "speed"
// EdgeFile holds the string denoting the file edge name in mutations.
EdgeFile = "file"
// Table holds the table name of the directlink in the database.
Table = "direct_links"
// FileTable is the table that holds the file relation/edge.
FileTable = "direct_links"
// FileInverseTable is the table name for the File entity.
// It exists in this package in order to avoid circular dependency with the "file" package.
FileInverseTable = "files"
// FileColumn is the table column denoting the file relation/edge.
FileColumn = "file_id"
)
// Columns holds all SQL columns for directlink fields.
var Columns = []string{
FieldID,
FieldCreatedAt,
FieldUpdatedAt,
FieldDeletedAt,
FieldName,
FieldDownloads,
FieldFileID,
FieldSpeed,
}
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks holds the hooks registered for the DirectLink entity
	// (populated by the generated runtime package).
	Hooks [1]ent.Hook
	// Interceptors holds the query interceptors registered for the
	// DirectLink entity (populated by the generated runtime package).
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
// OrderOption defines the ordering options for the DirectLink queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}
// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldName, opts...).ToFunc()
}
// ByDownloads orders the results by the downloads field.
func ByDownloads(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldDownloads, opts...).ToFunc()
}
// ByFileID orders the results by the file_id field.
func ByFileID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldFileID, opts...).ToFunc()
}
// BySpeed orders the results by the speed field.
func BySpeed(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSpeed, opts...).ToFunc()
}
// ByFileField orders the results by file field.
// Ordering is applied through the neighbor "file" table using the edge step,
// so the given field must be a column of the files table.
func ByFileField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newFileStep(), sql.OrderByField(field, opts...))
	}
}
// newFileStep builds the graph step describing the inverse M2O "file" edge
// (direct_links.file_id -> files.id). It is shared by edge ordering
// (ByFileField) and edge predicates (HasFileWith).
func newFileStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(FileInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, FileTable, FileColumn),
	)
}

// ---- file boundary (diff hunk header from concatenated source): @ -0,0 +1,424 @@ ----
// Code generated by ent, DO NOT EDIT.
package directlink
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// ID filters vertices based on their ID field.
func ID(id int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldUpdatedAt, v))
}
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldDeletedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldName, v))
}
// Downloads applies equality check predicate on the "downloads" field. It's identical to DownloadsEQ.
func Downloads(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldDownloads, v))
}
// FileID applies equality check predicate on the "file_id" field. It's identical to FileIDEQ.
func FileID(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldFileID, v))
}
// Speed applies equality check predicate on the "speed" field. It's identical to SpeedEQ.
func Speed(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldSpeed, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLTE(FieldUpdatedAt, v))
}
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldDeletedAt, v))
}
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNEQ(FieldDeletedAt, v))
}
// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldIn(FieldDeletedAt, vs...))
}
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotIn(FieldDeletedAt, vs...))
}
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGT(FieldDeletedAt, v))
}
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGTE(FieldDeletedAt, v))
}
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLT(FieldDeletedAt, v))
}
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLTE(FieldDeletedAt, v))
}
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.DirectLink {
return predicate.DirectLink(sql.FieldIsNull(FieldDeletedAt))
}
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotNull(FieldDeletedAt))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.DirectLink {
return predicate.DirectLink(sql.FieldContainsFold(FieldName, v))
}
// DownloadsEQ applies the EQ predicate on the "downloads" field.
func DownloadsEQ(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldDownloads, v))
}
// DownloadsNEQ applies the NEQ predicate on the "downloads" field.
func DownloadsNEQ(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNEQ(FieldDownloads, v))
}
// DownloadsIn applies the In predicate on the "downloads" field.
func DownloadsIn(vs ...int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldIn(FieldDownloads, vs...))
}
// DownloadsNotIn applies the NotIn predicate on the "downloads" field.
func DownloadsNotIn(vs ...int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotIn(FieldDownloads, vs...))
}
// DownloadsGT applies the GT predicate on the "downloads" field.
func DownloadsGT(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGT(FieldDownloads, v))
}
// DownloadsGTE applies the GTE predicate on the "downloads" field.
func DownloadsGTE(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGTE(FieldDownloads, v))
}
// DownloadsLT applies the LT predicate on the "downloads" field.
func DownloadsLT(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLT(FieldDownloads, v))
}
// DownloadsLTE applies the LTE predicate on the "downloads" field.
func DownloadsLTE(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLTE(FieldDownloads, v))
}
// FileIDEQ applies the EQ predicate on the "file_id" field.
func FileIDEQ(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldFileID, v))
}
// FileIDNEQ applies the NEQ predicate on the "file_id" field.
func FileIDNEQ(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNEQ(FieldFileID, v))
}
// FileIDIn applies the In predicate on the "file_id" field.
func FileIDIn(vs ...int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldIn(FieldFileID, vs...))
}
// FileIDNotIn applies the NotIn predicate on the "file_id" field.
func FileIDNotIn(vs ...int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotIn(FieldFileID, vs...))
}
// SpeedEQ applies the EQ predicate on the "speed" field.
func SpeedEQ(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldEQ(FieldSpeed, v))
}
// SpeedNEQ applies the NEQ predicate on the "speed" field.
func SpeedNEQ(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNEQ(FieldSpeed, v))
}
// SpeedIn applies the In predicate on the "speed" field.
func SpeedIn(vs ...int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldIn(FieldSpeed, vs...))
}
// SpeedNotIn applies the NotIn predicate on the "speed" field.
func SpeedNotIn(vs ...int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldNotIn(FieldSpeed, vs...))
}
// SpeedGT applies the GT predicate on the "speed" field.
func SpeedGT(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGT(FieldSpeed, v))
}
// SpeedGTE applies the GTE predicate on the "speed" field.
func SpeedGTE(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldGTE(FieldSpeed, v))
}
// SpeedLT applies the LT predicate on the "speed" field.
func SpeedLT(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLT(FieldSpeed, v))
}
// SpeedLTE applies the LTE predicate on the "speed" field.
func SpeedLTE(v int) predicate.DirectLink {
return predicate.DirectLink(sql.FieldLTE(FieldSpeed, v))
}
// HasFile applies the HasEdge predicate on the "file" edge.
func HasFile() predicate.DirectLink {
	return predicate.DirectLink(func(s *sql.Selector) {
		// The step here omits sqlgraph.To: HasNeighbors only needs the
		// source table and the edge columns to test edge existence.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, FileTable, FileColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}
// HasFileWith applies the HasEdge predicate on the "file" edge with a given conditions (other predicates).
func HasFileWith(preds ...predicate.File) predicate.DirectLink {
	return predicate.DirectLink(func(s *sql.Selector) {
		step := newFileStep()
		// All given predicates are applied (AND-ed) inside the neighbor
		// (files) selector.
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.DirectLink) predicate.DirectLink {
return predicate.DirectLink(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.DirectLink) predicate.DirectLink {
return predicate.DirectLink(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.DirectLink) predicate.DirectLink {
return predicate.DirectLink(sql.NotPredicates(p))
}

// ---- file boundary (diff hunk header from concatenated source): @ -0,0 +1,883 @@ ----
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/file"
)
// DirectLinkCreate is the builder for creating a DirectLink entity.
type DirectLinkCreate struct {
config
mutation *DirectLinkMutation
hooks []Hook
conflict []sql.ConflictOption
}
// SetCreatedAt sets the "created_at" field.
func (dlc *DirectLinkCreate) SetCreatedAt(t time.Time) *DirectLinkCreate {
dlc.mutation.SetCreatedAt(t)
return dlc
}
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (dlc *DirectLinkCreate) SetNillableCreatedAt(t *time.Time) *DirectLinkCreate {
if t != nil {
dlc.SetCreatedAt(*t)
}
return dlc
}
// SetUpdatedAt sets the "updated_at" field.
func (dlc *DirectLinkCreate) SetUpdatedAt(t time.Time) *DirectLinkCreate {
dlc.mutation.SetUpdatedAt(t)
return dlc
}
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (dlc *DirectLinkCreate) SetNillableUpdatedAt(t *time.Time) *DirectLinkCreate {
if t != nil {
dlc.SetUpdatedAt(*t)
}
return dlc
}
// SetDeletedAt sets the "deleted_at" field.
func (dlc *DirectLinkCreate) SetDeletedAt(t time.Time) *DirectLinkCreate {
dlc.mutation.SetDeletedAt(t)
return dlc
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dlc *DirectLinkCreate) SetNillableDeletedAt(t *time.Time) *DirectLinkCreate {
if t != nil {
dlc.SetDeletedAt(*t)
}
return dlc
}
// SetName sets the "name" field.
func (dlc *DirectLinkCreate) SetName(s string) *DirectLinkCreate {
dlc.mutation.SetName(s)
return dlc
}
// SetDownloads sets the "downloads" field.
func (dlc *DirectLinkCreate) SetDownloads(i int) *DirectLinkCreate {
dlc.mutation.SetDownloads(i)
return dlc
}
// SetFileID sets the "file_id" field.
func (dlc *DirectLinkCreate) SetFileID(i int) *DirectLinkCreate {
dlc.mutation.SetFileID(i)
return dlc
}
// SetSpeed sets the "speed" field.
func (dlc *DirectLinkCreate) SetSpeed(i int) *DirectLinkCreate {
dlc.mutation.SetSpeed(i)
return dlc
}
// SetFile sets the "file" edge to the File entity.
func (dlc *DirectLinkCreate) SetFile(f *File) *DirectLinkCreate {
return dlc.SetFileID(f.ID)
}
// Mutation returns the DirectLinkMutation object of the builder.
func (dlc *DirectLinkCreate) Mutation() *DirectLinkMutation {
return dlc.mutation
}
// Save creates the DirectLink in the database.
// Schema defaults (created_at/updated_at) are applied first; the actual
// save then runs through the registered hooks via withHooks.
func (dlc *DirectLinkCreate) Save(ctx context.Context) (*DirectLink, error) {
	if err := dlc.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, dlc.sqlSave, dlc.mutation, dlc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
func (dlc *DirectLinkCreate) SaveX(ctx context.Context) *DirectLink {
v, err := dlc.Save(ctx)
if err != nil {
panic(err)
}
return v
}
// Exec executes the query.
func (dlc *DirectLinkCreate) Exec(ctx context.Context) error {
_, err := dlc.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dlc *DirectLinkCreate) ExecX(ctx context.Context) {
if err := dlc.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
// It returns an error (rather than panicking) when the schema-defined default
// functions have not been wired up — that happens when the application forgot
// to import the ent/runtime package.
func (dlc *DirectLinkCreate) defaults() error {
	if _, ok := dlc.mutation.CreatedAt(); !ok {
		if directlink.DefaultCreatedAt == nil {
			return fmt.Errorf("ent: uninitialized directlink.DefaultCreatedAt (forgotten import ent/runtime?)")
		}
		v := directlink.DefaultCreatedAt()
		dlc.mutation.SetCreatedAt(v)
	}
	if _, ok := dlc.mutation.UpdatedAt(); !ok {
		if directlink.DefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized directlink.DefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := directlink.DefaultUpdatedAt()
		dlc.mutation.SetUpdatedAt(v)
	}
	return nil
}
// check runs all checks and user-defined validators on the builder.
func (dlc *DirectLinkCreate) check() error {
	if _, ok := dlc.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DirectLink.created_at"`)}
	}
	if _, ok := dlc.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DirectLink.updated_at"`)}
	}
	if _, ok := dlc.mutation.Name(); !ok {
		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "DirectLink.name"`)}
	}
	if _, ok := dlc.mutation.Downloads(); !ok {
		return &ValidationError{Name: "downloads", err: errors.New(`ent: missing required field "DirectLink.downloads"`)}
	}
	if _, ok := dlc.mutation.FileID(); !ok {
		return &ValidationError{Name: "file_id", err: errors.New(`ent: missing required field "DirectLink.file_id"`)}
	}
	if _, ok := dlc.mutation.Speed(); !ok {
		return &ValidationError{Name: "speed", err: errors.New(`ent: missing required field "DirectLink.speed"`)}
	}
	// The second FileID check reports the missing required "file" edge,
	// which is backed by the same file_id column as the field check above.
	if _, ok := dlc.mutation.FileID(); !ok {
		return &ValidationError{Name: "file", err: errors.New(`ent: missing required edge "DirectLink.file"`)}
	}
	return nil
}
// sqlSave validates the builder, inserts the row through sqlgraph, and writes
// the database-generated id back into the returned node and the mutation.
func (dlc *DirectLinkCreate) sqlSave(ctx context.Context) (*DirectLink, error) {
	if err := dlc.check(); err != nil {
		return nil, err
	}
	_node, _spec := dlc.createSpec()
	if err := sqlgraph.CreateNode(ctx, dlc.driver, _spec); err != nil {
		// Constraint violations are wrapped in ent's ConstraintError so
		// callers can distinguish them from other database errors.
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	id := _spec.ID.Value.(int64)
	_node.ID = int(id)
	dlc.mutation.id = &_node.ID
	dlc.mutation.done = true
	return _node, nil
}
// createSpec translates the mutation into an in-memory node and the sqlgraph
// CreateSpec used to build the INSERT statement.
func (dlc *DirectLinkCreate) createSpec() (*DirectLink, *sqlgraph.CreateSpec) {
	var (
		_node = &DirectLink{config: dlc.config}
		_spec = sqlgraph.NewCreateSpec(directlink.Table, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	)
	// A caller-provided id (e.g. via SetRawID) overrides auto-generation.
	if id, ok := dlc.mutation.ID(); ok {
		_node.ID = id
		id64 := int64(id)
		_spec.ID.Value = id64
	}
	_spec.OnConflict = dlc.conflict
	if value, ok := dlc.mutation.CreatedAt(); ok {
		_spec.SetField(directlink.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := dlc.mutation.UpdatedAt(); ok {
		_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := dlc.mutation.DeletedAt(); ok {
		_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
		// deleted_at is nullable, hence stored as a pointer on the node.
		_node.DeletedAt = &value
	}
	if value, ok := dlc.mutation.Name(); ok {
		_spec.SetField(directlink.FieldName, field.TypeString, value)
		_node.Name = value
	}
	if value, ok := dlc.mutation.Downloads(); ok {
		_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
		_node.Downloads = value
	}
	if value, ok := dlc.mutation.Speed(); ok {
		_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
		_node.Speed = value
	}
	// file_id is not set as a plain field above; it is written through the
	// M2O "file" edge spec below, and mirrored onto the node.
	if nodes := dlc.mutation.FileIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_node.FileID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.DirectLink.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.DirectLinkUpsert) {
//			u.SetCreatedAt(v + v)
//		}).
//		Exec(ctx)
//
// Note that calling OnConflict replaces any previously configured conflict
// options on the builder.
func (dlc *DirectLinkCreate) OnConflict(opts ...sql.ConflictOption) *DirectLinkUpsertOne {
	dlc.conflict = opts
	return &DirectLinkUpsertOne{
		create: dlc,
	}
}
// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
// client.DirectLink.Create().
// OnConflict(sql.ConflictColumns(columns...)).
// Exec(ctx)
func (dlc *DirectLinkCreate) OnConflictColumns(columns ...string) *DirectLinkUpsertOne {
dlc.conflict = append(dlc.conflict, sql.ConflictColumns(columns...))
return &DirectLinkUpsertOne{
create: dlc,
}
}
type (
// DirectLinkUpsertOne is the builder for "upsert"-ing
// one DirectLink node.
DirectLinkUpsertOne struct {
create *DirectLinkCreate
}
// DirectLinkUpsert is the "OnConflict" setter.
DirectLinkUpsert struct {
*sql.UpdateSet
}
)
// SetUpdatedAt sets the "updated_at" field.
func (u *DirectLinkUpsert) SetUpdatedAt(v time.Time) *DirectLinkUpsert {
u.Set(directlink.FieldUpdatedAt, v)
return u
}
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateUpdatedAt() *DirectLinkUpsert {
u.SetExcluded(directlink.FieldUpdatedAt)
return u
}
// SetDeletedAt sets the "deleted_at" field.
func (u *DirectLinkUpsert) SetDeletedAt(v time.Time) *DirectLinkUpsert {
u.Set(directlink.FieldDeletedAt, v)
return u
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateDeletedAt() *DirectLinkUpsert {
u.SetExcluded(directlink.FieldDeletedAt)
return u
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DirectLinkUpsert) ClearDeletedAt() *DirectLinkUpsert {
u.SetNull(directlink.FieldDeletedAt)
return u
}
// SetName sets the "name" field.
func (u *DirectLinkUpsert) SetName(v string) *DirectLinkUpsert {
u.Set(directlink.FieldName, v)
return u
}
// UpdateName sets the "name" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateName() *DirectLinkUpsert {
u.SetExcluded(directlink.FieldName)
return u
}
// SetDownloads sets the "downloads" field.
func (u *DirectLinkUpsert) SetDownloads(v int) *DirectLinkUpsert {
u.Set(directlink.FieldDownloads, v)
return u
}
// UpdateDownloads sets the "downloads" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateDownloads() *DirectLinkUpsert {
u.SetExcluded(directlink.FieldDownloads)
return u
}
// AddDownloads adds v to the "downloads" field.
func (u *DirectLinkUpsert) AddDownloads(v int) *DirectLinkUpsert {
u.Add(directlink.FieldDownloads, v)
return u
}
// SetFileID sets the "file_id" field.
func (u *DirectLinkUpsert) SetFileID(v int) *DirectLinkUpsert {
u.Set(directlink.FieldFileID, v)
return u
}
// UpdateFileID sets the "file_id" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateFileID() *DirectLinkUpsert {
u.SetExcluded(directlink.FieldFileID)
return u
}
// SetSpeed sets the "speed" field.
func (u *DirectLinkUpsert) SetSpeed(v int) *DirectLinkUpsert {
u.Set(directlink.FieldSpeed, v)
return u
}
// UpdateSpeed sets the "speed" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateSpeed() *DirectLinkUpsert {
u.SetExcluded(directlink.FieldSpeed)
return u
}
// AddSpeed adds v to the "speed" field.
func (u *DirectLinkUpsert) AddSpeed(v int) *DirectLinkUpsert {
u.Add(directlink.FieldSpeed, v)
return u
}
// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *DirectLinkUpsertOne) UpdateNewValues() *DirectLinkUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is set only on creation: when it was explicitly set on
		// the create builder, keep the stored value instead of overwriting
		// it on conflict.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(directlink.FieldCreatedAt)
		}
	}))
	return u
}
// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
// client.DirectLink.Create().
// OnConflict(sql.ResolveWithIgnore()).
// Exec(ctx)
func (u *DirectLinkUpsertOne) Ignore() *DirectLinkUpsertOne {
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
return u
}
// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *DirectLinkUpsertOne) DoNothing() *DirectLinkUpsertOne {
u.create.conflict = append(u.create.conflict, sql.DoNothing())
return u
}
// Update allows overriding fields `UPDATE` values. See the DirectLinkCreate.OnConflict
// documentation for more info.
func (u *DirectLinkUpsertOne) Update(set func(*DirectLinkUpsert)) *DirectLinkUpsertOne {
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
set(&DirectLinkUpsert{UpdateSet: update})
}))
return u
}
// SetUpdatedAt sets the "updated_at" field.
func (u *DirectLinkUpsertOne) SetUpdatedAt(v time.Time) *DirectLinkUpsertOne {
return u.Update(func(s *DirectLinkUpsert) {
s.SetUpdatedAt(v)
})
}
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateUpdatedAt() *DirectLinkUpsertOne {
return u.Update(func(s *DirectLinkUpsert) {
s.UpdateUpdatedAt()
})
}
// SetDeletedAt sets the "deleted_at" field.
func (u *DirectLinkUpsertOne) SetDeletedAt(v time.Time) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetDeletedAt(v)
	})
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
// That is, on conflict the column is set to the value the conflicting INSERT proposed
// (SQL "excluded" semantics) rather than to an explicit value.
func (u *DirectLinkUpsertOne) UpdateDeletedAt() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateDeletedAt()
	})
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DirectLinkUpsertOne) ClearDeletedAt() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.ClearDeletedAt()
	})
}
// SetName sets the "name" field.
func (u *DirectLinkUpsertOne) SetName(v string) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetName(v)
	})
}
// UpdateName sets the "name" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateName() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateName()
	})
}
// SetDownloads sets the "downloads" field.
func (u *DirectLinkUpsertOne) SetDownloads(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetDownloads(v)
	})
}
// AddDownloads adds v to the "downloads" field.
// On conflict, v is added to the column's existing value.
func (u *DirectLinkUpsertOne) AddDownloads(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.AddDownloads(v)
	})
}
// UpdateDownloads sets the "downloads" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateDownloads() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateDownloads()
	})
}
// SetFileID sets the "file_id" field.
func (u *DirectLinkUpsertOne) SetFileID(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetFileID(v)
	})
}
// UpdateFileID sets the "file_id" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateFileID() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateFileID()
	})
}
// SetSpeed sets the "speed" field.
func (u *DirectLinkUpsertOne) SetSpeed(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetSpeed(v)
	})
}
// AddSpeed adds v to the "speed" field.
// On conflict, v is added to the column's existing value.
func (u *DirectLinkUpsertOne) AddSpeed(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.AddSpeed(v)
	})
}
// UpdateSpeed sets the "speed" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateSpeed() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateSpeed()
	})
}
// Exec executes the query.
// It rejects the call early if no ON CONFLICT options were configured,
// since an upsert without a conflict clause is a programming error.
func (u *DirectLinkUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for DirectLinkCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}
// ExecX is like Exec, but panics if an error occurs.
func (u *DirectLinkUpsertOne) ExecX(ctx context.Context) {
	// Delegate to Exec (instead of u.create.Exec directly) so that the
	// missing-OnConflict validation performed by Exec applies to the
	// panicking variant as well, keeping both entry points consistent.
	if err := u.Exec(ctx); err != nil {
		panic(err)
	}
}
// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *DirectLinkUpsertOne) ID(ctx context.Context) (id int, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}
// IDX is like ID, but panics if an error occurs.
func (u *DirectLinkUpsertOne) IDX(ctx context.Context) int {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// SetRawID forces the given raw ID on the creation mutation,
// bypassing the usual auto-increment assignment.
func (m *DirectLinkCreate) SetRawID(t int) *DirectLinkCreate {
	m.mutation.SetRawID(t)
	return m
}
// DirectLinkCreateBulk is the builder for creating many DirectLink entities in bulk.
type DirectLinkCreateBulk struct {
	config
	// err defers builder-construction errors until Save/Exec is called.
	err error
	// builders holds one create builder per entity to insert.
	builders []*DirectLinkCreate
	// conflict holds the shared ON CONFLICT options for the whole batch.
	conflict []sql.ConflictOption
}
// Save creates the DirectLink entities in the database.
//
// Each builder is wrapped in its own hook chain; the mutators are linked so
// that mutator i invokes mutator i+1, and only the LAST mutator performs the
// actual batched INSERT. This lets per-entity hooks run while still issuing a
// single BatchCreate statement for all rows.
func (dlcb *DirectLinkCreateBulk) Save(ctx context.Context) ([]*DirectLink, error) {
	if dlcb.err != nil {
		return nil, dlcb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(dlcb.builders))
	nodes := make([]*DirectLink, len(dlcb.builders))
	mutators := make([]Mutator, len(dlcb.builders))
	for i := range dlcb.builders {
		func(i int, root context.Context) {
			builder := dlcb.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*DirectLinkMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: chain to the next mutator.
					_, err = mutators[i+1].Mutate(root, dlcb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = dlcb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, dlcb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				// Backfill the database-assigned ID when the driver reports one.
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain from the first mutator; it recursively drives the rest.
		if _, err := mutators[0].Mutate(ctx, dlcb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (dlcb *DirectLinkCreateBulk) SaveX(ctx context.Context) []*DirectLink {
	v, err := dlcb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
// Exec executes the query.
func (dlcb *DirectLinkCreateBulk) Exec(ctx context.Context) error {
	_, err := dlcb.Save(ctx)
	return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dlcb *DirectLinkCreateBulk) ExecX(ctx context.Context) {
	if err := dlcb.Exec(ctx); err != nil {
		panic(err)
	}
}
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.DirectLink.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.DirectLinkUpsert) {
//			u.SetCreatedAt(v)
//		}).
//		Exec(ctx)
func (dlcb *DirectLinkCreateBulk) OnConflict(opts ...sql.ConflictOption) *DirectLinkUpsertBulk {
	dlcb.conflict = opts
	return &DirectLinkUpsertBulk{
		create: dlcb,
	}
}
// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (dlcb *DirectLinkCreateBulk) OnConflictColumns(columns ...string) *DirectLinkUpsertBulk {
	dlcb.conflict = append(dlcb.conflict, sql.ConflictColumns(columns...))
	return &DirectLinkUpsertBulk{
		create: dlcb,
	}
}
// DirectLinkUpsertBulk is the builder for "upsert"-ing
// a bulk of DirectLink nodes.
type DirectLinkUpsertBulk struct {
	create *DirectLinkCreateBulk
}
// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *DirectLinkUpsertBulk) UpdateNewValues() *DirectLinkUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: if any builder set it on create, keep the
		// existing row's value on conflict instead of overwriting it.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(directlink.FieldCreatedAt)
			}
		}
	}))
	return u
}
// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *DirectLinkUpsertBulk) Ignore() *DirectLinkUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}
// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *DirectLinkUpsertBulk) DoNothing() *DirectLinkUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}
// Update allows overriding fields `UPDATE` values. See the DirectLinkCreateBulk.OnConflict
// documentation for more info.
func (u *DirectLinkUpsertBulk) Update(set func(*DirectLinkUpsert)) *DirectLinkUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&DirectLinkUpsert{UpdateSet: update})
	}))
	return u
}
// SetUpdatedAt sets the "updated_at" field.
func (u *DirectLinkUpsertBulk) SetUpdatedAt(v time.Time) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetUpdatedAt(v)
	})
}
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
// That is, on conflict the column is set to the value the conflicting INSERT proposed
// (SQL "excluded" semantics) rather than to an explicit value.
func (u *DirectLinkUpsertBulk) UpdateUpdatedAt() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateUpdatedAt()
	})
}
// SetDeletedAt sets the "deleted_at" field.
func (u *DirectLinkUpsertBulk) SetDeletedAt(v time.Time) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetDeletedAt(v)
	})
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateDeletedAt() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateDeletedAt()
	})
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DirectLinkUpsertBulk) ClearDeletedAt() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.ClearDeletedAt()
	})
}
// SetName sets the "name" field.
func (u *DirectLinkUpsertBulk) SetName(v string) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetName(v)
	})
}
// UpdateName sets the "name" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateName() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateName()
	})
}
// SetDownloads sets the "downloads" field.
func (u *DirectLinkUpsertBulk) SetDownloads(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetDownloads(v)
	})
}
// AddDownloads adds v to the "downloads" field.
// On conflict, v is added to the column's existing value.
func (u *DirectLinkUpsertBulk) AddDownloads(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.AddDownloads(v)
	})
}
// UpdateDownloads sets the "downloads" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateDownloads() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateDownloads()
	})
}
// SetFileID sets the "file_id" field.
func (u *DirectLinkUpsertBulk) SetFileID(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetFileID(v)
	})
}
// UpdateFileID sets the "file_id" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateFileID() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateFileID()
	})
}
// SetSpeed sets the "speed" field.
func (u *DirectLinkUpsertBulk) SetSpeed(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetSpeed(v)
	})
}
// AddSpeed adds v to the "speed" field.
// On conflict, v is added to the column's existing value.
func (u *DirectLinkUpsertBulk) AddSpeed(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.AddSpeed(v)
	})
}
// UpdateSpeed sets the "speed" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateSpeed() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateSpeed()
	})
}
// Exec executes the query.
// It validates that conflict options were configured on the bulk builder
// (not on any individual create builder) before delegating to the insert.
func (u *DirectLinkUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the DirectLinkCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for DirectLinkCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}
// ExecX is like Exec, but panics if an error occurs.
func (u *DirectLinkUpsertBulk) ExecX(ctx context.Context) {
	// Delegate to Exec (instead of u.create.Exec directly) so the same
	// validations run on the panicking variant: deferred builder errors,
	// per-builder OnConflict misuse, and missing conflict options.
	if err := u.Exec(ctx); err != nil {
		panic(err)
	}
}

@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// DirectLinkDelete is the builder for deleting a DirectLink entity.
type DirectLinkDelete struct {
	config
	// hooks run around the delete mutation.
	hooks []Hook
	// mutation carries the predicates and state of the delete operation.
	mutation *DirectLinkMutation
}
// Where appends a list predicates to the DirectLinkDelete builder.
func (dld *DirectLinkDelete) Where(ps ...predicate.DirectLink) *DirectLinkDelete {
	dld.mutation.Where(ps...)
	return dld
}
// Exec executes the deletion query and returns how many vertices were deleted.
func (dld *DirectLinkDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, dld.sqlExec, dld.mutation, dld.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
func (dld *DirectLinkDelete) ExecX(ctx context.Context) int {
	n, err := dld.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}
// sqlExec builds and runs the DELETE statement, returning the affected row count.
func (dld *DirectLinkDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(directlink.Table, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	if ps := dld.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, dld.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		// Wrap FK/unique violations in the typed error callers can errors.As on.
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	dld.mutation.done = true
	return affected, err
}
// DirectLinkDeleteOne is the builder for deleting a single DirectLink entity.
type DirectLinkDeleteOne struct {
	dld *DirectLinkDelete
}
// Where appends a list predicates to the DirectLinkDelete builder.
func (dldo *DirectLinkDeleteOne) Where(ps ...predicate.DirectLink) *DirectLinkDeleteOne {
	dldo.dld.mutation.Where(ps...)
	return dldo
}
// Exec executes the deletion query.
// It returns a *NotFoundError when the delete matched no rows.
func (dldo *DirectLinkDeleteOne) Exec(ctx context.Context) error {
	n, err := dldo.dld.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{directlink.Label}
	default:
		return nil
	}
}
// ExecX is like Exec, but panics if an error occurs.
func (dldo *DirectLinkDeleteOne) ExecX(ctx context.Context) {
	if err := dldo.Exec(ctx); err != nil {
		panic(err)
	}
}

@ -0,0 +1,605 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// DirectLinkQuery is the builder for querying DirectLink entities.
type DirectLinkQuery struct {
	config
	ctx        *QueryContext
	order      []directlink.OrderOption
	inters     []Interceptor
	predicates []predicate.DirectLink
	// withFile, when non-nil, requests eager-loading of the "file" edge.
	withFile *FileQuery
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the DirectLinkQuery builder.
func (dlq *DirectLinkQuery) Where(ps ...predicate.DirectLink) *DirectLinkQuery {
	dlq.predicates = append(dlq.predicates, ps...)
	return dlq
}
// Limit the number of records to be returned by this query.
func (dlq *DirectLinkQuery) Limit(limit int) *DirectLinkQuery {
	dlq.ctx.Limit = &limit
	return dlq
}
// Offset to start from.
func (dlq *DirectLinkQuery) Offset(offset int) *DirectLinkQuery {
	dlq.ctx.Offset = &offset
	return dlq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dlq *DirectLinkQuery) Unique(unique bool) *DirectLinkQuery {
	dlq.ctx.Unique = &unique
	return dlq
}
// Order specifies how the records should be ordered.
func (dlq *DirectLinkQuery) Order(o ...directlink.OrderOption) *DirectLinkQuery {
	dlq.order = append(dlq.order, o...)
	return dlq
}
// QueryFile chains the current query on the "file" edge.
func (dlq *DirectLinkQuery) QueryFile() *FileQuery {
	query := (&FileClient{config: dlq.config}).Query()
	// The traversal path is resolved lazily, when the returned query executes.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := dlq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := dlq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(directlink.Table, directlink.FieldID, selector),
			sqlgraph.To(file.Table, file.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, directlink.FileTable, directlink.FileColumn),
		)
		fromU = sqlgraph.SetNeighbors(dlq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// First returns the first DirectLink entity from the query.
// Returns a *NotFoundError when no DirectLink was found.
func (dlq *DirectLinkQuery) First(ctx context.Context) (*DirectLink, error) {
	nodes, err := dlq.Limit(1).All(setContextOp(ctx, dlq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{directlink.Label}
	}
	return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil instead of panicking.
func (dlq *DirectLinkQuery) FirstX(ctx context.Context) *DirectLink {
	node, err := dlq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}
// FirstID returns the first DirectLink ID from the query.
// Returns a *NotFoundError when no DirectLink ID was found.
func (dlq *DirectLinkQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = dlq.Limit(1).IDs(setContextOp(ctx, dlq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{directlink.Label}
		return
	}
	return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result returns the zero ID instead of panicking.
func (dlq *DirectLinkQuery) FirstIDX(ctx context.Context) int {
	id, err := dlq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
// Only returns a single DirectLink entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one DirectLink entity is found.
// Returns a *NotFoundError when no DirectLink entities are found.
func (dlq *DirectLinkQuery) Only(ctx context.Context) (*DirectLink, error) {
	// Limit(2) is enough to distinguish zero / one / many results.
	nodes, err := dlq.Limit(2).All(setContextOp(ctx, dlq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{directlink.Label}
	default:
		return nil, &NotSingularError{directlink.Label}
	}
}
// OnlyX is like Only, but panics if an error occurs.
func (dlq *DirectLinkQuery) OnlyX(ctx context.Context) *DirectLink {
	node, err := dlq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
// OnlyID is like Only, but returns the only DirectLink ID in the query.
// Returns a *NotSingularError when more than one DirectLink ID is found.
// Returns a *NotFoundError when no entities are found.
func (dlq *DirectLinkQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = dlq.Limit(2).IDs(setContextOp(ctx, dlq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{directlink.Label}
	default:
		err = &NotSingularError{directlink.Label}
	}
	return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (dlq *DirectLinkQuery) OnlyIDX(ctx context.Context) int {
	id, err := dlq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of DirectLinks.
func (dlq *DirectLinkQuery) All(ctx context.Context) ([]*DirectLink, error) {
	ctx = setContextOp(ctx, dlq.ctx, "All")
	if err := dlq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*DirectLink, *DirectLinkQuery]()
	return withInterceptors[[]*DirectLink](ctx, dlq, qr, dlq.inters)
}
// AllX is like All, but panics if an error occurs.
func (dlq *DirectLinkQuery) AllX(ctx context.Context) []*DirectLink {
	nodes, err := dlq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}
// IDs executes the query and returns a list of DirectLink IDs.
func (dlq *DirectLinkQuery) IDs(ctx context.Context) (ids []int, err error) {
	// Graph traversals default to unique IDs unless explicitly overridden.
	if dlq.ctx.Unique == nil && dlq.path != nil {
		dlq.Unique(true)
	}
	ctx = setContextOp(ctx, dlq.ctx, "IDs")
	if err = dlq.Select(directlink.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (dlq *DirectLinkQuery) IDsX(ctx context.Context) []int {
	ids, err := dlq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
// Count returns the count of the given query.
func (dlq *DirectLinkQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, dlq.ctx, "Count")
	if err := dlq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, dlq, querierCount[*DirectLinkQuery](), dlq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (dlq *DirectLinkQuery) CountX(ctx context.Context) int {
	count, err := dlq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}
// Exist returns true if the query has elements in the graph.
func (dlq *DirectLinkQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, dlq.ctx, "Exist")
	switch _, err := dlq.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}
// ExistX is like Exist, but panics if an error occurs.
func (dlq *DirectLinkQuery) ExistX(ctx context.Context) bool {
	exist, err := dlq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
// Clone returns a duplicate of the DirectLinkQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (dlq *DirectLinkQuery) Clone() *DirectLinkQuery {
	if dlq == nil {
		return nil
	}
	return &DirectLinkQuery{
		config:     dlq.config,
		ctx:        dlq.ctx.Clone(),
		order:      append([]directlink.OrderOption{}, dlq.order...),
		inters:     append([]Interceptor{}, dlq.inters...),
		predicates: append([]predicate.DirectLink{}, dlq.predicates...),
		withFile:   dlq.withFile.Clone(),
		// clone intermediate query.
		sql:  dlq.sql.Clone(),
		path: dlq.path,
	}
}
// WithFile tells the query-builder to eager-load the nodes that are connected to
// the "file" edge. The optional arguments are used to configure the query builder of the edge.
func (dlq *DirectLinkQuery) WithFile(opts ...func(*FileQuery)) *DirectLinkQuery {
	query := (&FileClient{config: dlq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	dlq.withFile = query
	return dlq
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.DirectLink.Query().
//		GroupBy(directlink.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (dlq *DirectLinkQuery) GroupBy(field string, fields ...string) *DirectLinkGroupBy {
	dlq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &DirectLinkGroupBy{build: dlq}
	grbuild.flds = &dlq.ctx.Fields
	grbuild.label = directlink.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.DirectLink.Query().
//		Select(directlink.FieldCreatedAt).
//		Scan(ctx, &v)
func (dlq *DirectLinkQuery) Select(fields ...string) *DirectLinkSelect {
	dlq.ctx.Fields = append(dlq.ctx.Fields, fields...)
	sbuild := &DirectLinkSelect{DirectLinkQuery: dlq}
	sbuild.label = directlink.Label
	sbuild.flds, sbuild.scan = &dlq.ctx.Fields, sbuild.Scan
	return sbuild
}
// Aggregate returns a DirectLinkSelect configured with the given aggregations.
func (dlq *DirectLinkQuery) Aggregate(fns ...AggregateFunc) *DirectLinkSelect {
	return dlq.Select().Aggregate(fns...)
}
// prepareQuery validates interceptors and selected fields, and resolves the
// lazy traversal path (if any) into the intermediate SQL selector.
func (dlq *DirectLinkQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range dlq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, dlq); err != nil {
				return err
			}
		}
	}
	for _, f := range dlq.ctx.Fields {
		if !directlink.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if dlq.path != nil {
		prev, err := dlq.path(ctx)
		if err != nil {
			return err
		}
		dlq.sql = prev
	}
	return nil
}
// sqlAll scans all matching rows into DirectLink nodes and eager-loads
// any requested edges.
func (dlq *DirectLinkQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DirectLink, error) {
	var (
		nodes       = []*DirectLink{}
		_spec       = dlq.querySpec()
		loadedTypes = [1]bool{
			dlq.withFile != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*DirectLink).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &DirectLink{config: dlq.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, dlq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := dlq.withFile; query != nil {
		if err := dlq.loadFile(ctx, query, nodes, nil,
			func(n *DirectLink, e *File) { n.Edges.File = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// loadFile eager-loads the "file" M2O edge for the given nodes with a single
// IN query over the distinct file_id foreign keys, then assigns each File
// back to every DirectLink that references it.
func (dlq *DirectLinkQuery) loadFile(ctx context.Context, query *FileQuery, nodes []*DirectLink, init func(*DirectLink), assign func(*DirectLink, *File)) error {
	ids := make([]int, 0, len(nodes))
	nodeids := make(map[int][]*DirectLink)
	for i := range nodes {
		fk := nodes[i].FileID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(file.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "file_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
// sqlCount executes a COUNT over the current query spec.
func (dlq *DirectLinkQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := dlq.querySpec()
	_spec.Node.Columns = dlq.ctx.Fields
	if len(dlq.ctx.Fields) > 0 {
		_spec.Unique = dlq.ctx.Unique != nil && *dlq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, dlq.driver, _spec)
}
// querySpec translates the builder state (fields, predicates, limit/offset,
// ordering, uniqueness) into a sqlgraph.QuerySpec.
func (dlq *DirectLinkQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	_spec.From = dlq.sql
	if unique := dlq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if dlq.path != nil {
		// Traversals may join duplicate rows; force DISTINCT by default.
		_spec.Unique = true
	}
	if fields := dlq.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, directlink.FieldID)
		for i := range fields {
			if fields[i] != directlink.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		if dlq.withFile != nil {
			// The FK column is required to resolve the eager-loaded edge.
			_spec.Node.AddColumnOnce(directlink.FieldFileID)
		}
	}
	if ps := dlq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := dlq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := dlq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := dlq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
// sqlQuery builds the raw SQL selector for this query, applying selection,
// predicates, ordering, and pagination on top of any traversal selector.
func (dlq *DirectLinkQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(dlq.driver.Dialect())
	t1 := builder.Table(directlink.Table)
	columns := dlq.ctx.Fields
	if len(columns) == 0 {
		columns = directlink.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if dlq.sql != nil {
		selector = dlq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if dlq.ctx.Unique != nil && *dlq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range dlq.predicates {
		p(selector)
	}
	for _, p := range dlq.order {
		p(selector)
	}
	if offset := dlq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := dlq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// DirectLinkGroupBy is the group-by builder for DirectLink entities.
type DirectLinkGroupBy struct {
	selector
	build *DirectLinkQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (dlgb *DirectLinkGroupBy) Aggregate(fns ...AggregateFunc) *DirectLinkGroupBy {
	dlgb.fns = append(dlgb.fns, fns...)
	return dlgb
}
// Scan applies the selector query and scans the result into the given value.
func (dlgb *DirectLinkGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, dlgb.build.ctx, "GroupBy")
	if err := dlgb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*DirectLinkQuery, *DirectLinkGroupBy](ctx, dlgb.build, dlgb, dlgb.build.inters, v)
}
// sqlScan runs the GROUP BY query and scans the rows into v.
func (dlgb *DirectLinkGroupBy) sqlScan(ctx context.Context, root *DirectLinkQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(dlgb.fns))
	for _, fn := range dlgb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// No explicit selection: select the group-by columns plus aggregations.
		columns := make([]string, 0, len(*dlgb.flds)+len(dlgb.fns))
		for _, f := range *dlgb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*dlgb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := dlgb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
// DirectLinkSelect is the builder for selecting fields of DirectLink entities.
type DirectLinkSelect struct {
	*DirectLinkQuery
	selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (dls *DirectLinkSelect) Aggregate(fns ...AggregateFunc) *DirectLinkSelect {
	dls.fns = append(dls.fns, fns...)
	return dls
}
// Scan applies the selector query and scans the result into the given value.
func (dls *DirectLinkSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, dls.ctx, "Select")
	if err := dls.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*DirectLinkQuery, *DirectLinkSelect](ctx, dls.DirectLinkQuery, dls, dls.inters, v)
}
// sqlScan runs the SELECT query and scans the rows into v,
// appending any configured aggregations to the selection.
func (dls *DirectLinkSelect) sqlScan(ctx context.Context, root *DirectLinkQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(dls.fns))
	for _, fn := range dls.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*dls.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := dls.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

@ -0,0 +1,549 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// DirectLinkUpdate is the builder for updating DirectLink entities.
type DirectLinkUpdate struct {
	config
	hooks    []Hook
	mutation *DirectLinkMutation
}
// Where appends a list predicates to the DirectLinkUpdate builder.
func (dlu *DirectLinkUpdate) Where(ps ...predicate.DirectLink) *DirectLinkUpdate {
	dlu.mutation.Where(ps...)
	return dlu
}
// SetUpdatedAt sets the "updated_at" field.
func (dlu *DirectLinkUpdate) SetUpdatedAt(t time.Time) *DirectLinkUpdate {
	dlu.mutation.SetUpdatedAt(t)
	return dlu
}
// SetDeletedAt sets the "deleted_at" field.
func (dlu *DirectLinkUpdate) SetDeletedAt(t time.Time) *DirectLinkUpdate {
	dlu.mutation.SetDeletedAt(t)
	return dlu
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableDeletedAt(t *time.Time) *DirectLinkUpdate {
	if t != nil {
		dlu.SetDeletedAt(*t)
	}
	return dlu
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (dlu *DirectLinkUpdate) ClearDeletedAt() *DirectLinkUpdate {
	dlu.mutation.ClearDeletedAt()
	return dlu
}
// SetName sets the "name" field.
func (dlu *DirectLinkUpdate) SetName(s string) *DirectLinkUpdate {
	dlu.mutation.SetName(s)
	return dlu
}
// SetNillableName sets the "name" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableName(s *string) *DirectLinkUpdate {
	if s != nil {
		dlu.SetName(*s)
	}
	return dlu
}
// SetDownloads sets the "downloads" field.
// It resets any pending Add so the column is overwritten, not incremented.
func (dlu *DirectLinkUpdate) SetDownloads(i int) *DirectLinkUpdate {
	dlu.mutation.ResetDownloads()
	dlu.mutation.SetDownloads(i)
	return dlu
}
// SetNillableDownloads sets the "downloads" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableDownloads(i *int) *DirectLinkUpdate {
	if i != nil {
		dlu.SetDownloads(*i)
	}
	return dlu
}
// AddDownloads adds i to the "downloads" field.
func (dlu *DirectLinkUpdate) AddDownloads(i int) *DirectLinkUpdate {
	dlu.mutation.AddDownloads(i)
	return dlu
}
// SetFileID sets the "file_id" field.
func (dlu *DirectLinkUpdate) SetFileID(i int) *DirectLinkUpdate {
	dlu.mutation.SetFileID(i)
	return dlu
}
// SetNillableFileID sets the "file_id" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableFileID(i *int) *DirectLinkUpdate {
	if i != nil {
		dlu.SetFileID(*i)
	}
	return dlu
}
// SetSpeed sets the "speed" field.
// It resets any pending Add so the column is overwritten, not incremented.
func (dlu *DirectLinkUpdate) SetSpeed(i int) *DirectLinkUpdate {
	dlu.mutation.ResetSpeed()
	dlu.mutation.SetSpeed(i)
	return dlu
}
// SetNillableSpeed sets the "speed" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableSpeed(i *int) *DirectLinkUpdate {
	if i != nil {
		dlu.SetSpeed(*i)
	}
	return dlu
}
// AddSpeed adds i to the "speed" field.
func (dlu *DirectLinkUpdate) AddSpeed(i int) *DirectLinkUpdate {
	dlu.mutation.AddSpeed(i)
	return dlu
}
// SetFile sets the "file" edge to the File entity.
func (dlu *DirectLinkUpdate) SetFile(f *File) *DirectLinkUpdate {
	return dlu.SetFileID(f.ID)
}
// Mutation returns the DirectLinkMutation object of the builder.
func (dlu *DirectLinkUpdate) Mutation() *DirectLinkMutation {
	return dlu.mutation
}
// ClearFile clears the "file" edge to the File entity.
func (dlu *DirectLinkUpdate) ClearFile() *DirectLinkUpdate {
	dlu.mutation.ClearFile()
	return dlu
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (dlu *DirectLinkUpdate) Save(ctx context.Context) (int, error) {
if err := dlu.defaults(); err != nil {
return 0, err
}
return withHooks(ctx, dlu.sqlSave, dlu.mutation, dlu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (dlu *DirectLinkUpdate) SaveX(ctx context.Context) int {
affected, err := dlu.Save(ctx)
if err != nil {
panic(err)
}
return affected
}
// Exec executes the query.
func (dlu *DirectLinkUpdate) Exec(ctx context.Context) error {
_, err := dlu.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dlu *DirectLinkUpdate) ExecX(ctx context.Context) {
if err := dlu.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (dlu *DirectLinkUpdate) defaults() error {
	if _, ok := dlu.mutation.UpdatedAt(); !ok {
		// UpdateDefaultUpdatedAt is registered by the generated runtime package;
		// nil here means ent/runtime was never imported by the application.
		if directlink.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized directlink.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := directlink.UpdateDefaultUpdatedAt()
		dlu.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
func (dlu *DirectLinkUpdate) check() error {
	// The file edge is required: it may only be cleared when a new file ID is
	// set in the same mutation.
	if _, ok := dlu.mutation.FileID(); dlu.mutation.FileCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "DirectLink.file"`)
	}
	return nil
}

// sqlSave translates the accumulated mutation into an UPDATE spec and
// executes it, returning the number of affected rows.
func (dlu *DirectLinkUpdate) sqlSave(ctx context.Context) (n int, err error) {
	if err := dlu.check(); err != nil {
		return n, err
	}
	_spec := sqlgraph.NewUpdateSpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	// Apply Where(...) predicates, if any.
	if ps := dlu.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Absolute field assignments and clears.
	if value, ok := dlu.mutation.UpdatedAt(); ok {
		_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := dlu.mutation.DeletedAt(); ok {
		_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
	}
	if dlu.mutation.DeletedAtCleared() {
		_spec.ClearField(directlink.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := dlu.mutation.Name(); ok {
		_spec.SetField(directlink.FieldName, field.TypeString, value)
	}
	if value, ok := dlu.mutation.Downloads(); ok {
		_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
	}
	// Incremental (Add*) deltas are applied separately from absolute sets.
	if value, ok := dlu.mutation.AddedDownloads(); ok {
		_spec.AddField(directlink.FieldDownloads, field.TypeInt, value)
	}
	if value, ok := dlu.mutation.Speed(); ok {
		_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
	}
	if value, ok := dlu.mutation.AddedSpeed(); ok {
		_spec.AddField(directlink.FieldSpeed, field.TypeInt, value)
	}
	// Edge clear is queued before edge add so a re-assigned file edge is
	// replaced rather than duplicated.
	if dlu.mutation.FileCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := dlu.mutation.FileIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Execute and translate low-level sqlgraph errors into ent error types.
	if n, err = sqlgraph.UpdateNodes(ctx, dlu.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{directlink.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	dlu.mutation.done = true
	return n, nil
}
// DirectLinkUpdateOne is the builder for updating a single DirectLink entity.
type DirectLinkUpdateOne struct {
	config
	// fields are extra columns to select back after the update (see Select).
	fields []string
	// hooks are mutation middleware applied via withHooks on Save.
	hooks []Hook
	// mutation accumulates the field/edge changes to persist.
	mutation *DirectLinkMutation
}

// SetUpdatedAt sets the "updated_at" field.
func (dluo *DirectLinkUpdateOne) SetUpdatedAt(t time.Time) *DirectLinkUpdateOne {
	dluo.mutation.SetUpdatedAt(t)
	return dluo
}

// SetDeletedAt sets the "deleted_at" field.
func (dluo *DirectLinkUpdateOne) SetDeletedAt(t time.Time) *DirectLinkUpdateOne {
	dluo.mutation.SetDeletedAt(t)
	return dluo
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableDeletedAt(t *time.Time) *DirectLinkUpdateOne {
	if t != nil {
		dluo.SetDeletedAt(*t)
	}
	return dluo
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (dluo *DirectLinkUpdateOne) ClearDeletedAt() *DirectLinkUpdateOne {
	dluo.mutation.ClearDeletedAt()
	return dluo
}

// SetName sets the "name" field.
func (dluo *DirectLinkUpdateOne) SetName(s string) *DirectLinkUpdateOne {
	dluo.mutation.SetName(s)
	return dluo
}

// SetNillableName sets the "name" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableName(s *string) *DirectLinkUpdateOne {
	if s != nil {
		dluo.SetName(*s)
	}
	return dluo
}

// SetDownloads sets the "downloads" field.
// ResetDownloads discards any delta queued by AddDownloads so the explicit value wins.
func (dluo *DirectLinkUpdateOne) SetDownloads(i int) *DirectLinkUpdateOne {
	dluo.mutation.ResetDownloads()
	dluo.mutation.SetDownloads(i)
	return dluo
}

// SetNillableDownloads sets the "downloads" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableDownloads(i *int) *DirectLinkUpdateOne {
	if i != nil {
		dluo.SetDownloads(*i)
	}
	return dluo
}

// AddDownloads adds i to the "downloads" field.
func (dluo *DirectLinkUpdateOne) AddDownloads(i int) *DirectLinkUpdateOne {
	dluo.mutation.AddDownloads(i)
	return dluo
}

// SetFileID sets the "file_id" field.
func (dluo *DirectLinkUpdateOne) SetFileID(i int) *DirectLinkUpdateOne {
	dluo.mutation.SetFileID(i)
	return dluo
}

// SetNillableFileID sets the "file_id" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableFileID(i *int) *DirectLinkUpdateOne {
	if i != nil {
		dluo.SetFileID(*i)
	}
	return dluo
}

// SetSpeed sets the "speed" field.
// ResetSpeed discards any delta queued by AddSpeed so the explicit value wins.
func (dluo *DirectLinkUpdateOne) SetSpeed(i int) *DirectLinkUpdateOne {
	dluo.mutation.ResetSpeed()
	dluo.mutation.SetSpeed(i)
	return dluo
}

// SetNillableSpeed sets the "speed" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableSpeed(i *int) *DirectLinkUpdateOne {
	if i != nil {
		dluo.SetSpeed(*i)
	}
	return dluo
}

// AddSpeed adds i to the "speed" field.
func (dluo *DirectLinkUpdateOne) AddSpeed(i int) *DirectLinkUpdateOne {
	dluo.mutation.AddSpeed(i)
	return dluo
}

// SetFile sets the "file" edge to the File entity.
func (dluo *DirectLinkUpdateOne) SetFile(f *File) *DirectLinkUpdateOne {
	return dluo.SetFileID(f.ID)
}

// Mutation returns the DirectLinkMutation object of the builder.
func (dluo *DirectLinkUpdateOne) Mutation() *DirectLinkMutation {
	return dluo.mutation
}

// ClearFile clears the "file" edge to the File entity.
func (dluo *DirectLinkUpdateOne) ClearFile() *DirectLinkUpdateOne {
	dluo.mutation.ClearFile()
	return dluo
}

// Where appends a list predicates to the DirectLinkUpdate builder.
func (dluo *DirectLinkUpdateOne) Where(ps ...predicate.DirectLink) *DirectLinkUpdateOne {
	dluo.mutation.Where(ps...)
	return dluo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (dluo *DirectLinkUpdateOne) Select(field string, fields ...string) *DirectLinkUpdateOne {
	dluo.fields = append([]string{field}, fields...)
	return dluo
}
// Save executes the query and returns the updated DirectLink entity.
func (dluo *DirectLinkUpdateOne) Save(ctx context.Context) (*DirectLink, error) {
	if err := dluo.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, dluo.sqlSave, dluo.mutation, dluo.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (dluo *DirectLinkUpdateOne) SaveX(ctx context.Context) *DirectLink {
	node, err := dluo.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (dluo *DirectLinkUpdateOne) Exec(ctx context.Context) error {
	_, err := dluo.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dluo *DirectLinkUpdateOne) ExecX(ctx context.Context) {
	if err := dluo.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (dluo *DirectLinkUpdateOne) defaults() error {
	if _, ok := dluo.mutation.UpdatedAt(); !ok {
		// UpdateDefaultUpdatedAt is registered by the generated runtime package;
		// nil here means ent/runtime was never imported by the application.
		if directlink.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized directlink.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := directlink.UpdateDefaultUpdatedAt()
		dluo.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
func (dluo *DirectLinkUpdateOne) check() error {
	// The file edge is required: it may only be cleared when a new file ID is
	// set in the same mutation.
	if _, ok := dluo.mutation.FileID(); dluo.mutation.FileCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "DirectLink.file"`)
	}
	return nil
}

// sqlSave translates the accumulated mutation into an UPDATE spec scoped to a
// single entity ID, executes it, and scans the updated row back into _node.
func (dluo *DirectLinkUpdateOne) sqlSave(ctx context.Context) (_node *DirectLink, err error) {
	if err := dluo.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	// Unlike DirectLinkUpdate, a concrete entity ID is mandatory here.
	id, ok := dluo.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DirectLink.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor Select(...): always return the ID column plus the requested fields.
	if fields := dluo.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, directlink.FieldID)
		for _, f := range fields {
			if !directlink.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != directlink.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := dluo.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := dluo.mutation.UpdatedAt(); ok {
		_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := dluo.mutation.DeletedAt(); ok {
		_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
	}
	if dluo.mutation.DeletedAtCleared() {
		_spec.ClearField(directlink.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := dluo.mutation.Name(); ok {
		_spec.SetField(directlink.FieldName, field.TypeString, value)
	}
	if value, ok := dluo.mutation.Downloads(); ok {
		_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
	}
	if value, ok := dluo.mutation.AddedDownloads(); ok {
		_spec.AddField(directlink.FieldDownloads, field.TypeInt, value)
	}
	if value, ok := dluo.mutation.Speed(); ok {
		_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
	}
	if value, ok := dluo.mutation.AddedSpeed(); ok {
		_spec.AddField(directlink.FieldSpeed, field.TypeInt, value)
	}
	if dluo.mutation.FileCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := dluo.mutation.FileIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Wire the returned row into a fresh DirectLink value.
	_node = &DirectLink{config: dluo.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, dluo.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{directlink.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	dluo.mutation.done = true
	return _node, nil
}

@ -0,0 +1,632 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"reflect"
"sync"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
"github.com/cloudreve/Cloudreve/v4/ent/setting"
"github.com/cloudreve/Cloudreve/v4/ent/share"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/task"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// ent aliases to avoid import conflicts in user's code.
// They re-export the core entgo.io/ent types so that callers of this package
// do not have to import entgo.io/ent directly.
type (
	Op            = ent.Op
	Hook          = ent.Hook
	Value         = ent.Value
	Query         = ent.Query
	QueryContext  = ent.QueryContext
	Querier       = ent.Querier
	QuerierFunc   = ent.QuerierFunc
	Interceptor   = ent.Interceptor
	InterceptFunc = ent.InterceptFunc
	Traverser     = ent.Traverser
	TraverseFunc  = ent.TraverseFunc
	Policy        = ent.Policy
	Mutator       = ent.Mutator
	Mutation      = ent.Mutation
	MutateFunc    = ent.MutateFunc
)
// clientCtxKey is the private context key under which a *Client is stored.
type clientCtxKey struct{}

// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
	c, _ := ctx.Value(clientCtxKey{}).(*Client)
	return c
}

// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
	return context.WithValue(parent, clientCtxKey{}, c)
}

// txCtxKey is the private context key under which a *Tx is stored.
type txCtxKey struct{}

// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
	tx, _ := ctx.Value(txCtxKey{}).(*Tx)
	return tx
}

// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
	return context.WithValue(parent, txCtxKey{}, tx)
}

// OrderFunc applies an ordering on the sql selector.
// Deprecated: Use Asc/Desc functions or the package builders instead.
type OrderFunc func(*sql.Selector)

var (
	// initCheck guards the one-time construction of columnCheck in checkColumn.
	initCheck sync.Once
	// columnCheck validates a (table, column) pair against the schema.
	columnCheck sql.ColumnCheck
)
// columnChecker checks if the column exists in the given table.
// The table -> validator map is built lazily, exactly once (via initCheck),
// from the generated ValidColumn functions of every schema in this package.
func checkColumn(table, column string) error {
	initCheck.Do(func() {
		columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
			davaccount.Table:    davaccount.ValidColumn,
			directlink.Table:    directlink.ValidColumn,
			entity.Table:        entity.ValidColumn,
			file.Table:          file.ValidColumn,
			group.Table:         group.ValidColumn,
			metadata.Table:      metadata.ValidColumn,
			node.Table:          node.ValidColumn,
			passkey.Table:       passkey.ValidColumn,
			setting.Table:       setting.ValidColumn,
			share.Table:         share.ValidColumn,
			storagepolicy.Table: storagepolicy.ValidColumn,
			task.Table:          task.ValidColumn,
			user.Table:          user.ValidColumn,
		})
	})
	return columnCheck(table, column)
}
// Asc applies the given fields in ASC order.
func Asc(fields ...string) func(*sql.Selector) {
	return orderByDirection(sql.Asc, fields)
}

// Desc applies the given fields in DESC order.
func Desc(fields ...string) func(*sql.Selector) {
	return orderByDirection(sql.Desc, fields)
}

// orderByDirection validates every column against the selector's table and
// appends it to the ORDER BY clause using the given direction formatter
// (sql.Asc or sql.Desc). Validation failures are accumulated on the selector
// and surfaced when the query executes.
func orderByDirection(dir func(string) string, fields []string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, column := range fields {
			if err := checkColumn(s.TableName(), column); err != nil {
				s.AddError(&ValidationError{Name: column, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(dir(s.C(column)))
		}
	}
}
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
type AggregateFunc func(*sql.Selector) string

// As is a pseudo aggregation function for renaming another other functions with custom names. For example:
//
//	GroupBy(field1, field2).
//	Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
//	Scan(ctx, &v)
func As(fn AggregateFunc, end string) AggregateFunc {
	return func(s *sql.Selector) string {
		return sql.As(fn(s), end)
	}
}

// Count applies the "count" aggregation function on each group.
func Count() AggregateFunc {
	return func(s *sql.Selector) string {
		return sql.Count("*")
	}
}

// Max applies the "max" aggregation function on the given field of each group.
func Max(field string) AggregateFunc {
	return aggregateColumn(field, sql.Max)
}

// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
	return aggregateColumn(field, sql.Avg)
}

// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
	return aggregateColumn(field, sql.Min)
}

// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
	return aggregateColumn(field, sql.Sum)
}

// aggregateColumn validates that field exists on the selector's table and, if
// so, applies fn (sql.Max, sql.Avg, ...) to the qualified column. On a
// validation failure it records the error on the selector and returns "".
func aggregateColumn(field string, fn func(string) string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return fn(s.C(field))
	}
}
// ValidationError returns when validating a field or edge fails.
type ValidationError struct {
Name string // Field or edge name.
err error
}
// Error implements the error interface.
func (e *ValidationError) Error() string {
return e.err.Error()
}
// Unwrap implements the errors.Wrapper interface.
func (e *ValidationError) Unwrap() error {
return e.err
}
// IsValidationError returns a boolean indicating whether the error is a validation error.
func IsValidationError(err error) bool {
if err == nil {
return false
}
var e *ValidationError
return errors.As(err, &e)
}
// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
type NotFoundError struct {
label string
}
// Error implements the error interface.
func (e *NotFoundError) Error() string {
return "ent: " + e.label + " not found"
}
// IsNotFound returns a boolean indicating whether the error is a not found error.
func IsNotFound(err error) bool {
if err == nil {
return false
}
var e *NotFoundError
return errors.As(err, &e)
}
// MaskNotFound masks not found error.
func MaskNotFound(err error) error {
if IsNotFound(err) {
return nil
}
return err
}
// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database.
type NotSingularError struct {
label string
}
// Error implements the error interface.
func (e *NotSingularError) Error() string {
return "ent: " + e.label + " not singular"
}
// IsNotSingular returns a boolean indicating whether the error is a not singular error.
func IsNotSingular(err error) bool {
if err == nil {
return false
}
var e *NotSingularError
return errors.As(err, &e)
}
// NotLoadedError returns when trying to get a node that was not loaded by the query.
type NotLoadedError struct {
edge string
}
// Error implements the error interface.
func (e *NotLoadedError) Error() string {
return "ent: " + e.edge + " edge was not loaded"
}
// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
func IsNotLoaded(err error) bool {
if err == nil {
return false
}
var e *NotLoadedError
return errors.As(err, &e)
}
// ConstraintError returns when trying to create/update one or more entities and
// one or more of their constraints failed. For example, violation of edge or
// field uniqueness.
type ConstraintError struct {
msg string
wrap error
}
// Error implements the error interface.
func (e ConstraintError) Error() string {
return "ent: constraint failed: " + e.msg
}
// Unwrap implements the errors.Wrapper interface.
func (e *ConstraintError) Unwrap() error {
return e.wrap
}
// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
func IsConstraintError(err error) bool {
if err == nil {
return false
}
var e *ConstraintError
return errors.As(err, &e)
}
// selector embedded by the different Select/GroupBy builders.
type selector struct {
	label string
	flds  *[]string
	fns   []AggregateFunc
	scan  func(context.Context, any) error
}

// ScanX is like Scan, but panics if an error occurs.
func (s *selector) ScanX(ctx context.Context, v any) {
	if err := s.scan(ctx, v); err != nil {
		panic(err)
	}
}

// scanSlice scans the single selected field into a []T. The name argument is
// the plural getter name (e.g. "Strings") used to build error messages.
func scanSlice[T any](ctx context.Context, s *selector, name string) ([]T, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: " + name + " is not achievable when selecting more than 1 field")
	}
	var vs []T
	if err := s.scan(ctx, &vs); err != nil {
		return nil, err
	}
	return vs, nil
}

// scanSingle scans exactly one selected value of type T, reporting a
// NotFoundError for zero rows and an error for more than one row.
func scanSingle[T any](ctx context.Context, s *selector, name string) (T, error) {
	var zero T
	vs, err := scanSlice[T](ctx, s, name)
	if err != nil {
		return zero, err
	}
	switch len(vs) {
	case 1:
		return vs[0], nil
	case 0:
		return zero, &NotFoundError{s.label}
	default:
		return zero, fmt.Errorf("ent: %s returned %d results when one was expected", name, len(vs))
	}
}

// mustScan panics on err and otherwise returns v; it backs the X-variants.
func mustScan[T any](v T, err error) T {
	if err != nil {
		panic(err)
	}
	return v
}

// Strings returns list of strings from a selector. It is only allowed when selecting one field.
func (s *selector) Strings(ctx context.Context) ([]string, error) {
	return scanSlice[string](ctx, s, "Strings")
}

// StringsX is like Strings, but panics if an error occurs.
func (s *selector) StringsX(ctx context.Context) []string {
	return mustScan(s.Strings(ctx))
}

// String returns a single string from a selector. It is only allowed when selecting one field.
func (s *selector) String(ctx context.Context) (string, error) {
	return scanSingle[string](ctx, s, "Strings")
}

// StringX is like String, but panics if an error occurs.
func (s *selector) StringX(ctx context.Context) string {
	return mustScan(s.String(ctx))
}

// Ints returns list of ints from a selector. It is only allowed when selecting one field.
func (s *selector) Ints(ctx context.Context) ([]int, error) {
	return scanSlice[int](ctx, s, "Ints")
}

// IntsX is like Ints, but panics if an error occurs.
func (s *selector) IntsX(ctx context.Context) []int {
	return mustScan(s.Ints(ctx))
}

// Int returns a single int from a selector. It is only allowed when selecting one field.
func (s *selector) Int(ctx context.Context) (int, error) {
	return scanSingle[int](ctx, s, "Ints")
}

// IntX is like Int, but panics if an error occurs.
func (s *selector) IntX(ctx context.Context) int {
	return mustScan(s.Int(ctx))
}

// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
	return scanSlice[float64](ctx, s, "Float64s")
}

// Float64sX is like Float64s, but panics if an error occurs.
func (s *selector) Float64sX(ctx context.Context) []float64 {
	return mustScan(s.Float64s(ctx))
}

// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
func (s *selector) Float64(ctx context.Context) (float64, error) {
	return scanSingle[float64](ctx, s, "Float64s")
}

// Float64X is like Float64, but panics if an error occurs.
func (s *selector) Float64X(ctx context.Context) float64 {
	return mustScan(s.Float64(ctx))
}

// Bools returns list of bools from a selector. It is only allowed when selecting one field.
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
	return scanSlice[bool](ctx, s, "Bools")
}

// BoolsX is like Bools, but panics if an error occurs.
func (s *selector) BoolsX(ctx context.Context) []bool {
	return mustScan(s.Bools(ctx))
}

// Bool returns a single bool from a selector. It is only allowed when selecting one field.
func (s *selector) Bool(ctx context.Context) (bool, error) {
	return scanSingle[bool](ctx, s, "Bools")
}

// BoolX is like Bool, but panics if an error occurs.
func (s *selector) BoolX(ctx context.Context) bool {
	return mustScan(s.Bool(ctx))
}
// withHooks invokes the builder operation with the given hooks, if any.
// Hooks are chained in reverse registration order, so the first registered
// hook becomes the outermost wrapper around exec.
func withHooks[V Value, M any, PM interface {
	*M
	Mutation
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
	if len(hooks) == 0 {
		return exec(ctx)
	}
	var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
		mutationT, ok := any(m).(PM)
		if !ok {
			return nil, fmt.Errorf("unexpected mutation type %T", m)
		}
		// Set the mutation to the builder.
		*mutation = *mutationT
		return exec(ctx)
	})
	for i := len(hooks) - 1; i >= 0; i-- {
		if hooks[i] == nil {
			return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
		}
		mut = hooks[i](mut)
	}
	v, err := mut.Mutate(ctx, mutation)
	if err != nil {
		return value, err
	}
	// The hook chain returns an untyped Value; assert it back to the
	// builder's concrete result type V.
	nv, ok := v.(V)
	if !ok {
		return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
	}
	return nv, nil
}
// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
	if ent.QueryFromContext(ctx) == nil {
		qc.Op = op
		ctx = ent.NewQueryContext(ctx, qc)
	}
	return ctx
}

// querierAll returns a Querier that runs the query's sqlAll method after
// asserting the runtime Query back to its concrete builder type Q.
func querierAll[V Value, Q interface {
	sqlAll(context.Context, ...queryHook) (V, error)
}]() Querier {
	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		return query.sqlAll(ctx)
	})
}

// querierCount is the counting counterpart of querierAll.
func querierCount[Q interface {
	sqlCount(context.Context) (int, error)
}]() Querier {
	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		return query.sqlCount(ctx)
	})
}
// withInterceptors wraps qr with the given interceptors (applied in reverse
// order so the first interceptor is outermost), runs the query, and asserts
// the result to V.
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
	for i := len(inters) - 1; i >= 0; i-- {
		qr = inters[i].Intercept(qr)
	}
	rv, err := qr.Query(ctx, q)
	if err != nil {
		return v, err
	}
	vt, ok := rv.(V)
	if !ok {
		return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
	}
	return vt, nil
}

// scanWithInterceptors runs a Select/GroupBy scan through the interceptor
// chain and copies the scanned result back into v via reflection, so the
// caller's destination is populated even when interceptors replace the value.
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
	sqlScan(context.Context, Q1, any) error
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
	rv := reflect.ValueOf(v)
	var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q1)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
			return nil, err
		}
		// Hand interceptors the dereferenced value when possible.
		if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
			return rv.Elem().Interface(), nil
		}
		return v, nil
	})
	for i := len(inters) - 1; i >= 0; i-- {
		qr = inters[i].Intercept(qr)
	}
	vv, err := qr.Query(ctx, rootQuery)
	if err != nil {
		return err
	}
	// Copy the (possibly replaced) result back into the caller's pointer,
	// tolerating both pointer and value shapes; mismatched types are a no-op.
	switch rv2 := reflect.ValueOf(vv); {
	case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
	case rv.Type() == rv2.Type():
		rv.Elem().Set(rv2.Elem())
	case rv.Elem().Type() == rv2.Type():
		rv.Elem().Set(rv2)
	}
	return nil
}

// queryHook describes an internal hook for the different sqlAll methods.
type queryHook func(context.Context, *sqlgraph.QuerySpec)

@ -0,0 +1,29 @@
//go:build ignore
package main
import (
"log"
"entgo.io/ent/entc"
"entgo.io/ent/entc/gen"
)
// main runs the ent code generator over ./schema with the feature flags and
// custom templates used by this project. It is excluded from normal builds
// by the "ignore" build tag and invoked via `go run`.
func main() {
	if err := entc.Generate("./schema", &gen.Config{
		Features: []gen.Feature{
			gen.FeatureIntercept,
			gen.FeatureSnapshot,
			// FeatureUpsert was accidentally listed twice; once is enough.
			gen.FeatureUpsert,
			gen.FeatureExecQuery,
		},
		Templates: []*gen.Template{
			gen.MustParse(gen.NewTemplate("edge_helper").ParseFiles("templates/edgehelper.tmpl")),
			gen.MustParse(gen.NewTemplate("mutation_helper").ParseFiles("templates/mutationhelper.tmpl")),
			gen.MustParse(gen.NewTemplate("create_helper").ParseFiles("templates/createhelper.tmpl")),
		},
	}); err != nil {
		// log.Fatal with mixed string/error operands prints no separator
		// ("...codegen:err"); Fatalf formats it properly.
		log.Fatalf("running ent codegen: %v", err)
	}
}

@ -0,0 +1,317 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"encoding/json"
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/gofrs/uuid"
)
// Entity is the model entity for the Entity schema.
type Entity struct {
	config `json:"-"` // embedded builder config; excluded from JSON output
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Type holds the value of the "type" field.
	Type int `json:"type,omitempty"`
	// Source holds the value of the "source" field.
	Source string `json:"source,omitempty"`
	// Size holds the value of the "size" field.
	Size int64 `json:"size,omitempty"`
	// ReferenceCount holds the value of the "reference_count" field.
	ReferenceCount int `json:"reference_count,omitempty"`
	// StoragePolicyEntities holds the value of the "storage_policy_entities" field.
	StoragePolicyEntities int `json:"storage_policy_entities,omitempty"`
	// CreatedBy holds the value of the "created_by" field.
	CreatedBy int `json:"created_by,omitempty"`
	// UploadSessionID holds the value of the "upload_session_id" field.
	UploadSessionID *uuid.UUID `json:"upload_session_id,omitempty"`
	// RecycleOptions holds the value of the "recycle_options" field.
	RecycleOptions *types.EntityRecycleOption `json:"recycle_options,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the EntityQuery when eager-loading is set.
	Edges EntityEdges `json:"edges"`
	// selectValues holds values of columns outside the schema fields above —
	// presumably populated by scanValues for custom Select queries; confirm
	// against the generated Value method.
	selectValues sql.SelectValues
}
// EntityEdges holds the relations/edges for other nodes in the graph.
type EntityEdges struct {
	// File holds the value of the file edge.
	File []*File `json:"file,omitempty"`
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// StoragePolicy holds the value of the storage_policy edge.
	StoragePolicy *StoragePolicy `json:"storage_policy,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index assignments: 0=file, 1=user, 2=storage_policy (see the *OrErr methods).
	loadedTypes [3]bool
}
// FileOrErr returns the File value or an error if the edge
// was not loaded in eager-loading.
func (e EntityEdges) FileOrErr() ([]*File, error) {
	// Guard clause: bail out unless the edge was eager-loaded.
	if !e.loadedTypes[0] {
		return nil, &NotLoadedError{edge: "file"}
	}
	return e.File, nil
}
// UserOrErr returns the User value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e EntityEdges) UserOrErr() (*User, error) {
	// Guard clause: the edge must have been eager-loaded at all.
	if !e.loadedTypes[1] {
		return nil, &NotLoadedError{edge: "user"}
	}
	// Loaded but the referenced row did not exist.
	if e.User == nil {
		return nil, &NotFoundError{label: user.Label}
	}
	return e.User, nil
}
// StoragePolicyOrErr returns the StoragePolicy value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e EntityEdges) StoragePolicyOrErr() (*StoragePolicy, error) {
	// Guard clause: the edge must have been eager-loaded at all.
	if !e.loadedTypes[2] {
		return nil, &NotLoadedError{edge: "storage_policy"}
	}
	// Loaded but the referenced row did not exist.
	if e.StoragePolicy == nil {
		return nil, &NotFoundError{label: storagepolicy.Label}
	}
	return e.StoragePolicy, nil
}
// scanValues returns the types for scanning values from sql.Rows.
// Each returned element is a pointer suitable for database/sql Scan,
// positioned to match the corresponding requested column.
func (*Entity) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for idx, column := range columns {
		switch column {
		case entity.FieldUploadSessionID:
			// Nullable UUID: wrap in NullScanner so NULL scans cleanly.
			values[idx] = &sql.NullScanner{S: new(uuid.UUID)}
		case entity.FieldRecycleOptions:
			// JSON column: scanned as raw bytes, decoded later in assignValues.
			values[idx] = new([]byte)
		case entity.FieldID, entity.FieldType, entity.FieldSize, entity.FieldReferenceCount, entity.FieldStoragePolicyEntities, entity.FieldCreatedBy:
			values[idx] = new(sql.NullInt64)
		case entity.FieldSource:
			values[idx] = new(sql.NullString)
		case entity.FieldCreatedAt, entity.FieldUpdatedAt, entity.FieldDeletedAt:
			values[idx] = new(sql.NullTime)
		default:
			// Columns selected through modifiers; handled by selectValues.
			values[idx] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Entity fields. The values slice must line up index-for-index with
// columns (as produced by scanValues); extra trailing values are ignored.
func (e *Entity) assignValues(columns []string, values []any) error {
	// Only "fewer values than columns" is an error; the loop below is driven
	// by columns, so surplus values are simply never read.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case entity.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			e.ID = int(value.Int64)
		case entity.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				e.CreatedAt = value.Time
			}
		case entity.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				e.UpdatedAt = value.Time
			}
		case entity.FieldDeletedAt:
			// Nullable field: the pointer stays nil when the column is NULL.
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				e.DeletedAt = new(time.Time)
				*e.DeletedAt = value.Time
			}
		case entity.FieldType:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field type", values[i])
			} else if value.Valid {
				e.Type = int(value.Int64)
			}
		case entity.FieldSource:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field source", values[i])
			} else if value.Valid {
				e.Source = value.String
			}
		case entity.FieldSize:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field size", values[i])
			} else if value.Valid {
				e.Size = value.Int64
			}
		case entity.FieldReferenceCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field reference_count", values[i])
			} else if value.Valid {
				e.ReferenceCount = int(value.Int64)
			}
		case entity.FieldStoragePolicyEntities:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field storage_policy_entities", values[i])
			} else if value.Valid {
				e.StoragePolicyEntities = int(value.Int64)
			}
		case entity.FieldCreatedBy:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field created_by", values[i])
			} else if value.Valid {
				e.CreatedBy = int(value.Int64)
			}
		case entity.FieldUploadSessionID:
			// Nullable UUID wrapped in NullScanner by scanValues; unwrap it here.
			if value, ok := values[i].(*sql.NullScanner); !ok {
				return fmt.Errorf("unexpected type %T for field upload_session_id", values[i])
			} else if value.Valid {
				e.UploadSessionID = new(uuid.UUID)
				*e.UploadSessionID = *value.S.(*uuid.UUID)
			}
		case entity.FieldRecycleOptions:
			// JSON column: decode the raw bytes; empty/NULL leaves the field nil.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field recycle_options", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &e.RecycleOptions); err != nil {
					return fmt.Errorf("unmarshal field recycle_options: %w", err)
				}
			}
		default:
			// Unknown column: selected via a modifier; keep it for Value().
			e.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Entity.
// This includes values selected through modifiers, order, etc.
// Such values are collected by assignValues' default case.
func (e *Entity) Value(name string) (ent.Value, error) {
	return e.selectValues.Get(name)
}
// Edge-query helpers: each creates a fresh query builder bound to this
// entity's configuration via NewEntityClient(e.config).

// QueryFile queries the "file" edge of the Entity entity.
func (e *Entity) QueryFile() *FileQuery {
	return NewEntityClient(e.config).QueryFile(e)
}
// QueryUser queries the "user" edge of the Entity entity.
func (e *Entity) QueryUser() *UserQuery {
	return NewEntityClient(e.config).QueryUser(e)
}
// QueryStoragePolicy queries the "storage_policy" edge of the Entity entity.
func (e *Entity) QueryStoragePolicy() *StoragePolicyQuery {
	return NewEntityClient(e.config).QueryStoragePolicy(e)
}
// Update returns a builder for updating this Entity.
// Note that you need to call Entity.Unwrap() before calling this method if this Entity
// was returned from a transaction, and the transaction was committed or rolled back.
// Delegates to the client's UpdateOne with this entity.
func (e *Entity) Update() *EntityUpdateOne {
	return NewEntityClient(e.config).UpdateOne(e)
}
// Unwrap unwraps the Entity entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity's driver is not a transactional driver.
func (e *Entity) Unwrap() *Entity {
	tx, ok := e.config.driver.(*txDriver)
	if !ok {
		panic("ent: Entity is not a transactional entity")
	}
	// Replace the tx-scoped driver with the underlying one.
	e.config.driver = tx.drv
	return e
}
// String implements the fmt.Stringer.
// NOTE: for the nilable fields (deleted_at, upload_session_id) a separator is
// written unconditionally after the conditional segment, so a nil value yields
// a doubled ", " — this is the standard ent-generated output, not a bug.
func (e *Entity) String() string {
	var builder strings.Builder
	builder.WriteString("Entity(")
	builder.WriteString(fmt.Sprintf("id=%v, ", e.ID))
	builder.WriteString("created_at=")
	builder.WriteString(e.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(e.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := e.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("type=")
	builder.WriteString(fmt.Sprintf("%v", e.Type))
	builder.WriteString(", ")
	builder.WriteString("source=")
	builder.WriteString(e.Source)
	builder.WriteString(", ")
	builder.WriteString("size=")
	builder.WriteString(fmt.Sprintf("%v", e.Size))
	builder.WriteString(", ")
	builder.WriteString("reference_count=")
	builder.WriteString(fmt.Sprintf("%v", e.ReferenceCount))
	builder.WriteString(", ")
	builder.WriteString("storage_policy_entities=")
	builder.WriteString(fmt.Sprintf("%v", e.StoragePolicyEntities))
	builder.WriteString(", ")
	builder.WriteString("created_by=")
	builder.WriteString(fmt.Sprintf("%v", e.CreatedBy))
	builder.WriteString(", ")
	if v := e.UploadSessionID; v != nil {
		builder.WriteString("upload_session_id=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	builder.WriteString("recycle_options=")
	builder.WriteString(fmt.Sprintf("%v", e.RecycleOptions))
	builder.WriteByte(')')
	return builder.String()
}
// Manual edge setters: marking loadedTypes[i] true makes the corresponding
// *OrErr accessor return the value instead of a NotLoadedError.

// SetFile manually set the edge as loaded state.
func (e *Entity) SetFile(v []*File) {
	e.Edges.File = v
	e.Edges.loadedTypes[0] = true
}
// SetUser manually set the edge as loaded state.
func (e *Entity) SetUser(v *User) {
	e.Edges.User = v
	e.Edges.loadedTypes[1] = true
}
// SetStoragePolicy manually set the edge as loaded state.
func (e *Entity) SetStoragePolicy(v *StoragePolicy) {
	e.Edges.StoragePolicy = v
	e.Edges.loadedTypes[2] = true
}
// Entities is a parsable slice of Entity.
type Entities []*Entity

@ -0,0 +1,224 @@
// Code generated by ent, DO NOT EDIT.
package entity
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
// Column, edge, and table-name constants for the Entity schema.
const (
	// Label holds the string label denoting the entity type in the database.
	Label = "entity"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldType holds the string denoting the type field in the database.
	FieldType = "type"
	// FieldSource holds the string denoting the source field in the database.
	FieldSource = "source"
	// FieldSize holds the string denoting the size field in the database.
	FieldSize = "size"
	// FieldReferenceCount holds the string denoting the reference_count field in the database.
	FieldReferenceCount = "reference_count"
	// FieldStoragePolicyEntities holds the string denoting the storage_policy_entities field in the database.
	FieldStoragePolicyEntities = "storage_policy_entities"
	// FieldCreatedBy holds the string denoting the created_by field in the database.
	FieldCreatedBy = "created_by"
	// FieldUploadSessionID holds the string denoting the upload_session_id field in the database.
	FieldUploadSessionID = "upload_session_id"
	// FieldRecycleOptions holds the string denoting the recycle_options field in the database.
	FieldRecycleOptions = "recycle_options"
	// EdgeFile holds the string denoting the file edge name in mutations.
	EdgeFile = "file"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// EdgeStoragePolicy holds the string denoting the storage_policy edge name in mutations.
	EdgeStoragePolicy = "storage_policy"
	// Table holds the table name of the entity in the database.
	Table = "entities"
	// FileTable is the table that holds the file relation/edge. The primary key declared below.
	FileTable = "file_entities"
	// FileInverseTable is the table name for the File entity.
	// It exists in this package in order to avoid circular dependency with the "file" package.
	FileInverseTable = "files"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "entities"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "created_by"
	// StoragePolicyTable is the table that holds the storage_policy relation/edge.
	StoragePolicyTable = "entities"
	// StoragePolicyInverseTable is the table name for the StoragePolicy entity.
	// It exists in this package in order to avoid circular dependency with the "storagepolicy" package.
	StoragePolicyInverseTable = "storage_policies"
	// StoragePolicyColumn is the table column denoting the storage_policy relation/edge.
	StoragePolicyColumn = "storage_policy_entities"
)
// Columns holds all SQL columns for entity fields.
// Order matters only for display; membership is checked via ValidColumn.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldType,
	FieldSource,
	FieldSize,
	FieldReferenceCount,
	FieldStoragePolicyEntities,
	FieldCreatedBy,
	FieldUploadSessionID,
	FieldRecycleOptions,
}
var (
	// FilePrimaryKey holds the table columns ("file_id", "entity_id") denoting
	// the composite primary key for the file relation (M2M join table).
	FilePrimaryKey = []string{"file_id", "entity_id"}
)
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	// Linear scan is fine: the column list is small and fixed.
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks holds the schema hooks registered for the Entity type
	// (populated by the runtime package).
	Hooks [1]ent.Hook
	// Interceptors holds the query interceptors registered for the Entity type
	// (populated by the runtime package).
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// DefaultReferenceCount holds the default value on creation for the "reference_count" field.
	DefaultReferenceCount int
)
// OrderOption defines the ordering options for the Entity queries.
// The per-field helpers below all delegate to sql.OrderByField.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}
// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldType, opts...).ToFunc()
}
// BySource orders the results by the source field.
func BySource(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSource, opts...).ToFunc()
}
// BySize orders the results by the size field.
func BySize(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSize, opts...).ToFunc()
}
// ByReferenceCount orders the results by the reference_count field.
func ByReferenceCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldReferenceCount, opts...).ToFunc()
}
// ByStoragePolicyEntities orders the results by the storage_policy_entities field.
func ByStoragePolicyEntities(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStoragePolicyEntities, opts...).ToFunc()
}
// ByCreatedBy orders the results by the created_by field.
func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedBy, opts...).ToFunc()
}
// ByUploadSessionID orders the results by the upload_session_id field.
func ByUploadSessionID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUploadSessionID, opts...).ToFunc()
}
// ByFileCount orders the results by file count.
// Counts related rows across the file edge (see newFileStep).
func ByFileCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newFileStep(), opts...)
	}
}
// ByFile orders the results by file terms.
func ByFile(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newFileStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}
// ByUserField orders the results by user field.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}
// ByStoragePolicyField orders the results by storage_policy field.
func ByStoragePolicyField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newStoragePolicyStep(), sql.OrderByField(field, opts...))
	}
}
// newFileStep returns the sqlgraph step for traversing the file edge
// (M2M, through the "file_entities" join table).
func newFileStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(FileInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2M, true, FileTable, FilePrimaryKey...),
	)
}
// newUserStep returns the sqlgraph step for traversing the user edge
// (M2O, via the "created_by" column on this table).
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}
// newStoragePolicyStep returns the sqlgraph step for traversing the
// storage_policy edge (M2O, via the "storage_policy_entities" column).
func newStoragePolicyStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(StoragePolicyInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, StoragePolicyTable, StoragePolicyColumn),
	)
}

@ -0,0 +1,616 @@
// Code generated by ent, DO NOT EDIT.
package entity
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/gofrs/uuid"
)
// ---- ID predicates ----

// ID filters vertices based on their ID field.
func ID(id int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldID, id))
}

// ---- Per-field shorthand predicates, identical to the corresponding *EQ variants ----

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldUpdatedAt, v))
}
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldDeletedAt, v))
}
// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
func Type(v int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldType, v))
}
// Source applies equality check predicate on the "source" field. It's identical to SourceEQ.
func Source(v string) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldSource, v))
}
// Size applies equality check predicate on the "size" field. It's identical to SizeEQ.
func Size(v int64) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldSize, v))
}
// ReferenceCount applies equality check predicate on the "reference_count" field. It's identical to ReferenceCountEQ.
func ReferenceCount(v int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldReferenceCount, v))
}
// StoragePolicyEntities applies equality check predicate on the "storage_policy_entities" field. It's identical to StoragePolicyEntitiesEQ.
func StoragePolicyEntities(v int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldStoragePolicyEntities, v))
}
// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ.
func CreatedBy(v int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldCreatedBy, v))
}
// UploadSessionID applies equality check predicate on the "upload_session_id" field. It's identical to UploadSessionIDEQ.
func UploadSessionID(v uuid.UUID) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldUploadSessionID, v))
}
// ---- created_at / updated_at / deleted_at predicates ----
// deleted_at is nilable, hence the extra IsNil/NotNil variants at the end.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldUpdatedAt, v))
}
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldDeletedAt, v))
}
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldDeletedAt, v))
}
// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldDeletedAt, vs...))
}
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldDeletedAt, vs...))
}
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldDeletedAt, v))
}
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldDeletedAt, v))
}
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldDeletedAt, v))
}
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldDeletedAt, v))
}
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.Entity {
	return predicate.Entity(sql.FieldIsNull(FieldDeletedAt))
}
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.Entity {
	return predicate.Entity(sql.FieldNotNull(FieldDeletedAt))
}
// ---- type / source predicates ----
// source is a string field, hence the extra Contains/Prefix/Suffix/Fold variants.

// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldType, v))
}
// TypeNEQ applies the NEQ predicate on the "type" field.
func TypeNEQ(v int) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldType, v))
}
// TypeIn applies the In predicate on the "type" field.
func TypeIn(vs ...int) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldType, vs...))
}
// TypeNotIn applies the NotIn predicate on the "type" field.
func TypeNotIn(vs ...int) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldType, vs...))
}
// TypeGT applies the GT predicate on the "type" field.
func TypeGT(v int) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldType, v))
}
// TypeGTE applies the GTE predicate on the "type" field.
func TypeGTE(v int) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldType, v))
}
// TypeLT applies the LT predicate on the "type" field.
func TypeLT(v int) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldType, v))
}
// TypeLTE applies the LTE predicate on the "type" field.
func TypeLTE(v int) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldType, v))
}
// SourceEQ applies the EQ predicate on the "source" field.
func SourceEQ(v string) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldSource, v))
}
// SourceNEQ applies the NEQ predicate on the "source" field.
func SourceNEQ(v string) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldSource, v))
}
// SourceIn applies the In predicate on the "source" field.
func SourceIn(vs ...string) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldSource, vs...))
}
// SourceNotIn applies the NotIn predicate on the "source" field.
func SourceNotIn(vs ...string) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldSource, vs...))
}
// SourceGT applies the GT predicate on the "source" field.
func SourceGT(v string) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldSource, v))
}
// SourceGTE applies the GTE predicate on the "source" field.
func SourceGTE(v string) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldSource, v))
}
// SourceLT applies the LT predicate on the "source" field.
func SourceLT(v string) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldSource, v))
}
// SourceLTE applies the LTE predicate on the "source" field.
func SourceLTE(v string) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldSource, v))
}
// SourceContains applies the Contains predicate on the "source" field.
func SourceContains(v string) predicate.Entity {
	return predicate.Entity(sql.FieldContains(FieldSource, v))
}
// SourceHasPrefix applies the HasPrefix predicate on the "source" field.
func SourceHasPrefix(v string) predicate.Entity {
	return predicate.Entity(sql.FieldHasPrefix(FieldSource, v))
}
// SourceHasSuffix applies the HasSuffix predicate on the "source" field.
func SourceHasSuffix(v string) predicate.Entity {
	return predicate.Entity(sql.FieldHasSuffix(FieldSource, v))
}
// SourceEqualFold applies the EqualFold predicate on the "source" field.
func SourceEqualFold(v string) predicate.Entity {
	return predicate.Entity(sql.FieldEqualFold(FieldSource, v))
}
// SourceContainsFold applies the ContainsFold predicate on the "source" field.
func SourceContainsFold(v string) predicate.Entity {
	return predicate.Entity(sql.FieldContainsFold(FieldSource, v))
}
// ---- size / reference_count / storage_policy_entities / created_by /
// upload_session_id predicates ----
// created_by is nilable (IsNil/NotNil variants present).

// SizeEQ applies the EQ predicate on the "size" field.
func SizeEQ(v int64) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldSize, v))
}
// SizeNEQ applies the NEQ predicate on the "size" field.
func SizeNEQ(v int64) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldSize, v))
}
// SizeIn applies the In predicate on the "size" field.
func SizeIn(vs ...int64) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldSize, vs...))
}
// SizeNotIn applies the NotIn predicate on the "size" field.
func SizeNotIn(vs ...int64) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldSize, vs...))
}
// SizeGT applies the GT predicate on the "size" field.
func SizeGT(v int64) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldSize, v))
}
// SizeGTE applies the GTE predicate on the "size" field.
func SizeGTE(v int64) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldSize, v))
}
// SizeLT applies the LT predicate on the "size" field.
func SizeLT(v int64) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldSize, v))
}
// SizeLTE applies the LTE predicate on the "size" field.
func SizeLTE(v int64) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldSize, v))
}
// ReferenceCountEQ applies the EQ predicate on the "reference_count" field.
func ReferenceCountEQ(v int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldReferenceCount, v))
}
// ReferenceCountNEQ applies the NEQ predicate on the "reference_count" field.
func ReferenceCountNEQ(v int) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldReferenceCount, v))
}
// ReferenceCountIn applies the In predicate on the "reference_count" field.
func ReferenceCountIn(vs ...int) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldReferenceCount, vs...))
}
// ReferenceCountNotIn applies the NotIn predicate on the "reference_count" field.
func ReferenceCountNotIn(vs ...int) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldReferenceCount, vs...))
}
// ReferenceCountGT applies the GT predicate on the "reference_count" field.
func ReferenceCountGT(v int) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldReferenceCount, v))
}
// ReferenceCountGTE applies the GTE predicate on the "reference_count" field.
func ReferenceCountGTE(v int) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldReferenceCount, v))
}
// ReferenceCountLT applies the LT predicate on the "reference_count" field.
func ReferenceCountLT(v int) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldReferenceCount, v))
}
// ReferenceCountLTE applies the LTE predicate on the "reference_count" field.
func ReferenceCountLTE(v int) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldReferenceCount, v))
}
// StoragePolicyEntitiesEQ applies the EQ predicate on the "storage_policy_entities" field.
func StoragePolicyEntitiesEQ(v int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldStoragePolicyEntities, v))
}
// StoragePolicyEntitiesNEQ applies the NEQ predicate on the "storage_policy_entities" field.
func StoragePolicyEntitiesNEQ(v int) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldStoragePolicyEntities, v))
}
// StoragePolicyEntitiesIn applies the In predicate on the "storage_policy_entities" field.
func StoragePolicyEntitiesIn(vs ...int) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldStoragePolicyEntities, vs...))
}
// StoragePolicyEntitiesNotIn applies the NotIn predicate on the "storage_policy_entities" field.
func StoragePolicyEntitiesNotIn(vs ...int) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldStoragePolicyEntities, vs...))
}
// CreatedByEQ applies the EQ predicate on the "created_by" field.
func CreatedByEQ(v int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldCreatedBy, v))
}
// CreatedByNEQ applies the NEQ predicate on the "created_by" field.
func CreatedByNEQ(v int) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldCreatedBy, v))
}
// CreatedByIn applies the In predicate on the "created_by" field.
func CreatedByIn(vs ...int) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldCreatedBy, vs...))
}
// CreatedByNotIn applies the NotIn predicate on the "created_by" field.
func CreatedByNotIn(vs ...int) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldCreatedBy, vs...))
}
// CreatedByIsNil applies the IsNil predicate on the "created_by" field.
func CreatedByIsNil() predicate.Entity {
	return predicate.Entity(sql.FieldIsNull(FieldCreatedBy))
}
// CreatedByNotNil applies the NotNil predicate on the "created_by" field.
func CreatedByNotNil() predicate.Entity {
	return predicate.Entity(sql.FieldNotNull(FieldCreatedBy))
}
// UploadSessionIDEQ applies the EQ predicate on the "upload_session_id" field.
func UploadSessionIDEQ(v uuid.UUID) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldUploadSessionID, v))
}
// UploadSessionIDNEQ applies the NEQ predicate on the "upload_session_id" field.
func UploadSessionIDNEQ(v uuid.UUID) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldUploadSessionID, v))
}
// UploadSessionIDIn applies the In predicate on the "upload_session_id" field.
func UploadSessionIDIn(vs ...uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldUploadSessionID, vs...))
}
// UploadSessionIDNotIn applies the NotIn predicate on the "upload_session_id" field.
func UploadSessionIDNotIn(vs ...uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldUploadSessionID, vs...))
}
// UploadSessionIDGT applies the GT predicate on the "upload_session_id" field.
func UploadSessionIDGT(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldUploadSessionID, v))
}
// UploadSessionIDGTE applies the GTE predicate on the "upload_session_id" field.
func UploadSessionIDGTE(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldUploadSessionID, v))
}
// UploadSessionIDLT applies the LT predicate on the "upload_session_id" field.
func UploadSessionIDLT(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldUploadSessionID, v))
}
// UploadSessionIDLTE applies the LTE predicate on the "upload_session_id" field.
func UploadSessionIDLTE(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldUploadSessionID, v))
}
// UploadSessionIDIsNil applies the IsNil predicate on the "upload_session_id" field.
func UploadSessionIDIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldUploadSessionID))
}
// UploadSessionIDNotNil applies the NotNil predicate on the "upload_session_id" field.
func UploadSessionIDNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldUploadSessionID))
}
// RecycleOptionsIsNil applies the IsNil predicate on the "recycle_options" field.
func RecycleOptionsIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldRecycleOptions))
}
// RecycleOptionsNotNil applies the NotNil predicate on the "recycle_options" field.
func RecycleOptionsNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldRecycleOptions))
}
// HasFile applies the HasEdge predicate on the "file" edge.
func HasFile() predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.M2M, true, FileTable, FilePrimaryKey...),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasFileWith applies the HasEdge predicate on the "file" edge with a given conditions (other predicates).
func HasFileWith(preds ...predicate.File) predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := newFileStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// HasUser applies the HasEdge predicate on the "user" edge.
func HasUser() predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := newUserStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// HasStoragePolicy applies the HasEdge predicate on the "storage_policy" edge.
func HasStoragePolicy() predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, StoragePolicyTable, StoragePolicyColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasStoragePolicyWith applies the HasEdge predicate on the "storage_policy" edge with a given conditions (other predicates).
func HasStoragePolicyWith(preds ...predicate.StoragePolicy) predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := newStoragePolicyStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Entity) predicate.Entity {
return predicate.Entity(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Entity) predicate.Entity {
return predicate.Entity(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Entity) predicate.Entity {
return predicate.Entity(sql.NotPredicates(p))
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// EntityDelete is the builder for deleting a Entity entity.
type EntityDelete struct {
	config
	hooks    []Hook
	mutation *EntityMutation
}

// Where appends a list predicates to the EntityDelete builder.
func (ed *EntityDelete) Where(ps ...predicate.Entity) *EntityDelete {
	ed.mutation.Where(ps...)
	return ed
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (ed *EntityDelete) Exec(ctx context.Context) (int, error) {
	// Hooks registered on the client/builder wrap the actual sqlExec call.
	return withHooks(ctx, ed.sqlExec, ed.mutation, ed.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (ed *EntityDelete) ExecX(ctx context.Context) int {
	n, err := ed.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds and runs the actual DELETE statement for the accumulated
// predicates, returning the number of affected rows.
func (ed *EntityDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(entity.Table, sqlgraph.NewFieldSpec(entity.FieldID, field.TypeInt))
	if ps := ed.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, ed.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		// Surface FK/unique violations as ent's typed ConstraintError so
		// callers can detect them without dialect-specific matching.
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	ed.mutation.done = true
	return affected, err
}

// EntityDeleteOne is the builder for deleting a single Entity entity.
type EntityDeleteOne struct {
	ed *EntityDelete
}

// Where appends a list predicates to the EntityDelete builder.
func (edo *EntityDeleteOne) Where(ps ...predicate.Entity) *EntityDeleteOne {
	edo.ed.mutation.Where(ps...)
	return edo
}

// Exec executes the deletion query.
func (edo *EntityDeleteOne) Exec(ctx context.Context) error {
	n, err := edo.ed.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		// Deleting zero rows through the "one" builder is reported as
		// not-found rather than silently succeeding.
		return &NotFoundError{entity.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (edo *EntityDeleteOne) ExecX(ctx context.Context) {
	if err := edo.Exec(ctx); err != nil {
		panic(err)
	}
}

@ -0,0 +1,786 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"database/sql/driver"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// EntityQuery is the builder for querying Entity entities.
type EntityQuery struct {
	config
	ctx               *QueryContext
	order             []entity.OrderOption
	inters            []Interceptor
	predicates        []predicate.Entity
	withFile          *FileQuery
	withUser          *UserQuery
	withStoragePolicy *StoragePolicyQuery
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the EntityQuery builder.
func (eq *EntityQuery) Where(ps ...predicate.Entity) *EntityQuery {
	eq.predicates = append(eq.predicates, ps...)
	return eq
}

// Limit the number of records to be returned by this query.
func (eq *EntityQuery) Limit(limit int) *EntityQuery {
	eq.ctx.Limit = &limit
	return eq
}

// Offset to start from.
func (eq *EntityQuery) Offset(offset int) *EntityQuery {
	eq.ctx.Offset = &offset
	return eq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (eq *EntityQuery) Unique(unique bool) *EntityQuery {
	eq.ctx.Unique = &unique
	return eq
}

// Order specifies how the records should be ordered.
func (eq *EntityQuery) Order(o ...entity.OrderOption) *EntityQuery {
	eq.order = append(eq.order, o...)
	return eq
}

// QueryFile chains the current query on the "file" edge.
// The returned FileQuery resolves its FROM clause lazily: the traversal path
// is evaluated only when the chained query is executed.
func (eq *EntityQuery) QueryFile() *FileQuery {
	query := (&FileClient{config: eq.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := eq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := eq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(entity.Table, entity.FieldID, selector),
			sqlgraph.To(file.Table, file.FieldID),
			sqlgraph.Edge(sqlgraph.M2M, true, entity.FileTable, entity.FilePrimaryKey...),
		)
		fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryUser chains the current query on the "user" edge.
func (eq *EntityQuery) QueryUser() *UserQuery {
	query := (&UserClient{config: eq.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := eq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := eq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(entity.Table, entity.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, entity.UserTable, entity.UserColumn),
		)
		fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryStoragePolicy chains the current query on the "storage_policy" edge.
func (eq *EntityQuery) QueryStoragePolicy() *StoragePolicyQuery {
	query := (&StoragePolicyClient{config: eq.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := eq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := eq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(entity.Table, entity.FieldID, selector),
			sqlgraph.To(storagepolicy.Table, storagepolicy.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, entity.StoragePolicyTable, entity.StoragePolicyColumn),
		)
		fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// First returns the first Entity entity from the query.
// Returns a *NotFoundError when no Entity was found.
func (eq *EntityQuery) First(ctx context.Context) (*Entity, error) {
	nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{entity.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil instead of panicking.
func (eq *EntityQuery) FirstX(ctx context.Context) *Entity {
	node, err := eq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first Entity ID from the query.
// Returns a *NotFoundError when no Entity ID was found.
func (eq *EntityQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{entity.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
func (eq *EntityQuery) FirstIDX(ctx context.Context) int {
	id, err := eq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single Entity entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Entity entity is found.
// Returns a *NotFoundError when no Entity entities are found.
func (eq *EntityQuery) Only(ctx context.Context) (*Entity, error) {
	// Limit(2) is enough to distinguish "one" from "more than one".
	nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{entity.Label}
	default:
		return nil, &NotSingularError{entity.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (eq *EntityQuery) OnlyX(ctx context.Context) *Entity {
	node, err := eq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only Entity ID in the query.
// Returns a *NotSingularError when more than one Entity ID is found.
// Returns a *NotFoundError when no entities are found.
func (eq *EntityQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{entity.Label}
	default:
		err = &NotSingularError{entity.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (eq *EntityQuery) OnlyIDX(ctx context.Context) int {
	id, err := eq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// All executes the query and returns a list of Entities.
func (eq *EntityQuery) All(ctx context.Context) ([]*Entity, error) {
	ctx = setContextOp(ctx, eq.ctx, "All")
	if err := eq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*Entity, *EntityQuery]()
	return withInterceptors[[]*Entity](ctx, eq, qr, eq.inters)
}

// AllX is like All, but panics if an error occurs.
func (eq *EntityQuery) AllX(ctx context.Context) []*Entity {
	nodes, err := eq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of Entity IDs.
func (eq *EntityQuery) IDs(ctx context.Context) (ids []int, err error) {
	// Traversal queries default to unique IDs unless explicitly configured.
	if eq.ctx.Unique == nil && eq.path != nil {
		eq.Unique(true)
	}
	ctx = setContextOp(ctx, eq.ctx, "IDs")
	if err = eq.Select(entity.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (eq *EntityQuery) IDsX(ctx context.Context) []int {
	ids, err := eq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}

// Count returns the count of the given query.
func (eq *EntityQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, eq.ctx, "Count")
	if err := eq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, eq, querierCount[*EntityQuery](), eq.inters)
}

// CountX is like Count, but panics if an error occurs.
func (eq *EntityQuery) CountX(ctx context.Context) int {
	count, err := eq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
// It delegates to FirstID so at most one row is fetched.
func (eq *EntityQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, eq.ctx, "Exist")
	switch _, err := eq.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (eq *EntityQuery) ExistX(ctx context.Context) bool {
	exist, err := eq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
// Clone returns a duplicate of the EntityQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (eq *EntityQuery) Clone() *EntityQuery {
	if eq == nil {
		return nil
	}
	return &EntityQuery{
		config:            eq.config,
		ctx:               eq.ctx.Clone(),
		order:             append([]entity.OrderOption{}, eq.order...),
		inters:            append([]Interceptor{}, eq.inters...),
		predicates:        append([]predicate.Entity{}, eq.predicates...),
		withFile:          eq.withFile.Clone(),
		withUser:          eq.withUser.Clone(),
		withStoragePolicy: eq.withStoragePolicy.Clone(),
		// clone intermediate query.
		sql:  eq.sql.Clone(),
		path: eq.path,
	}
}

// WithFile tells the query-builder to eager-load the nodes that are connected to
// the "file" edge. The optional arguments are used to configure the query builder of the edge.
func (eq *EntityQuery) WithFile(opts ...func(*FileQuery)) *EntityQuery {
	query := (&FileClient{config: eq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	eq.withFile = query
	return eq
}

// WithUser tells the query-builder to eager-load the nodes that are connected to
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
func (eq *EntityQuery) WithUser(opts ...func(*UserQuery)) *EntityQuery {
	query := (&UserClient{config: eq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	eq.withUser = query
	return eq
}

// WithStoragePolicy tells the query-builder to eager-load the nodes that are connected to
// the "storage_policy" edge. The optional arguments are used to configure the query builder of the edge.
func (eq *EntityQuery) WithStoragePolicy(opts ...func(*StoragePolicyQuery)) *EntityQuery {
	query := (&StoragePolicyClient{config: eq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	eq.withStoragePolicy = query
	return eq
}

// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count     int       `json:"count,omitempty"`
//	}
//
//	client.Entity.Query().
//		GroupBy(entity.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (eq *EntityQuery) GroupBy(field string, fields ...string) *EntityGroupBy {
	eq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &EntityGroupBy{build: eq}
	grbuild.flds = &eq.ctx.Fields
	grbuild.label = entity.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.Entity.Query().
//		Select(entity.FieldCreatedAt).
//		Scan(ctx, &v)
func (eq *EntityQuery) Select(fields ...string) *EntitySelect {
	eq.ctx.Fields = append(eq.ctx.Fields, fields...)
	sbuild := &EntitySelect{EntityQuery: eq}
	sbuild.label = entity.Label
	sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a EntitySelect configured with the given aggregations.
func (eq *EntityQuery) Aggregate(fns ...AggregateFunc) *EntitySelect {
	return eq.Select().Aggregate(fns...)
}

// prepareQuery runs registered traverser interceptors, validates the selected
// columns, and resolves the lazy traversal path (if any) into eq.sql.
func (eq *EntityQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range eq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, eq); err != nil {
				return err
			}
		}
	}
	for _, f := range eq.ctx.Fields {
		if !entity.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if eq.path != nil {
		prev, err := eq.path(ctx)
		if err != nil {
			return err
		}
		eq.sql = prev
	}
	return nil
}
// sqlAll scans all matching Entity rows and eager-loads the requested edges.
func (eq *EntityQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Entity, error) {
	var (
		nodes = []*Entity{}
		_spec = eq.querySpec()
		// loadedTypes records which edges were requested so Entity.Edges
		// can distinguish "not loaded" from "loaded but empty".
		loadedTypes = [3]bool{
			eq.withFile != nil,
			eq.withUser != nil,
			eq.withStoragePolicy != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*Entity).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &Entity{config: eq.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := eq.withFile; query != nil {
		if err := eq.loadFile(ctx, query, nodes,
			func(n *Entity) { n.Edges.File = []*File{} },
			func(n *Entity, e *File) { n.Edges.File = append(n.Edges.File, e) }); err != nil {
			return nil, err
		}
	}
	if query := eq.withUser; query != nil {
		if err := eq.loadUser(ctx, query, nodes, nil,
			func(n *Entity, e *User) { n.Edges.User = e }); err != nil {
			return nil, err
		}
	}
	if query := eq.withStoragePolicy; query != nil {
		if err := eq.loadStoragePolicy(ctx, query, nodes, nil,
			func(n *Entity, e *StoragePolicy) { n.Edges.StoragePolicy = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// loadFile eager-loads the M2M "file" edge for the given nodes. It joins the
// edge's join table and prepends the owning-entity ID column to each scanned
// row so every File can be mapped back to the Entity nodes that reference it.
func (eq *EntityQuery) loadFile(ctx context.Context, query *FileQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *File)) error {
	edgeIDs := make([]driver.Value, len(nodes))
	byID := make(map[int]*Entity)
	nids := make(map[int]map[*Entity]struct{})
	for i, node := range nodes {
		edgeIDs[i] = node.ID
		byID[node.ID] = node
		if init != nil {
			init(node)
		}
	}
	query.Where(func(s *sql.Selector) {
		joinT := sql.Table(entity.FileTable)
		s.Join(joinT).On(s.C(file.FieldID), joinT.C(entity.FilePrimaryKey[0]))
		s.Where(sql.InValues(joinT.C(entity.FilePrimaryKey[1]), edgeIDs...))
		columns := s.SelectedColumns()
		// The join-table's entity-ID column is selected first; the scan
		// wrapper below relies on this ordering.
		s.Select(joinT.C(entity.FilePrimaryKey[1]))
		s.AppendSelect(columns...)
		s.SetDistinct(false)
	})
	if err := query.prepareQuery(ctx); err != nil {
		return err
	}
	qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
			// Wrap the File scan spec so the extra leading column (the
			// owning entity's ID) is consumed before normal assignment.
			assign := spec.Assign
			values := spec.ScanValues
			spec.ScanValues = func(columns []string) ([]any, error) {
				values, err := values(columns[1:])
				if err != nil {
					return nil, err
				}
				return append([]any{new(sql.NullInt64)}, values...), nil
			}
			spec.Assign = func(columns []string, values []any) error {
				outValue := int(values[0].(*sql.NullInt64).Int64)
				inValue := int(values[1].(*sql.NullInt64).Int64)
				if nids[inValue] == nil {
					nids[inValue] = map[*Entity]struct{}{byID[outValue]: {}}
					// Materialize the File node only on its first occurrence.
					return assign(columns[1:], values[1:])
				}
				nids[inValue][byID[outValue]] = struct{}{}
				return nil
			}
		})
	})
	neighbors, err := withInterceptors[[]*File](ctx, query, qr, query.inters)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected "file" node returned %v`, n.ID)
		}
		for kn := range nodes {
			assign(kn, n)
		}
	}
	return nil
}

// loadUser eager-loads the M2O "user" edge via the created_by foreign key.
// Multiple nodes may share the same FK, so neighbors are fetched once and
// fanned out to every node that references them.
func (eq *EntityQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *User)) error {
	ids := make([]int, 0, len(nodes))
	nodeids := make(map[int][]*Entity)
	for i := range nodes {
		fk := nodes[i].CreatedBy
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "created_by" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}

// loadStoragePolicy eager-loads the M2O "storage_policy" edge via the
// storage_policy_entities foreign key; same fan-out strategy as loadUser.
func (eq *EntityQuery) loadStoragePolicy(ctx context.Context, query *StoragePolicyQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *StoragePolicy)) error {
	ids := make([]int, 0, len(nodes))
	nodeids := make(map[int][]*Entity)
	for i := range nodes {
		fk := nodes[i].StoragePolicyEntities
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(storagepolicy.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "storage_policy_entities" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
// sqlCount builds the COUNT query for the current predicates/fields.
func (eq *EntityQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := eq.querySpec()
	_spec.Node.Columns = eq.ctx.Fields
	if len(eq.ctx.Fields) > 0 {
		_spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, eq.driver, _spec)
}

// querySpec translates the builder state (fields, predicates, ordering,
// limit/offset) into a sqlgraph.QuerySpec.
func (eq *EntityQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(entity.Table, entity.Columns, sqlgraph.NewFieldSpec(entity.FieldID, field.TypeInt))
	_spec.From = eq.sql
	if unique := eq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if eq.path != nil {
		// Traversal queries are de-duplicated by default.
		_spec.Unique = true
	}
	if fields := eq.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, entity.FieldID)
		for i := range fields {
			if fields[i] != entity.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// Eager loading needs the FK columns even if the caller did not
		// select them explicitly.
		if eq.withUser != nil {
			_spec.Node.AddColumnOnce(entity.FieldCreatedBy)
		}
		if eq.withStoragePolicy != nil {
			_spec.Node.AddColumnOnce(entity.FieldStoragePolicyEntities)
		}
	}
	if ps := eq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := eq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := eq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := eq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}

// sqlQuery builds the raw SELECT used by traversal chaining and Group/Select
// scanning, applying predicates, ordering, distinct, and limit/offset.
func (eq *EntityQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(eq.driver.Dialect())
	t1 := builder.Table(entity.Table)
	columns := eq.ctx.Fields
	if len(columns) == 0 {
		columns = entity.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if eq.sql != nil {
		selector = eq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if eq.ctx.Unique != nil && *eq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range eq.predicates {
		p(selector)
	}
	for _, p := range eq.order {
		p(selector)
	}
	if offset := eq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := eq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// EntityGroupBy is the group-by builder for Entity entities.
type EntityGroupBy struct {
	selector
	build *EntityQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (egb *EntityGroupBy) Aggregate(fns ...AggregateFunc) *EntityGroupBy {
	egb.fns = append(egb.fns, fns...)
	return egb
}

// Scan applies the selector query and scans the result into the given value.
func (egb *EntityGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, egb.build.ctx, "GroupBy")
	if err := egb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*EntityQuery, *EntityGroupBy](ctx, egb.build, egb, egb.build.inters, v)
}

// sqlScan executes the group-by query: the selected columns are the group-by
// fields plus any registered aggregations, grouped by the fields.
func (egb *EntityGroupBy) sqlScan(ctx context.Context, root *EntityQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(egb.fns))
	for _, fn := range egb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*egb.flds)+len(egb.fns))
		for _, f := range *egb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*egb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := egb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

// EntitySelect is the builder for selecting fields of Entity entities.
type EntitySelect struct {
	*EntityQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (es *EntitySelect) Aggregate(fns ...AggregateFunc) *EntitySelect {
	es.fns = append(es.fns, fns...)
	return es
}

// Scan applies the selector query and scans the result into the given value.
func (es *EntitySelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, es.ctx, "Select")
	if err := es.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*EntityQuery, *EntitySelect](ctx, es.EntityQuery, es, es.inters, v)
}

// sqlScan executes the select query, appending aggregations to the selected
// fields (or selecting only aggregations when no fields were chosen).
func (es *EntitySelect) sqlScan(ctx context.Context, root *EntityQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(es.fns))
	for _, fn := range es.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*es.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := es.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,84 @@
// Code generated by ent, DO NOT EDIT.
package enttest
import (
"context"
"github.com/cloudreve/Cloudreve/v4/ent"
// required by schema hooks.
_ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
"entgo.io/ent/dialect/sql/schema"
"github.com/cloudreve/Cloudreve/v4/ent/migrate"
)
type (
// TestingT is the interface that is shared between
// testing.T and testing.B and used by enttest.
TestingT interface {
FailNow()
Error(...any)
}
// Option configures client creation.
Option func(*options)
options struct {
opts []ent.Option
migrateOpts []schema.MigrateOption
}
)
// WithOptions forwards options to client creation.
func WithOptions(opts ...ent.Option) Option {
return func(o *options) {
o.opts = append(o.opts, opts...)
}
}
// WithMigrateOptions forwards options to auto migration.
func WithMigrateOptions(opts ...schema.MigrateOption) Option {
return func(o *options) {
o.migrateOpts = append(o.migrateOpts, opts...)
}
}
func newOptions(opts []Option) *options {
o := &options{}
for _, opt := range opts {
opt(o)
}
return o
}
// Open calls ent.Open and auto-run migration.
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
o := newOptions(opts)
c, err := ent.Open(driverName, dataSourceName, o.opts...)
if err != nil {
t.Error(err)
t.FailNow()
}
migrateSchema(t, c, o)
return c
}
// NewClient calls ent.NewClient and auto-run migration.
func NewClient(t TestingT, opts ...Option) *ent.Client {
o := newOptions(opts)
c := ent.NewClient(o.opts...)
migrateSchema(t, c, o)
return c
}
// migrateSchema runs auto-migration for the given client, reporting any
// failure through t and aborting the test via FailNow.
func migrateSchema(t TestingT, c *ent.Client, o *options) {
	// Work on a copy of the package-level table descriptors rather than
	// migrate.Tables itself.
	tables, err := schema.CopyTables(migrate.Tables)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
		t.Error(err)
		t.FailNow()
	}
}

@ -0,0 +1,438 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"encoding/json"
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
// File is the model entity for the File schema.
type File struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	// Nil when the deleted_at column is NULL (see assignValues).
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Type holds the value of the "type" field.
	Type int `json:"type,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// OwnerID holds the value of the "owner_id" field.
	OwnerID int `json:"owner_id,omitempty"`
	// Size holds the value of the "size" field.
	Size int64 `json:"size,omitempty"`
	// PrimaryEntity holds the value of the "primary_entity" field.
	PrimaryEntity int `json:"primary_entity,omitempty"`
	// FileChildren holds the value of the "file_children" field.
	FileChildren int `json:"file_children,omitempty"`
	// IsSymbolic holds the value of the "is_symbolic" field.
	IsSymbolic bool `json:"is_symbolic,omitempty"`
	// Props holds the value of the "props" field.
	// JSON-decoded from the raw column bytes; nil when the column is empty.
	Props *types.FileProps `json:"props,omitempty"`
	// StoragePolicyFiles holds the value of the "storage_policy_files" field.
	StoragePolicyFiles int `json:"storage_policy_files,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the FileQuery when eager-loading is set.
	Edges FileEdges `json:"edges"`
	// selectValues stores columns scanned outside the static schema
	// (modifiers, custom selects); retrievable through Value.
	selectValues sql.SelectValues
}
// FileEdges holds the relations/edges for other nodes in the graph.
type FileEdges struct {
	// Owner holds the value of the owner edge.
	Owner *User `json:"owner,omitempty"`
	// StoragePolicies holds the value of the storage_policies edge.
	StoragePolicies *StoragePolicy `json:"storage_policies,omitempty"`
	// Parent holds the value of the parent edge.
	Parent *File `json:"parent,omitempty"`
	// Children holds the value of the children edge.
	Children []*File `json:"children,omitempty"`
	// Metadata holds the value of the metadata edge.
	Metadata []*Metadata `json:"metadata,omitempty"`
	// Entities holds the value of the entities edge.
	Entities []*Entity `json:"entities,omitempty"`
	// Shares holds the value of the shares edge.
	Shares []*Share `json:"shares,omitempty"`
	// DirectLinks holds the value of the direct_links edge.
	DirectLinks []*DirectLink `json:"direct_links,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index order: 0 owner, 1 storage_policies, 2 parent, 3 children,
	// 4 metadata, 5 entities, 6 shares, 7 direct_links.
	loadedTypes [8]bool
}
// OwnerOrErr returns the Owner value, a NotLoadedError when the edge was
// never eager-loaded, or a NotFoundError when it was loaded but absent.
func (e FileEdges) OwnerOrErr() (*User, error) {
	if !e.loadedTypes[0] {
		return nil, &NotLoadedError{edge: "owner"}
	}
	if e.Owner == nil {
		// Edge was loaded but no matching row was found.
		return nil, &NotFoundError{label: user.Label}
	}
	return e.Owner, nil
}
// StoragePoliciesOrErr returns the StoragePolicies value, a NotLoadedError
// when the edge was never eager-loaded, or a NotFoundError when it was
// loaded but absent.
func (e FileEdges) StoragePoliciesOrErr() (*StoragePolicy, error) {
	if !e.loadedTypes[1] {
		return nil, &NotLoadedError{edge: "storage_policies"}
	}
	if e.StoragePolicies == nil {
		// Edge was loaded but no matching row was found.
		return nil, &NotFoundError{label: storagepolicy.Label}
	}
	return e.StoragePolicies, nil
}
// ParentOrErr returns the Parent value, a NotLoadedError when the edge was
// never eager-loaded, or a NotFoundError when it was loaded but absent.
func (e FileEdges) ParentOrErr() (*File, error) {
	if !e.loadedTypes[2] {
		return nil, &NotLoadedError{edge: "parent"}
	}
	if e.Parent == nil {
		// Edge was loaded but no matching row was found.
		return nil, &NotFoundError{label: file.Label}
	}
	return e.Parent, nil
}
// ChildrenOrErr returns the Children value, or a NotLoadedError when the
// edge was never eager-loaded.
func (e FileEdges) ChildrenOrErr() ([]*File, error) {
	if !e.loadedTypes[3] {
		return nil, &NotLoadedError{edge: "children"}
	}
	return e.Children, nil
}
// MetadataOrErr returns the Metadata value, or a NotLoadedError when the
// edge was never eager-loaded.
func (e FileEdges) MetadataOrErr() ([]*Metadata, error) {
	if !e.loadedTypes[4] {
		return nil, &NotLoadedError{edge: "metadata"}
	}
	return e.Metadata, nil
}
// EntitiesOrErr returns the Entities value, or a NotLoadedError when the
// edge was never eager-loaded.
func (e FileEdges) EntitiesOrErr() ([]*Entity, error) {
	if !e.loadedTypes[5] {
		return nil, &NotLoadedError{edge: "entities"}
	}
	return e.Entities, nil
}
// SharesOrErr returns the Shares value, or a NotLoadedError when the edge
// was never eager-loaded.
func (e FileEdges) SharesOrErr() ([]*Share, error) {
	if !e.loadedTypes[6] {
		return nil, &NotLoadedError{edge: "shares"}
	}
	return e.Shares, nil
}
// DirectLinksOrErr returns the DirectLinks value, or a NotLoadedError when
// the edge was never eager-loaded.
func (e FileEdges) DirectLinksOrErr() ([]*DirectLink, error) {
	if !e.loadedTypes[7] {
		return nil, &NotLoadedError{edge: "direct_links"}
	}
	return e.DirectLinks, nil
}
// scanValues returns the types for scanning values from sql.Rows.
//
// For each requested column it allocates the matching sql.Null* wrapper
// (or a *[]byte for the JSON "props" column); columns not part of the
// schema fall through to sql.UnknownType and are later kept in
// selectValues by assignValues.
func (*File) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case file.FieldProps:
			// Scanned as raw bytes; unmarshaled in assignValues.
			values[i] = new([]byte)
		case file.FieldIsSymbolic:
			values[i] = new(sql.NullBool)
		case file.FieldID, file.FieldType, file.FieldOwnerID, file.FieldSize, file.FieldPrimaryEntity, file.FieldFileChildren, file.FieldStoragePolicyFiles:
			values[i] = new(sql.NullInt64)
		case file.FieldName:
			values[i] = new(sql.NullString)
		case file.FieldCreatedAt, file.FieldUpdatedAt, file.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the File fields.
//
// columns and values are parallel slices: values[i] holds the scanned value
// for columns[i], using the concrete wrapper types chosen by scanValues.
// Unknown columns are stashed into f.selectValues so they remain
// retrievable through Value.
func (f *File) assignValues(columns []string, values []any) error {
	// Extra values are tolerated; fewer values than columns is an error.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case file.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			f.ID = int(value.Int64)
		case file.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				f.CreatedAt = value.Time
			}
		case file.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				f.UpdatedAt = value.Time
			}
		case file.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// Nillable field: only allocate the pointer for non-NULL rows.
				f.DeletedAt = new(time.Time)
				*f.DeletedAt = value.Time
			}
		case file.FieldType:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field type", values[i])
			} else if value.Valid {
				f.Type = int(value.Int64)
			}
		case file.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				f.Name = value.String
			}
		case file.FieldOwnerID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field owner_id", values[i])
			} else if value.Valid {
				f.OwnerID = int(value.Int64)
			}
		case file.FieldSize:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field size", values[i])
			} else if value.Valid {
				f.Size = value.Int64
			}
		case file.FieldPrimaryEntity:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field primary_entity", values[i])
			} else if value.Valid {
				f.PrimaryEntity = int(value.Int64)
			}
		case file.FieldFileChildren:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field file_children", values[i])
			} else if value.Valid {
				f.FileChildren = int(value.Int64)
			}
		case file.FieldIsSymbolic:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field is_symbolic", values[i])
			} else if value.Valid {
				f.IsSymbolic = value.Bool
			}
		case file.FieldProps:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field props", values[i])
			} else if value != nil && len(*value) > 0 {
				// JSON column: decode the raw bytes scanned by scanValues.
				if err := json.Unmarshal(*value, &f.Props); err != nil {
					return fmt.Errorf("unmarshal field props: %w", err)
				}
			}
		case file.FieldStoragePolicyFiles:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field storage_policy_files", values[i])
			} else if value.Valid {
				f.StoragePolicyFiles = int(value.Int64)
			}
		default:
			// Not a schema column: keep it for retrieval via Value.
			f.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the File.
// This includes values selected through modifiers, order, etc.
// Only columns stored by assignValues' unknown-column branch are available here.
func (f *File) Value(name string) (ent.Value, error) {
	return f.selectValues.Get(name)
}
// QueryOwner returns a query builder for the "owner" edge of the File entity.
func (f *File) QueryOwner() *UserQuery {
	client := NewFileClient(f.config)
	return client.QueryOwner(f)
}
// QueryStoragePolicies returns a query builder for the "storage_policies" edge of the File entity.
func (f *File) QueryStoragePolicies() *StoragePolicyQuery {
	client := NewFileClient(f.config)
	return client.QueryStoragePolicies(f)
}
// QueryParent returns a query builder for the "parent" edge of the File entity.
func (f *File) QueryParent() *FileQuery {
	client := NewFileClient(f.config)
	return client.QueryParent(f)
}
// QueryChildren returns a query builder for the "children" edge of the File entity.
func (f *File) QueryChildren() *FileQuery {
	client := NewFileClient(f.config)
	return client.QueryChildren(f)
}
// QueryMetadata returns a query builder for the "metadata" edge of the File entity.
func (f *File) QueryMetadata() *MetadataQuery {
	client := NewFileClient(f.config)
	return client.QueryMetadata(f)
}
// QueryEntities returns a query builder for the "entities" edge of the File entity.
func (f *File) QueryEntities() *EntityQuery {
	client := NewFileClient(f.config)
	return client.QueryEntities(f)
}
// QueryShares returns a query builder for the "shares" edge of the File entity.
func (f *File) QueryShares() *ShareQuery {
	client := NewFileClient(f.config)
	return client.QueryShares(f)
}
// QueryDirectLinks returns a query builder for the "direct_links" edge of the File entity.
func (f *File) QueryDirectLinks() *DirectLinkQuery {
	client := NewFileClient(f.config)
	return client.QueryDirectLinks(f)
}
// Update returns a builder for updating this File.
// Note that you need to call File.Unwrap() before calling this method if this File
// was returned from a transaction, and the transaction was committed or rolled back.
func (f *File) Update() *FileUpdateOne {
	client := NewFileClient(f.config)
	return client.UpdateOne(f)
}
// Unwrap unwraps the File entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (f *File) Unwrap() *File {
	_tx, ok := f.config.driver.(*txDriver)
	if !ok {
		// Calling Unwrap on a non-transactional entity is a programmer error.
		panic("ent: File is not a transactional entity")
	}
	// Swap the tx-bound driver for the underlying one.
	f.config.driver = _tx.drv
	return f
}
// String implements the fmt.Stringer.
//
// Fix over the original: the "deleted_at" separator is only emitted when
// the field is set; the previous code always wrote ", " after the nil
// check, producing a stray ", , " for files whose DeletedAt is nil.
func (f *File) String() string {
	var builder strings.Builder
	builder.WriteString("File(")
	builder.WriteString(fmt.Sprintf("id=%v, ", f.ID))
	builder.WriteString("created_at=")
	builder.WriteString(f.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(f.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := f.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
		builder.WriteString(", ")
	}
	builder.WriteString("type=")
	builder.WriteString(fmt.Sprintf("%v", f.Type))
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(f.Name)
	builder.WriteString(", ")
	builder.WriteString("owner_id=")
	builder.WriteString(fmt.Sprintf("%v", f.OwnerID))
	builder.WriteString(", ")
	builder.WriteString("size=")
	builder.WriteString(fmt.Sprintf("%v", f.Size))
	builder.WriteString(", ")
	builder.WriteString("primary_entity=")
	builder.WriteString(fmt.Sprintf("%v", f.PrimaryEntity))
	builder.WriteString(", ")
	builder.WriteString("file_children=")
	builder.WriteString(fmt.Sprintf("%v", f.FileChildren))
	builder.WriteString(", ")
	builder.WriteString("is_symbolic=")
	builder.WriteString(fmt.Sprintf("%v", f.IsSymbolic))
	builder.WriteString(", ")
	builder.WriteString("props=")
	builder.WriteString(fmt.Sprintf("%v", f.Props))
	builder.WriteString(", ")
	builder.WriteString("storage_policy_files=")
	builder.WriteString(fmt.Sprintf("%v", f.StoragePolicyFiles))
	builder.WriteByte(')')
	return builder.String()
}
// SetOwner manually sets the "owner" edge and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetOwner(v *User) {
	f.Edges.Owner = v
	f.Edges.loadedTypes[0] = true
}
// SetStoragePolicies manually sets the "storage_policies" edge and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetStoragePolicies(v *StoragePolicy) {
	f.Edges.StoragePolicies = v
	f.Edges.loadedTypes[1] = true
}
// SetParent manually sets the "parent" edge and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetParent(v *File) {
	f.Edges.Parent = v
	f.Edges.loadedTypes[2] = true
}
// SetChildren manually sets the "children" edge and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetChildren(v []*File) {
	f.Edges.Children = v
	f.Edges.loadedTypes[3] = true
}
// SetMetadata manually sets the "metadata" edge and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetMetadata(v []*Metadata) {
	f.Edges.Metadata = v
	f.Edges.loadedTypes[4] = true
}
// SetEntities manually sets the "entities" edge and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetEntities(v []*Entity) {
	f.Edges.Entities = v
	f.Edges.loadedTypes[5] = true
}
// SetShares manually sets the "shares" edge and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetShares(v []*Share) {
	f.Edges.Shares = v
	f.Edges.loadedTypes[6] = true
}
// SetDirectLinks manually sets the "direct_links" edge and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetDirectLinks(v []*DirectLink) {
	f.Edges.DirectLinks = v
	f.Edges.loadedTypes[7] = true
}
// Files is a parsable slice of File.
type Files []*File

@ -0,0 +1,371 @@
// Code generated by ent, DO NOT EDIT.
package file
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
// Label holds the string label denoting the file type in the database.
Label = "file"
// FieldID holds the string denoting the id field in the database.
FieldID = "id"
// FieldCreatedAt holds the string denoting the created_at field in the database.
FieldCreatedAt = "created_at"
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
FieldUpdatedAt = "updated_at"
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
FieldDeletedAt = "deleted_at"
// FieldType holds the string denoting the type field in the database.
FieldType = "type"
// FieldName holds the string denoting the name field in the database.
FieldName = "name"
// FieldOwnerID holds the string denoting the owner_id field in the database.
FieldOwnerID = "owner_id"
// FieldSize holds the string denoting the size field in the database.
FieldSize = "size"
// FieldPrimaryEntity holds the string denoting the primary_entity field in the database.
FieldPrimaryEntity = "primary_entity"
// FieldFileChildren holds the string denoting the file_children field in the database.
FieldFileChildren = "file_children"
// FieldIsSymbolic holds the string denoting the is_symbolic field in the database.
FieldIsSymbolic = "is_symbolic"
// FieldProps holds the string denoting the props field in the database.
FieldProps = "props"
// FieldStoragePolicyFiles holds the string denoting the storage_policy_files field in the database.
FieldStoragePolicyFiles = "storage_policy_files"
// EdgeOwner holds the string denoting the owner edge name in mutations.
EdgeOwner = "owner"
// EdgeStoragePolicies holds the string denoting the storage_policies edge name in mutations.
EdgeStoragePolicies = "storage_policies"
// EdgeParent holds the string denoting the parent edge name in mutations.
EdgeParent = "parent"
// EdgeChildren holds the string denoting the children edge name in mutations.
EdgeChildren = "children"
// EdgeMetadata holds the string denoting the metadata edge name in mutations.
EdgeMetadata = "metadata"
// EdgeEntities holds the string denoting the entities edge name in mutations.
EdgeEntities = "entities"
// EdgeShares holds the string denoting the shares edge name in mutations.
EdgeShares = "shares"
// EdgeDirectLinks holds the string denoting the direct_links edge name in mutations.
EdgeDirectLinks = "direct_links"
// Table holds the table name of the file in the database.
Table = "files"
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "files"
// OwnerInverseTable is the table name for the User entity.
// It exists in this package in order to avoid circular dependency with the "user" package.
OwnerInverseTable = "users"
// OwnerColumn is the table column denoting the owner relation/edge.
OwnerColumn = "owner_id"
// StoragePoliciesTable is the table that holds the storage_policies relation/edge.
StoragePoliciesTable = "files"
// StoragePoliciesInverseTable is the table name for the StoragePolicy entity.
// It exists in this package in order to avoid circular dependency with the "storagepolicy" package.
StoragePoliciesInverseTable = "storage_policies"
// StoragePoliciesColumn is the table column denoting the storage_policies relation/edge.
StoragePoliciesColumn = "storage_policy_files"
// ParentTable is the table that holds the parent relation/edge.
ParentTable = "files"
// ParentColumn is the table column denoting the parent relation/edge.
ParentColumn = "file_children"
// ChildrenTable is the table that holds the children relation/edge.
ChildrenTable = "files"
// ChildrenColumn is the table column denoting the children relation/edge.
ChildrenColumn = "file_children"
// MetadataTable is the table that holds the metadata relation/edge.
MetadataTable = "metadata"
// MetadataInverseTable is the table name for the Metadata entity.
// It exists in this package in order to avoid circular dependency with the "metadata" package.
MetadataInverseTable = "metadata"
// MetadataColumn is the table column denoting the metadata relation/edge.
MetadataColumn = "file_id"
// EntitiesTable is the table that holds the entities relation/edge. The primary key declared below.
EntitiesTable = "file_entities"
// EntitiesInverseTable is the table name for the Entity entity.
// It exists in this package in order to avoid circular dependency with the "entity" package.
EntitiesInverseTable = "entities"
// SharesTable is the table that holds the shares relation/edge.
SharesTable = "shares"
// SharesInverseTable is the table name for the Share entity.
// It exists in this package in order to avoid circular dependency with the "share" package.
SharesInverseTable = "shares"
// SharesColumn is the table column denoting the shares relation/edge.
SharesColumn = "file_shares"
// DirectLinksTable is the table that holds the direct_links relation/edge.
DirectLinksTable = "direct_links"
// DirectLinksInverseTable is the table name for the DirectLink entity.
// It exists in this package in order to avoid circular dependency with the "directlink" package.
DirectLinksInverseTable = "direct_links"
// DirectLinksColumn is the table column denoting the direct_links relation/edge.
DirectLinksColumn = "file_id"
)
// Columns holds all SQL columns for file fields.
// Used by ValidColumn to check user-supplied column names.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldType,
	FieldName,
	FieldOwnerID,
	FieldSize,
	FieldPrimaryEntity,
	FieldFileChildren,
	FieldIsSymbolic,
	FieldProps,
	FieldStoragePolicyFiles,
}
var (
	// EntitiesPrimaryKey lists the two table columns forming the composite
	// primary key of the entities relation (M2M join table).
	EntitiesPrimaryKey = []string{"file_id", "entity_id"}
)
// ValidColumn reports whether column is one of the schema's table columns.
func ValidColumn(column string) bool {
	for _, known := range Columns {
		if known == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks holds the hooks for the File type; populated by the runtime package.
	Hooks [1]ent.Hook
	// Interceptors holds the interceptors for the File type; populated by the runtime package.
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// DefaultSize holds the default value on creation for the "size" field.
	DefaultSize int64
	// DefaultIsSymbolic holds the default value on creation for the "is_symbolic" field.
	DefaultIsSymbolic bool
)
// OrderOption defines the ordering options for the File queries.
// Each option mutates the given selector to add an ordering term
// (see the By* constructors below).
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}
// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldType, opts...).ToFunc()
}
// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldName, opts...).ToFunc()
}
// ByOwnerID orders the results by the owner_id field.
func ByOwnerID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldOwnerID, opts...).ToFunc()
}
// BySize orders the results by the size field.
func BySize(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSize, opts...).ToFunc()
}
// ByPrimaryEntity orders the results by the primary_entity field.
func ByPrimaryEntity(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldPrimaryEntity, opts...).ToFunc()
}
// ByFileChildren orders the results by the file_children field.
func ByFileChildren(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldFileChildren, opts...).ToFunc()
}
// ByIsSymbolic orders the results by the is_symbolic field.
func ByIsSymbolic(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldIsSymbolic, opts...).ToFunc()
}
// ByStoragePolicyFiles orders the results by the storage_policy_files field.
func ByStoragePolicyFiles(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStoragePolicyFiles, opts...).ToFunc()
}
// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
}
}
// ByStoragePoliciesField orders the results by storage_policies field.
func ByStoragePoliciesField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newStoragePoliciesStep(), sql.OrderByField(field, opts...))
}
}
// ByParentField orders the results by parent field.
func ByParentField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newParentStep(), sql.OrderByField(field, opts...))
}
}
// ByChildrenCount orders the results by children count.
func ByChildrenCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newChildrenStep(), opts...)
}
}
// ByChildren orders the results by children terms.
func ByChildren(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newChildrenStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByMetadataCount orders the results by metadata count.
func ByMetadataCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newMetadataStep(), opts...)
}
}
// ByMetadata orders the results by metadata terms.
func ByMetadata(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newMetadataStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByEntitiesCount orders the results by entities count.
func ByEntitiesCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newEntitiesStep(), opts...)
}
}
// ByEntities orders the results by entities terms.
func ByEntities(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newEntitiesStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// BySharesCount orders the results by shares count.
func BySharesCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newSharesStep(), opts...)
}
}
// ByShares orders the results by shares terms.
func ByShares(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newSharesStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByDirectLinksCount orders the results by direct_links count.
func ByDirectLinksCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newDirectLinksStep(), opts...)
}
}
// ByDirectLinks orders the results by direct_links terms.
func ByDirectLinks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newDirectLinksStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
func newOwnerStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
}
func newStoragePoliciesStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(StoragePoliciesInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, StoragePoliciesTable, StoragePoliciesColumn),
)
}
// newParentStep builds the traversal step from a file to its parent:
// a self-referential edge on the files table over the "file_children"
// column (M2O, inverse direction).
func newParentStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(Table, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
	)
}
// newChildrenStep builds the traversal step from a file to its children:
// the owning side of the same self-referential "file_children" edge
// (O2M, forward direction — the counterpart of newParentStep).
func newChildrenStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(Table, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
	)
}
func newMetadataStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(MetadataInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, MetadataTable, MetadataColumn),
)
}
func newEntitiesStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(EntitiesInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2M, false, EntitiesTable, EntitiesPrimaryKey...),
)
}
func newSharesStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(SharesInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, SharesTable, SharesColumn),
)
}
func newDirectLinksStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DirectLinksInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DirectLinksTable, DirectLinksColumn),
)
}

@ -0,0 +1,735 @@
// Code generated by ent, DO NOT EDIT.
package file
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// ID filters vertices based on their ID field.
func ID(id int) predicate.File {
return predicate.File(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.File {
return predicate.File(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.File {
return predicate.File(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.File {
return predicate.File(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.File {
return predicate.File(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.File {
return predicate.File(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.File {
return predicate.File(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.File {
return predicate.File(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.File {
return predicate.File(sql.FieldLTE(FieldID, id))
}
// Shorthand field predicates for the File schema: each helper below named
// after a field (e.g. CreatedAt) is a convenience alias for the corresponding
// *EQ predicate (CreatedAtEQ). Code generated by ent — do not edit by hand.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldUpdatedAt, v))
}

// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldDeletedAt, v))
}

// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
func Type(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldType, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.File {
	return predicate.File(sql.FieldEQ(FieldName, v))
}

// OwnerID applies equality check predicate on the "owner_id" field. It's identical to OwnerIDEQ.
func OwnerID(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldOwnerID, v))
}

// Size applies equality check predicate on the "size" field. It's identical to SizeEQ.
func Size(v int64) predicate.File {
	return predicate.File(sql.FieldEQ(FieldSize, v))
}

// PrimaryEntity applies equality check predicate on the "primary_entity" field. It's identical to PrimaryEntityEQ.
func PrimaryEntity(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldPrimaryEntity, v))
}

// FileChildren applies equality check predicate on the "file_children" field. It's identical to FileChildrenEQ.
func FileChildren(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldFileChildren, v))
}

// IsSymbolic applies equality check predicate on the "is_symbolic" field. It's identical to IsSymbolicEQ.
func IsSymbolic(v bool) predicate.File {
	return predicate.File(sql.FieldEQ(FieldIsSymbolic, v))
}

// StoragePolicyFiles applies equality check predicate on the "storage_policy_files" field. It's identical to StoragePolicyFilesEQ.
func StoragePolicyFiles(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldStoragePolicyFiles, v))
}
// Comparison predicates for the timestamp fields (created_at, updated_at,
// deleted_at) and the integer "type" field. Each delegates to the matching
// entgo.io/ent/dialect/sql field helper, which renders the SQL WHERE clause.
// The nullable "deleted_at" additionally gets IsNil/NotNil (soft-delete
// filtering). Code generated by ent — do not edit by hand.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.File {
	return predicate.File(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.File {
	return predicate.File(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.File {
	return predicate.File(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.File {
	return predicate.File(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldLTE(FieldUpdatedAt, v))
}

// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldDeletedAt, v))
}

// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldDeletedAt, v))
}

// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldIn(FieldDeletedAt, vs...))
}

// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldDeletedAt, vs...))
}

// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.File {
	return predicate.File(sql.FieldGT(FieldDeletedAt, v))
}

// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldGTE(FieldDeletedAt, v))
}

// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.File {
	return predicate.File(sql.FieldLT(FieldDeletedAt, v))
}

// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldLTE(FieldDeletedAt, v))
}

// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
// Matches rows that are NOT soft-deleted (deleted_at IS NULL).
func DeletedAtIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldDeletedAt))
}

// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
// Matches rows that ARE soft-deleted (deleted_at IS NOT NULL).
func DeletedAtNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldDeletedAt))
}

// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldType, v))
}

// TypeNEQ applies the NEQ predicate on the "type" field.
func TypeNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldType, v))
}

// TypeIn applies the In predicate on the "type" field.
func TypeIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldType, vs...))
}

// TypeNotIn applies the NotIn predicate on the "type" field.
func TypeNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldType, vs...))
}

// TypeGT applies the GT predicate on the "type" field.
func TypeGT(v int) predicate.File {
	return predicate.File(sql.FieldGT(FieldType, v))
}

// TypeGTE applies the GTE predicate on the "type" field.
func TypeGTE(v int) predicate.File {
	return predicate.File(sql.FieldGTE(FieldType, v))
}

// TypeLT applies the LT predicate on the "type" field.
func TypeLT(v int) predicate.File {
	return predicate.File(sql.FieldLT(FieldType, v))
}

// TypeLTE applies the LTE predicate on the "type" field.
func TypeLTE(v int) predicate.File {
	return predicate.File(sql.FieldLTE(FieldType, v))
}
// Predicates for the remaining File fields. String fields (name) get the full
// set of string operators including Contains/HasPrefix/HasSuffix and the
// case-insensitive EqualFold/ContainsFold variants; nullable fields
// (primary_entity, file_children, props, storage_policy_files) additionally
// get IsNil/NotNil. Code generated by ent — do not edit by hand.

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.File {
	return predicate.File(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.File {
	return predicate.File(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.File {
	return predicate.File(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.File {
	return predicate.File(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.File {
	return predicate.File(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.File {
	return predicate.File(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.File {
	return predicate.File(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.File {
	return predicate.File(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.File {
	return predicate.File(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field
// (case-insensitive equality).
func NameEqualFold(v string) predicate.File {
	return predicate.File(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field
// (case-insensitive substring match).
func NameContainsFold(v string) predicate.File {
	return predicate.File(sql.FieldContainsFold(FieldName, v))
}

// OwnerIDEQ applies the EQ predicate on the "owner_id" field.
func OwnerIDEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldOwnerID, v))
}

// OwnerIDNEQ applies the NEQ predicate on the "owner_id" field.
func OwnerIDNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldOwnerID, v))
}

// OwnerIDIn applies the In predicate on the "owner_id" field.
func OwnerIDIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldOwnerID, vs...))
}

// OwnerIDNotIn applies the NotIn predicate on the "owner_id" field.
func OwnerIDNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldOwnerID, vs...))
}

// SizeEQ applies the EQ predicate on the "size" field.
func SizeEQ(v int64) predicate.File {
	return predicate.File(sql.FieldEQ(FieldSize, v))
}

// SizeNEQ applies the NEQ predicate on the "size" field.
func SizeNEQ(v int64) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldSize, v))
}

// SizeIn applies the In predicate on the "size" field.
func SizeIn(vs ...int64) predicate.File {
	return predicate.File(sql.FieldIn(FieldSize, vs...))
}

// SizeNotIn applies the NotIn predicate on the "size" field.
func SizeNotIn(vs ...int64) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldSize, vs...))
}

// SizeGT applies the GT predicate on the "size" field.
func SizeGT(v int64) predicate.File {
	return predicate.File(sql.FieldGT(FieldSize, v))
}

// SizeGTE applies the GTE predicate on the "size" field.
func SizeGTE(v int64) predicate.File {
	return predicate.File(sql.FieldGTE(FieldSize, v))
}

// SizeLT applies the LT predicate on the "size" field.
func SizeLT(v int64) predicate.File {
	return predicate.File(sql.FieldLT(FieldSize, v))
}

// SizeLTE applies the LTE predicate on the "size" field.
func SizeLTE(v int64) predicate.File {
	return predicate.File(sql.FieldLTE(FieldSize, v))
}

// PrimaryEntityEQ applies the EQ predicate on the "primary_entity" field.
func PrimaryEntityEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldPrimaryEntity, v))
}

// PrimaryEntityNEQ applies the NEQ predicate on the "primary_entity" field.
func PrimaryEntityNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldPrimaryEntity, v))
}

// PrimaryEntityIn applies the In predicate on the "primary_entity" field.
func PrimaryEntityIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldPrimaryEntity, vs...))
}

// PrimaryEntityNotIn applies the NotIn predicate on the "primary_entity" field.
func PrimaryEntityNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldPrimaryEntity, vs...))
}

// PrimaryEntityGT applies the GT predicate on the "primary_entity" field.
func PrimaryEntityGT(v int) predicate.File {
	return predicate.File(sql.FieldGT(FieldPrimaryEntity, v))
}

// PrimaryEntityGTE applies the GTE predicate on the "primary_entity" field.
func PrimaryEntityGTE(v int) predicate.File {
	return predicate.File(sql.FieldGTE(FieldPrimaryEntity, v))
}

// PrimaryEntityLT applies the LT predicate on the "primary_entity" field.
func PrimaryEntityLT(v int) predicate.File {
	return predicate.File(sql.FieldLT(FieldPrimaryEntity, v))
}

// PrimaryEntityLTE applies the LTE predicate on the "primary_entity" field.
func PrimaryEntityLTE(v int) predicate.File {
	return predicate.File(sql.FieldLTE(FieldPrimaryEntity, v))
}

// PrimaryEntityIsNil applies the IsNil predicate on the "primary_entity" field.
func PrimaryEntityIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldPrimaryEntity))
}

// PrimaryEntityNotNil applies the NotNil predicate on the "primary_entity" field.
func PrimaryEntityNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldPrimaryEntity))
}

// FileChildrenEQ applies the EQ predicate on the "file_children" field.
func FileChildrenEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldFileChildren, v))
}

// FileChildrenNEQ applies the NEQ predicate on the "file_children" field.
func FileChildrenNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldFileChildren, v))
}

// FileChildrenIn applies the In predicate on the "file_children" field.
func FileChildrenIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldFileChildren, vs...))
}

// FileChildrenNotIn applies the NotIn predicate on the "file_children" field.
func FileChildrenNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldFileChildren, vs...))
}

// FileChildrenIsNil applies the IsNil predicate on the "file_children" field.
func FileChildrenIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldFileChildren))
}

// FileChildrenNotNil applies the NotNil predicate on the "file_children" field.
func FileChildrenNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldFileChildren))
}

// IsSymbolicEQ applies the EQ predicate on the "is_symbolic" field.
func IsSymbolicEQ(v bool) predicate.File {
	return predicate.File(sql.FieldEQ(FieldIsSymbolic, v))
}

// IsSymbolicNEQ applies the NEQ predicate on the "is_symbolic" field.
func IsSymbolicNEQ(v bool) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldIsSymbolic, v))
}

// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldProps))
}

// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldProps))
}

// StoragePolicyFilesEQ applies the EQ predicate on the "storage_policy_files" field.
func StoragePolicyFilesEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldStoragePolicyFiles, v))
}

// StoragePolicyFilesNEQ applies the NEQ predicate on the "storage_policy_files" field.
func StoragePolicyFilesNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldStoragePolicyFiles, v))
}

// StoragePolicyFilesIn applies the In predicate on the "storage_policy_files" field.
func StoragePolicyFilesIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldStoragePolicyFiles, vs...))
}

// StoragePolicyFilesNotIn applies the NotIn predicate on the "storage_policy_files" field.
func StoragePolicyFilesNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldStoragePolicyFiles, vs...))
}

// StoragePolicyFilesIsNil applies the IsNil predicate on the "storage_policy_files" field.
func StoragePolicyFilesIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldStoragePolicyFiles))
}

// StoragePolicyFilesNotNil applies the NotNil predicate on the "storage_policy_files" field.
func StoragePolicyFilesNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldStoragePolicyFiles))
}
// Edge (relation) predicates for the File schema. Each HasX variant matches
// rows that have at least one neighbor over the named edge; each HasXWith
// variant additionally applies the given predicates to the neighbor's
// selector. The bare HasX helpers build the graph step inline via
// sqlgraph.NewStep, while the HasXWith helpers reuse the generated
// newXStep() constructors (defined elsewhere in this package).
// Code generated by ent — do not edit by hand.

// HasOwner applies the HasEdge predicate on the "owner" edge.
func HasOwner() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates).
func HasOwnerWith(preds ...predicate.User) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newOwnerStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// Apply every caller-supplied predicate to the neighbor selector (AND semantics).
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasStoragePolicies applies the HasEdge predicate on the "storage_policies" edge.
func HasStoragePolicies() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, StoragePoliciesTable, StoragePoliciesColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasStoragePoliciesWith applies the HasEdge predicate on the "storage_policies" edge with a given conditions (other predicates).
func HasStoragePoliciesWith(preds ...predicate.StoragePolicy) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newStoragePoliciesStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasParent applies the HasEdge predicate on the "parent" edge.
func HasParent() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasParentWith applies the HasEdge predicate on the "parent" edge with a given conditions (other predicates).
func HasParentWith(preds ...predicate.File) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newParentStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasChildren applies the HasEdge predicate on the "children" edge.
func HasChildren() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasChildrenWith applies the HasEdge predicate on the "children" edge with a given conditions (other predicates).
func HasChildrenWith(preds ...predicate.File) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newChildrenStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasMetadata applies the HasEdge predicate on the "metadata" edge.
func HasMetadata() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, MetadataTable, MetadataColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasMetadataWith applies the HasEdge predicate on the "metadata" edge with a given conditions (other predicates).
func HasMetadataWith(preds ...predicate.Metadata) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newMetadataStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasEntities applies the HasEdge predicate on the "entities" edge.
// Note: this is the only M2M edge here and therefore uses a primary-key
// pair (EntitiesPrimaryKey) instead of a single join column.
func HasEntities() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2M, false, EntitiesTable, EntitiesPrimaryKey...),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasEntitiesWith applies the HasEdge predicate on the "entities" edge with a given conditions (other predicates).
func HasEntitiesWith(preds ...predicate.Entity) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newEntitiesStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasShares applies the HasEdge predicate on the "shares" edge.
func HasShares() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, SharesTable, SharesColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasSharesWith applies the HasEdge predicate on the "shares" edge with a given conditions (other predicates).
func HasSharesWith(preds ...predicate.Share) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newSharesStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasDirectLinks applies the HasEdge predicate on the "direct_links" edge.
func HasDirectLinks() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, DirectLinksTable, DirectLinksColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasDirectLinksWith applies the HasEdge predicate on the "direct_links" edge with a given conditions (other predicates).
func HasDirectLinksWith(preds ...predicate.DirectLink) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newDirectLinksStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.File) predicate.File {
	return predicate.File(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.File) predicate.File {
	return predicate.File(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.File) predicate.File {
	return predicate.File(sql.NotPredicates(p))
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// FileDelete is the builder for deleting a File entity.
type FileDelete struct {
	config
	// hooks run around the deletion via withHooks (pre/post mutation logic).
	hooks []Hook
	// mutation accumulates the WHERE predicates and done-state for this delete.
	mutation *FileMutation
}

// Where appends a list predicates to the FileDelete builder.
func (fd *FileDelete) Where(ps ...predicate.File) *FileDelete {
	fd.mutation.Where(ps...)
	return fd
}

// Exec executes the deletion query and returns how many vertices were deleted.
// Registered hooks are invoked around sqlExec via withHooks.
func (fd *FileDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, fd.sqlExec, fd.mutation, fd.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (fd *FileDelete) ExecX(ctx context.Context) int {
	n, err := fd.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds the DELETE spec, attaches any accumulated predicates, and
// runs it against the driver. Constraint violations from the database are
// wrapped in *ConstraintError so callers can detect them with errors.As.
func (fd *FileDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(file.Table, sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt))
	if ps := fd.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			// Each stored predicate mutates the selector in place (AND semantics).
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, fd.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	fd.mutation.done = true
	return affected, err
}

// FileDeleteOne is the builder for deleting a single File entity.
// It wraps FileDelete and converts "zero rows affected" into NotFoundError.
type FileDeleteOne struct {
	fd *FileDelete
}

// Where appends a list predicates to the FileDelete builder.
func (fdo *FileDeleteOne) Where(ps ...predicate.File) *FileDeleteOne {
	fdo.fd.mutation.Where(ps...)
	return fdo
}

// Exec executes the deletion query.
// Returns *NotFoundError when no row matched the predicates; note that if
// more than one row matches, all of them are deleted (the count is not
// validated against 1 here).
func (fdo *FileDeleteOne) Exec(ctx context.Context) error {
	n, err := fdo.fd.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{file.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (fdo *FileDeleteOne) ExecX(ctx context.Context) {
	if err := fdo.Exec(ctx); err != nil {
		panic(err)
	}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -0,0 +1,3 @@
package ent
//go:generate go run -mod=mod entc.go

@ -0,0 +1,265 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"encoding/json"
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// Group is the model entity for the Group schema.
type Group struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	// Pointer because the column is nullable (soft-delete marker).
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// MaxStorage holds the value of the "max_storage" field.
	MaxStorage int64 `json:"max_storage,omitempty"`
	// SpeedLimit holds the value of the "speed_limit" field.
	SpeedLimit int `json:"speed_limit,omitempty"`
	// Permissions holds the value of the "permissions" field.
	Permissions *boolset.BooleanSet `json:"permissions,omitempty"`
	// Settings holds the value of the "settings" field.
	// Stored as JSON in the database; see assignValues for the unmarshal.
	Settings *types.GroupSetting `json:"settings,omitempty"`
	// StoragePolicyID holds the value of the "storage_policy_id" field.
	StoragePolicyID int `json:"storage_policy_id,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the GroupQuery when eager-loading is set.
	Edges GroupEdges `json:"edges"`
	// selectValues captures columns selected via modifiers/order that are
	// not schema fields; retrieved through Value().
	selectValues sql.SelectValues
}

// GroupEdges holds the relations/edges for other nodes in the graph.
type GroupEdges struct {
	// Users holds the value of the users edge.
	Users []*User `json:"users,omitempty"`
	// StoragePolicies holds the value of the storage_policies edge.
	StoragePolicies *StoragePolicy `json:"storage_policies,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0: users; index 1: storage_policies.
	loadedTypes [2]bool
}

// UsersOrErr returns the Users value or an error if the edge
// was not loaded in eager-loading.
func (e GroupEdges) UsersOrErr() ([]*User, error) {
	if e.loadedTypes[0] {
		return e.Users, nil
	}
	return nil, &NotLoadedError{edge: "users"}
}

// StoragePoliciesOrErr returns the StoragePolicies value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e GroupEdges) StoragePoliciesOrErr() (*StoragePolicy, error) {
	if e.loadedTypes[1] {
		if e.StoragePolicies == nil {
			// Edge was loaded but was not found.
			return nil, &NotFoundError{label: storagepolicy.Label}
		}
		return e.StoragePolicies, nil
	}
	return nil, &NotLoadedError{edge: "storage_policies"}
}

// scanValues returns the types for scanning values from sql.Rows.
// One scan destination per requested column; unknown columns get
// sql.UnknownType and are routed to selectValues by assignValues.
func (*Group) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case group.FieldSettings:
			// JSON column: scanned raw, unmarshaled later in assignValues.
			values[i] = new([]byte)
		case group.FieldPermissions:
			// boolset.BooleanSet implements sql.Scanner directly.
			values[i] = new(boolset.BooleanSet)
		case group.FieldID, group.FieldMaxStorage, group.FieldSpeedLimit, group.FieldStoragePolicyID:
			values[i] = new(sql.NullInt64)
		case group.FieldName:
			values[i] = new(sql.NullString)
		case group.FieldCreatedAt, group.FieldUpdatedAt, group.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Group fields.
func (gr *Group) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case group.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			gr.ID = int(value.Int64)
		case group.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				gr.CreatedAt = value.Time
			}
		case group.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				gr.UpdatedAt = value.Time
			}
		case group.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// Nullable field: allocate only when the column is non-NULL.
				gr.DeletedAt = new(time.Time)
				*gr.DeletedAt = value.Time
			}
		case group.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				gr.Name = value.String
			}
		case group.FieldMaxStorage:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field max_storage", values[i])
			} else if value.Valid {
				gr.MaxStorage = value.Int64
			}
		case group.FieldSpeedLimit:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field speed_limit", values[i])
			} else if value.Valid {
				gr.SpeedLimit = int(value.Int64)
			}
		case group.FieldPermissions:
			if value, ok := values[i].(*boolset.BooleanSet); !ok {
				return fmt.Errorf("unexpected type %T for field permissions", values[i])
			} else if value != nil {
				gr.Permissions = value
			}
		case group.FieldSettings:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field settings", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &gr.Settings); err != nil {
					return fmt.Errorf("unmarshal field settings: %w", err)
				}
			}
		case group.FieldStoragePolicyID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field storage_policy_id", values[i])
			} else if value.Valid {
				gr.StoragePolicyID = int(value.Int64)
			}
		default:
			// Non-schema column selected via a modifier; keep it retrievable
			// through Value().
			gr.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the Group.
// This includes values selected through modifiers, order, etc.
func (gr *Group) Value(name string) (ent.Value, error) {
	return gr.selectValues.Get(name)
}

// QueryUsers queries the "users" edge of the Group entity.
func (gr *Group) QueryUsers() *UserQuery {
	return NewGroupClient(gr.config).QueryUsers(gr)
}

// QueryStoragePolicies queries the "storage_policies" edge of the Group entity.
func (gr *Group) QueryStoragePolicies() *StoragePolicyQuery {
	return NewGroupClient(gr.config).QueryStoragePolicies(gr)
}

// Update returns a builder for updating this Group.
// Note that you need to call Group.Unwrap() before calling this method if this Group
// was returned from a transaction, and the transaction was committed or rolled back.
func (gr *Group) Update() *GroupUpdateOne {
	return NewGroupClient(gr.config).UpdateOne(gr)
}

// Unwrap unwraps the Group entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (gr *Group) Unwrap() *Group {
	_tx, ok := gr.config.driver.(*txDriver)
	if !ok {
		panic("ent: Group is not a transactional entity")
	}
	gr.config.driver = _tx.drv
	return gr
}

// String implements the fmt.Stringer.
// NOTE(review): when DeletedAt is nil the "deleted_at=" segment is skipped
// but the trailing ", " is still written, producing a doubled separator —
// harmless debug-output quirk of the generator, presumably intentional.
func (gr *Group) String() string {
	var builder strings.Builder
	builder.WriteString("Group(")
	builder.WriteString(fmt.Sprintf("id=%v, ", gr.ID))
	builder.WriteString("created_at=")
	builder.WriteString(gr.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(gr.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := gr.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(gr.Name)
	builder.WriteString(", ")
	builder.WriteString("max_storage=")
	builder.WriteString(fmt.Sprintf("%v", gr.MaxStorage))
	builder.WriteString(", ")
	builder.WriteString("speed_limit=")
	builder.WriteString(fmt.Sprintf("%v", gr.SpeedLimit))
	builder.WriteString(", ")
	builder.WriteString("permissions=")
	builder.WriteString(fmt.Sprintf("%v", gr.Permissions))
	builder.WriteString(", ")
	builder.WriteString("settings=")
	builder.WriteString(fmt.Sprintf("%v", gr.Settings))
	builder.WriteString(", ")
	builder.WriteString("storage_policy_id=")
	builder.WriteString(fmt.Sprintf("%v", gr.StoragePolicyID))
	builder.WriteByte(')')
	return builder.String()
}

// SetUsers manually set the edge as loaded state.
// NOTE(review): receiver is named "e" here instead of the "gr" used by the
// other Group methods — this comes from a custom ent template; keep as
// generated.
func (e *Group) SetUsers(v []*User) {
	e.Edges.Users = v
	e.Edges.loadedTypes[0] = true
}

// SetStoragePolicies manually set the edge as loaded state.
func (e *Group) SetStoragePolicies(v *StoragePolicy) {
	e.Edges.StoragePolicies = v
	e.Edges.loadedTypes[1] = true
}

// Groups is a parsable slice of Group.
type Groups []*Group

@ -0,0 +1,177 @@
// Code generated by ent, DO NOT EDIT.
package group
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
// Database identifiers for the Group entity: table/column names and
// edge metadata used by the generated query builders.
const (
	// Label holds the string label denoting the group type in the database.
	Label = "group"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldMaxStorage holds the string denoting the max_storage field in the database.
	FieldMaxStorage = "max_storage"
	// FieldSpeedLimit holds the string denoting the speed_limit field in the database.
	FieldSpeedLimit = "speed_limit"
	// FieldPermissions holds the string denoting the permissions field in the database.
	FieldPermissions = "permissions"
	// FieldSettings holds the string denoting the settings field in the database.
	FieldSettings = "settings"
	// FieldStoragePolicyID holds the string denoting the storage_policy_id field in the database.
	FieldStoragePolicyID = "storage_policy_id"
	// EdgeUsers holds the string denoting the users edge name in mutations.
	EdgeUsers = "users"
	// EdgeStoragePolicies holds the string denoting the storage_policies edge name in mutations.
	EdgeStoragePolicies = "storage_policies"
	// Table holds the table name of the group in the database.
	Table = "groups"
	// UsersTable is the table that holds the users relation/edge.
	UsersTable = "users"
	// UsersInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UsersInverseTable = "users"
	// UsersColumn is the table column denoting the users relation/edge.
	UsersColumn = "group_users"
	// StoragePoliciesTable is the table that holds the storage_policies relation/edge.
	// The edge is M2O, so its foreign key lives on the "groups" table itself.
	StoragePoliciesTable = "groups"
	// StoragePoliciesInverseTable is the table name for the StoragePolicy entity.
	// It exists in this package in order to avoid circular dependency with the "storagepolicy" package.
	StoragePoliciesInverseTable = "storage_policies"
	// StoragePoliciesColumn is the table column denoting the storage_policies relation/edge.
	StoragePoliciesColumn = "storage_policy_id"
)
// Columns holds all SQL columns for group fields.
// Order is not significant; ValidColumn below checks membership only.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldName,
	FieldMaxStorage,
	FieldSpeedLimit,
	FieldPermissions,
	FieldSettings,
	FieldStoragePolicyID,
}
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	// Linear scan is fine: Columns is a small fixed set.
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
//
// Until that import runs, these function values are nil.
var (
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// DefaultSettings holds the default value on creation for the "settings" field.
	DefaultSettings *types.GroupSetting
)
// OrderOption defines the ordering options for the Group queries.
type OrderOption func(*sql.Selector)

// Simple per-column ordering helpers; each wraps sql.OrderByField for one field.

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByMaxStorage orders the results by the max_storage field.
func ByMaxStorage(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldMaxStorage, opts...).ToFunc()
}

// BySpeedLimit orders the results by the speed_limit field.
func BySpeedLimit(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSpeedLimit, opts...).ToFunc()
}

// ByStoragePolicyID orders the results by the storage_policy_id field.
func ByStoragePolicyID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStoragePolicyID, opts...).ToFunc()
}
// Edge-based ordering helpers and the graph steps they traverse.

// ByUsersCount orders the results by users count.
func ByUsersCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newUsersStep(), opts...)
	}
}

// ByUsers orders the results by users terms.
func ByUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUsersStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByStoragePoliciesField orders the results by storage_policies field.
func ByStoragePoliciesField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newStoragePoliciesStep(), sql.OrderByField(field, opts...))
	}
}

// newUsersStep builds the O2M traversal step groups -> users.
func newUsersStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UsersInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
	)
}

// newStoragePoliciesStep builds the M2O traversal step groups -> storage_policies.
func newStoragePoliciesStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(StoragePoliciesInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, StoragePoliciesTable, StoragePoliciesColumn),
	)
}

@ -0,0 +1,533 @@
// Code generated by ent, DO NOT EDIT.
package group
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// Predicates on the entity ID; each wraps the corresponding sql.FieldXxx helper.

// ID filters vertices based on their ID field.
func ID(id int) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.Group {
	return predicate.Group(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.Group {
	return predicate.Group(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.Group {
	return predicate.Group(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.Group {
	return predicate.Group(sql.FieldLTE(FieldID, id))
}
// Short-form equality predicates, one per field; each is identical to its XxxEQ counterpart.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v))
}

// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldDeletedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldName, v))
}

// MaxStorage applies equality check predicate on the "max_storage" field. It's identical to MaxStorageEQ.
func MaxStorage(v int64) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldMaxStorage, v))
}

// SpeedLimit applies equality check predicate on the "speed_limit" field. It's identical to SpeedLimitEQ.
func SpeedLimit(v int) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldSpeedLimit, v))
}

// Permissions applies equality check predicate on the "permissions" field. It's identical to PermissionsEQ.
func Permissions(v *boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldPermissions, v))
}

// StoragePolicyID applies equality check predicate on the "storage_policy_id" field. It's identical to StoragePolicyIDEQ.
func StoragePolicyID(v int) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldStoragePolicyID, v))
}
// Comparison predicates for the timestamp fields. deleted_at is nullable
// (see the IsNil/NotNil variants below) — presumably it backs soft-deletion;
// confirm against the schema before relying on that.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldLTE(FieldUpdatedAt, v))
}

// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldDeletedAt, v))
}

// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldDeletedAt, v))
}

// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldDeletedAt, vs...))
}

// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldDeletedAt, vs...))
}

// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldGT(FieldDeletedAt, v))
}

// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldGTE(FieldDeletedAt, v))
}

// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldLT(FieldDeletedAt, v))
}

// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.Group {
	return predicate.Group(sql.FieldLTE(FieldDeletedAt, v))
}

// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.Group {
	return predicate.Group(sql.FieldIsNull(FieldDeletedAt))
}

// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.Group {
	return predicate.Group(sql.FieldNotNull(FieldDeletedAt))
}
// String predicates for the "name" field, including substring and case-folding variants.

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.Group {
	return predicate.Group(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.Group {
	return predicate.Group(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.Group {
	return predicate.Group(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.Group {
	return predicate.Group(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.Group {
	return predicate.Group(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.Group {
	return predicate.Group(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.Group {
	return predicate.Group(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.Group {
	return predicate.Group(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.Group {
	return predicate.Group(sql.FieldContainsFold(FieldName, v))
}
// Numeric predicates for "max_storage" (int64) and "speed_limit" (int).
// Both fields are nullable — see the IsNil/NotNil variants.

// MaxStorageEQ applies the EQ predicate on the "max_storage" field.
func MaxStorageEQ(v int64) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldMaxStorage, v))
}

// MaxStorageNEQ applies the NEQ predicate on the "max_storage" field.
func MaxStorageNEQ(v int64) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldMaxStorage, v))
}

// MaxStorageIn applies the In predicate on the "max_storage" field.
func MaxStorageIn(vs ...int64) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldMaxStorage, vs...))
}

// MaxStorageNotIn applies the NotIn predicate on the "max_storage" field.
func MaxStorageNotIn(vs ...int64) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldMaxStorage, vs...))
}

// MaxStorageGT applies the GT predicate on the "max_storage" field.
func MaxStorageGT(v int64) predicate.Group {
	return predicate.Group(sql.FieldGT(FieldMaxStorage, v))
}

// MaxStorageGTE applies the GTE predicate on the "max_storage" field.
func MaxStorageGTE(v int64) predicate.Group {
	return predicate.Group(sql.FieldGTE(FieldMaxStorage, v))
}

// MaxStorageLT applies the LT predicate on the "max_storage" field.
func MaxStorageLT(v int64) predicate.Group {
	return predicate.Group(sql.FieldLT(FieldMaxStorage, v))
}

// MaxStorageLTE applies the LTE predicate on the "max_storage" field.
func MaxStorageLTE(v int64) predicate.Group {
	return predicate.Group(sql.FieldLTE(FieldMaxStorage, v))
}

// MaxStorageIsNil applies the IsNil predicate on the "max_storage" field.
func MaxStorageIsNil() predicate.Group {
	return predicate.Group(sql.FieldIsNull(FieldMaxStorage))
}

// MaxStorageNotNil applies the NotNil predicate on the "max_storage" field.
func MaxStorageNotNil() predicate.Group {
	return predicate.Group(sql.FieldNotNull(FieldMaxStorage))
}

// SpeedLimitEQ applies the EQ predicate on the "speed_limit" field.
func SpeedLimitEQ(v int) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldSpeedLimit, v))
}

// SpeedLimitNEQ applies the NEQ predicate on the "speed_limit" field.
func SpeedLimitNEQ(v int) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldSpeedLimit, v))
}

// SpeedLimitIn applies the In predicate on the "speed_limit" field.
func SpeedLimitIn(vs ...int) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldSpeedLimit, vs...))
}

// SpeedLimitNotIn applies the NotIn predicate on the "speed_limit" field.
func SpeedLimitNotIn(vs ...int) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldSpeedLimit, vs...))
}

// SpeedLimitGT applies the GT predicate on the "speed_limit" field.
func SpeedLimitGT(v int) predicate.Group {
	return predicate.Group(sql.FieldGT(FieldSpeedLimit, v))
}

// SpeedLimitGTE applies the GTE predicate on the "speed_limit" field.
func SpeedLimitGTE(v int) predicate.Group {
	return predicate.Group(sql.FieldGTE(FieldSpeedLimit, v))
}

// SpeedLimitLT applies the LT predicate on the "speed_limit" field.
func SpeedLimitLT(v int) predicate.Group {
	return predicate.Group(sql.FieldLT(FieldSpeedLimit, v))
}

// SpeedLimitLTE applies the LTE predicate on the "speed_limit" field.
func SpeedLimitLTE(v int) predicate.Group {
	return predicate.Group(sql.FieldLTE(FieldSpeedLimit, v))
}

// SpeedLimitIsNil applies the IsNil predicate on the "speed_limit" field.
func SpeedLimitIsNil() predicate.Group {
	return predicate.Group(sql.FieldIsNull(FieldSpeedLimit))
}

// SpeedLimitNotNil applies the NotNil predicate on the "speed_limit" field.
func SpeedLimitNotNil() predicate.Group {
	return predicate.Group(sql.FieldNotNull(FieldSpeedLimit))
}
// Predicates for "permissions" (*boolset.BooleanSet, compared as a stored value),
// "settings" (JSON-like; only nil checks are generated), and the nullable
// "storage_policy_id" foreign key.

// PermissionsEQ applies the EQ predicate on the "permissions" field.
func PermissionsEQ(v *boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldPermissions, v))
}

// PermissionsNEQ applies the NEQ predicate on the "permissions" field.
func PermissionsNEQ(v *boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldPermissions, v))
}

// PermissionsIn applies the In predicate on the "permissions" field.
func PermissionsIn(vs ...*boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldPermissions, vs...))
}

// PermissionsNotIn applies the NotIn predicate on the "permissions" field.
func PermissionsNotIn(vs ...*boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldPermissions, vs...))
}

// PermissionsGT applies the GT predicate on the "permissions" field.
func PermissionsGT(v *boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldGT(FieldPermissions, v))
}

// PermissionsGTE applies the GTE predicate on the "permissions" field.
func PermissionsGTE(v *boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldGTE(FieldPermissions, v))
}

// PermissionsLT applies the LT predicate on the "permissions" field.
func PermissionsLT(v *boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldLT(FieldPermissions, v))
}

// PermissionsLTE applies the LTE predicate on the "permissions" field.
func PermissionsLTE(v *boolset.BooleanSet) predicate.Group {
	return predicate.Group(sql.FieldLTE(FieldPermissions, v))
}

// SettingsIsNil applies the IsNil predicate on the "settings" field.
func SettingsIsNil() predicate.Group {
	return predicate.Group(sql.FieldIsNull(FieldSettings))
}

// SettingsNotNil applies the NotNil predicate on the "settings" field.
func SettingsNotNil() predicate.Group {
	return predicate.Group(sql.FieldNotNull(FieldSettings))
}

// StoragePolicyIDEQ applies the EQ predicate on the "storage_policy_id" field.
func StoragePolicyIDEQ(v int) predicate.Group {
	return predicate.Group(sql.FieldEQ(FieldStoragePolicyID, v))
}

// StoragePolicyIDNEQ applies the NEQ predicate on the "storage_policy_id" field.
func StoragePolicyIDNEQ(v int) predicate.Group {
	return predicate.Group(sql.FieldNEQ(FieldStoragePolicyID, v))
}

// StoragePolicyIDIn applies the In predicate on the "storage_policy_id" field.
func StoragePolicyIDIn(vs ...int) predicate.Group {
	return predicate.Group(sql.FieldIn(FieldStoragePolicyID, vs...))
}

// StoragePolicyIDNotIn applies the NotIn predicate on the "storage_policy_id" field.
func StoragePolicyIDNotIn(vs ...int) predicate.Group {
	return predicate.Group(sql.FieldNotIn(FieldStoragePolicyID, vs...))
}

// StoragePolicyIDIsNil applies the IsNil predicate on the "storage_policy_id" field.
func StoragePolicyIDIsNil() predicate.Group {
	return predicate.Group(sql.FieldIsNull(FieldStoragePolicyID))
}

// StoragePolicyIDNotNil applies the NotNil predicate on the "storage_policy_id" field.
func StoragePolicyIDNotNil() predicate.Group {
	return predicate.Group(sql.FieldNotNull(FieldStoragePolicyID))
}
// Edge-existence predicates for the "users" (O2M) and "storage_policies" (M2O) edges.

// HasUsers applies the HasEdge predicate on the "users" edge.
func HasUsers() predicate.Group {
	return predicate.Group(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasUsersWith applies the HasEdge predicate on the "users" edge with the given conditions (other predicates).
func HasUsersWith(preds ...predicate.User) predicate.Group {
	return predicate.Group(func(s *sql.Selector) {
		step := newUsersStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// All predicates are ANDed onto the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasStoragePolicies applies the HasEdge predicate on the "storage_policies" edge.
func HasStoragePolicies() predicate.Group {
	return predicate.Group(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, StoragePoliciesTable, StoragePoliciesColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasStoragePoliciesWith applies the HasEdge predicate on the "storage_policies" edge with the given conditions (other predicates).
func HasStoragePoliciesWith(preds ...predicate.StoragePolicy) predicate.Group {
	return predicate.Group(func(s *sql.Selector) {
		step := newStoragePoliciesStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// All predicates are ANDed onto the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}
// Logical combinators over Group predicates.

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Group) predicate.Group {
	return predicate.Group(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Group) predicate.Group {
	return predicate.Group(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.Group) predicate.Group {
	return predicate.Group(sql.NotPredicates(p))
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// GroupDelete is the builder for deleting a Group entity.
type GroupDelete struct {
	config
	hooks    []Hook
	mutation *GroupMutation
}

// Where appends a list of predicates to the GroupDelete builder.
func (gd *GroupDelete) Where(ps ...predicate.Group) *GroupDelete {
	gd.mutation.Where(ps...)
	return gd
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (gd *GroupDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, gd.sqlExec, gd.mutation, gd.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (gd *GroupDelete) ExecX(ctx context.Context) int {
	n, err := gd.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}
// sqlExec builds the DELETE graph spec from the accumulated predicates and
// runs it, returning the number of deleted rows.
func (gd *GroupDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt))
	if ps := gd.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			// Apply every accumulated predicate to the DELETE selector.
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, gd.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		// Wrap FK/unique violations in the package-level error type callers match on.
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	gd.mutation.done = true
	return affected, err
}
// GroupDeleteOne is the builder for deleting a single Group entity.
// It wraps GroupDelete and requires exactly one matching row.
type GroupDeleteOne struct {
	gd *GroupDelete
}

// Where appends a list of predicates to the GroupDelete builder.
func (gdo *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne {
	gdo.gd.mutation.Where(ps...)
	return gdo
}
// Exec executes the deletion query.
// It returns a *NotFoundError when no row matched the predicates.
func (gdo *GroupDeleteOne) Exec(ctx context.Context) error {
	n, err := gdo.gd.Exec(ctx)
	if err != nil {
		return err
	}
	if n == 0 {
		return &NotFoundError{group.Label}
	}
	return nil
}
// ExecX is like Exec, but panics if an error occurs
// (including when no matching row was found).
func (gdo *GroupDeleteOne) ExecX(ctx context.Context) {
	if err := gdo.Exec(ctx); err != nil {
		panic(err)
	}
}

@ -0,0 +1,681 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"database/sql/driver"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// GroupQuery is the builder for querying Group entities.
type GroupQuery struct {
	config
	ctx                 *QueryContext
	order               []group.OrderOption
	inters              []Interceptor
	predicates          []predicate.Group
	withUsers           *UserQuery          // non-nil when the "users" edge is eager-loaded
	withStoragePolicies *StoragePolicyQuery // non-nil when the "storage_policies" edge is eager-loaded
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the GroupQuery builder.
func (gq *GroupQuery) Where(ps ...predicate.Group) *GroupQuery {
	gq.predicates = append(gq.predicates, ps...)
	return gq
}

// Limit the number of records to be returned by this query.
func (gq *GroupQuery) Limit(limit int) *GroupQuery {
	gq.ctx.Limit = &limit
	return gq
}

// Offset to start from.
func (gq *GroupQuery) Offset(offset int) *GroupQuery {
	gq.ctx.Offset = &offset
	return gq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (gq *GroupQuery) Unique(unique bool) *GroupQuery {
	gq.ctx.Unique = &unique
	return gq
}

// Order specifies how the records should be ordered.
func (gq *GroupQuery) Order(o ...group.OrderOption) *GroupQuery {
	gq.order = append(gq.order, o...)
	return gq
}
// QueryUsers chains the current query on the "users" edge,
// returning a UserQuery scoped to the users of the matched groups.
func (gq *GroupQuery) QueryUsers() *UserQuery {
	query := (&UserClient{config: gq.config}).Query()
	// Traversal is resolved lazily: the path closure runs when the
	// returned query executes, using this query's selector as the root.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := gq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := gq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(group.Table, group.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, group.UsersTable, group.UsersColumn),
		)
		fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryStoragePolicies chains the current query on the "storage_policies" edge,
// returning a StoragePolicyQuery scoped to the policies of the matched groups.
func (gq *GroupQuery) QueryStoragePolicies() *StoragePolicyQuery {
	query := (&StoragePolicyClient{config: gq.config}).Query()
	// Same lazy-traversal pattern as QueryUsers, but over the M2O edge.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := gq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := gq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(group.Table, group.FieldID, selector),
			sqlgraph.To(storagepolicy.Table, storagepolicy.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, group.StoragePoliciesTable, group.StoragePoliciesColumn),
		)
		fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// First returns the first Group entity from the query.
// Returns a *NotFoundError when no Group was found.
func (gq *GroupQuery) First(ctx context.Context) (*Group, error) {
	// LIMIT 1 keeps the query cheap; only one row is ever fetched.
	nodes, err := gq.Limit(1).All(setContextOp(ctx, gq.ctx, "First"))
	switch {
	case err != nil:
		return nil, err
	case len(nodes) == 0:
		return nil, &NotFoundError{group.Label}
	default:
		return nodes[0], nil
	}
}
// FirstX is like First, but panics if an error occurs.
// A NotFound error is swallowed and a nil node is returned instead.
func (gq *GroupQuery) FirstX(ctx context.Context) *Group {
	node, err := gq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first Group ID from the query.
// Returns a *NotFoundError when no Group ID was found.
func (gq *GroupQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = gq.Limit(1).IDs(setContextOp(ctx, gq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{group.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// A NotFound error is swallowed and the zero ID is returned instead.
func (gq *GroupQuery) FirstIDX(ctx context.Context) int {
	id, err := gq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single Group entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Group entity is found.
// Returns a *NotFoundError when no Group entities are found.
func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) {
	// LIMIT 2 is enough to distinguish "exactly one" from "more than one".
	nodes, err := gq.Limit(2).All(setContextOp(ctx, gq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{group.Label}
	default:
		return nil, &NotSingularError{group.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (gq *GroupQuery) OnlyX(ctx context.Context) *Group {
	node, err := gq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only Group ID in the query.
// Returns a *NotSingularError when more than one Group ID is found.
// Returns a *NotFoundError when no entities are found.
func (gq *GroupQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = gq.Limit(2).IDs(setContextOp(ctx, gq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{group.Label}
	default:
		err = &NotSingularError{group.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (gq *GroupQuery) OnlyIDX(ctx context.Context) int {
	id, err := gq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of Groups.
func (gq *GroupQuery) All(ctx context.Context) ([]*Group, error) {
	ctx = setContextOp(ctx, gq.ctx, "All")
	if err := gq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	// Run the query through any registered interceptors before hitting SQL.
	qr := querierAll[[]*Group, *GroupQuery]()
	return withInterceptors[[]*Group](ctx, gq, qr, gq.inters)
}
// AllX is like All, but panics if an error occurs.
func (gq *GroupQuery) AllX(ctx context.Context) []*Group {
	nodes, err := gq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}
// IDs executes the query and returns a list of Group IDs.
func (gq *GroupQuery) IDs(ctx context.Context) (ids []int, err error) {
	// Deduplicate IDs by default when this query is a graph-traversal step.
	if gq.ctx.Unique == nil && gq.path != nil {
		gq.Unique(true)
	}
	ctx = setContextOp(ctx, gq.ctx, "IDs")
	if err = gq.Select(group.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (gq *GroupQuery) IDsX(ctx context.Context) []int {
	ids, err := gq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
// Count returns the count of the given query.
func (gq *GroupQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, gq.ctx, "Count")
	if err := gq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, gq, querierCount[*GroupQuery](), gq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (gq *GroupQuery) CountX(ctx context.Context) int {
	count, err := gq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}
// Exist returns true if the query has elements in the graph.
func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, gq.ctx, "Exist")
	// Existence is implemented as a FirstID probe: not-found means false.
	switch _, err := gq.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}
// ExistX is like Exist, but panics if an error occurs.
func (gq *GroupQuery) ExistX(ctx context.Context) bool {
	exist, err := gq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
// Clone returns a duplicate of the GroupQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// A nil receiver is safe and yields nil.
func (gq *GroupQuery) Clone() *GroupQuery {
	if gq == nil {
		return nil
	}
	return &GroupQuery{
		config:              gq.config,
		ctx:                 gq.ctx.Clone(),
		order:               append([]group.OrderOption{}, gq.order...),
		inters:              append([]Interceptor{}, gq.inters...),
		predicates:          append([]predicate.Group{}, gq.predicates...),
		withUsers:           gq.withUsers.Clone(),
		withStoragePolicies: gq.withStoragePolicies.Clone(),
		// clone intermediate query.
		sql:  gq.sql.Clone(),
		path: gq.path,
	}
}
// WithUsers tells the query-builder to eager-load the nodes that are connected to
// the "users" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithUsers(opts ...func(*UserQuery)) *GroupQuery {
	query := (&UserClient{config: gq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	gq.withUsers = query
	return gq
}
// WithStoragePolicies tells the query-builder to eager-load the nodes that are connected to
// the "storage_policies" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithStoragePolicies(opts ...func(*StoragePolicyQuery)) *GroupQuery {
	query := (&StoragePolicyClient{config: gq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	gq.withStoragePolicies = query
	return gq
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.Group.Query().
//		GroupBy(group.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy {
	gq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &GroupGroupBy{build: gq}
	// The group-by builder shares the field list by pointer so later
	// mutations on the query are reflected at scan time.
	grbuild.flds = &gq.ctx.Fields
	grbuild.label = group.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.Group.Query().
//		Select(group.FieldCreatedAt).
//		Scan(ctx, &v)
func (gq *GroupQuery) Select(fields ...string) *GroupSelect {
	gq.ctx.Fields = append(gq.ctx.Fields, fields...)
	sbuild := &GroupSelect{GroupQuery: gq}
	sbuild.label = group.Label
	sbuild.flds, sbuild.scan = &gq.ctx.Fields, sbuild.Scan
	return sbuild
}
// Aggregate returns a GroupSelect configured with the given aggregations.
func (gq *GroupQuery) Aggregate(fns ...AggregateFunc) *GroupSelect {
	return gq.Select().Aggregate(fns...)
}
// prepareQuery validates the builder before execution: it checks that all
// interceptors are initialized, lets Traverser interceptors walk the query,
// validates the selected columns, and resolves a deferred traversal path
// (gq.path) into the underlying SQL selector.
func (gq *GroupQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range gq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, gq); err != nil {
				return err
			}
		}
	}
	for _, f := range gq.ctx.Fields {
		if !group.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if gq.path != nil {
		prev, err := gq.path(ctx)
		if err != nil {
			return err
		}
		gq.sql = prev
	}
	return nil
}
// sqlAll executes the query and scans all matching Group rows, then
// eager-loads the requested edges ("users", "storage_policies") in
// follow-up queries. hooks may mutate the query spec before execution.
func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, error) {
	var (
		nodes       = []*Group{}
		_spec       = gq.querySpec()
		loadedTypes = [2]bool{
			gq.withUsers != nil,
			gq.withStoragePolicies != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*Group).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &Group{config: gq.config}
		nodes = append(nodes, node)
		// Record which edges were eager-loaded so Edges accessors can
		// distinguish "not loaded" from "loaded but empty".
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, gq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := gq.withUsers; query != nil {
		if err := gq.loadUsers(ctx, query, nodes,
			func(n *Group) { n.Edges.Users = []*User{} },
			func(n *Group, e *User) { n.Edges.Users = append(n.Edges.Users, e) }); err != nil {
			return nil, err
		}
	}
	if query := gq.withStoragePolicies; query != nil {
		if err := gq.loadStoragePolicies(ctx, query, nodes, nil,
			func(n *Group, e *StoragePolicy) { n.Edges.StoragePolicies = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// loadUsers eager-loads the O2M "users" edge for the given Group nodes in a
// single batched query keyed by the users' group_users foreign key.
// init initializes the edge container on each node; assign attaches each
// loaded User to its parent Group.
func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []*Group, init func(*Group), assign func(*Group, *User)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int]*Group)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	query.withFKs = true
	// If the caller narrowed the User selection, the FK column must still be
	// fetched so results can be mapped back to their parent Group.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(user.FieldGroupUsers)
	}
	query.Where(predicate.User(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(group.UsersColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.GroupUsers
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "group_users" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
// loadStoragePolicies eager-loads the M2O "storage_policies" edge: it batches
// the distinct storage_policy_id values of the given Groups into one IN query
// and assigns each StoragePolicy to every Group that references it.
func (gq *GroupQuery) loadStoragePolicies(ctx context.Context, query *StoragePolicyQuery, nodes []*Group, init func(*Group), assign func(*Group, *StoragePolicy)) error {
	ids := make([]int, 0, len(nodes))
	nodeids := make(map[int][]*Group)
	for i := range nodes {
		fk := nodes[i].StoragePolicyID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(storagepolicy.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "storage_policy_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
// sqlCount executes a COUNT over the current query spec.
func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := gq.querySpec()
	_spec.Node.Columns = gq.ctx.Fields
	if len(gq.ctx.Fields) > 0 {
		_spec.Unique = gq.ctx.Unique != nil && *gq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, gq.driver, _spec)
}
// querySpec translates the builder's state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec for execution.
func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt))
	_spec.From = gq.sql
	if unique := gq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if gq.path != nil {
		// Traversal steps may produce duplicates; default to DISTINCT.
		_spec.Unique = true
	}
	if fields := gq.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, group.FieldID)
		for i := range fields {
			if fields[i] != group.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// The FK column is required to resolve the eager-loaded M2O edge.
		if gq.withStoragePolicies != nil {
			_spec.Node.AddColumnOnce(group.FieldStoragePolicyID)
		}
	}
	if ps := gq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := gq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := gq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := gq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
// sqlQuery builds the raw SQL selector for this query, used by the
// group-by and select scanners.
func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(gq.driver.Dialect())
	t1 := builder.Table(group.Table)
	columns := gq.ctx.Fields
	if len(columns) == 0 {
		columns = group.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if gq.sql != nil {
		selector = gq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if gq.ctx.Unique != nil && *gq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range gq.predicates {
		p(selector)
	}
	for _, p := range gq.order {
		p(selector)
	}
	if offset := gq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := gq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// GroupGroupBy is the group-by builder for Group entities.
type GroupGroupBy struct {
	selector
	build *GroupQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (ggb *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy {
	ggb.fns = append(ggb.fns, fns...)
	return ggb
}
// Scan applies the selector query and scans the result into the given value.
func (ggb *GroupGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, ggb.build.ctx, "GroupBy")
	if err := ggb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*GroupQuery, *GroupGroupBy](ctx, ggb.build, ggb, ggb.build.inters, v)
}
// sqlScan builds the GROUP BY statement (grouped columns + aggregations)
// and scans the rows into v.
func (ggb *GroupGroupBy) sqlScan(ctx context.Context, root *GroupQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(ggb.fns))
	for _, fn := range ggb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Only set the default selection if an interceptor has not already done so.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*ggb.flds)+len(ggb.fns))
		for _, f := range *ggb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*ggb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := ggb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
// GroupSelect is the builder for selecting fields of Group entities.
type GroupSelect struct {
	*GroupQuery
	selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (gs *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect {
	gs.fns = append(gs.fns, fns...)
	return gs
}
// Scan applies the selector query and scans the result into the given value.
func (gs *GroupSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, gs.ctx, "Select")
	if err := gs.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*GroupQuery, *GroupSelect](ctx, gs.GroupQuery, gs, gs.inters, v)
}
// sqlScan appends any aggregations to the selected columns and scans the
// resulting rows into v.
func (gs *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(gs.fns))
	for _, fn := range gs.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations either replace the selection (no fields chosen) or extend it.
	switch n := len(*gs.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := gs.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

@ -0,0 +1,822 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// GroupUpdate is the builder for updating Group entities.
type GroupUpdate struct {
	config
	hooks    []Hook
	mutation *GroupMutation
}
// Where appends a list predicates to the GroupUpdate builder.
func (gu *GroupUpdate) Where(ps ...predicate.Group) *GroupUpdate {
	gu.mutation.Where(ps...)
	return gu
}
// SetUpdatedAt sets the "updated_at" field.
func (gu *GroupUpdate) SetUpdatedAt(t time.Time) *GroupUpdate {
	gu.mutation.SetUpdatedAt(t)
	return gu
}
// SetDeletedAt sets the "deleted_at" field.
func (gu *GroupUpdate) SetDeletedAt(t time.Time) *GroupUpdate {
	gu.mutation.SetDeletedAt(t)
	return gu
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (gu *GroupUpdate) SetNillableDeletedAt(t *time.Time) *GroupUpdate {
	if t != nil {
		gu.SetDeletedAt(*t)
	}
	return gu
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (gu *GroupUpdate) ClearDeletedAt() *GroupUpdate {
	gu.mutation.ClearDeletedAt()
	return gu
}
// SetName sets the "name" field.
func (gu *GroupUpdate) SetName(s string) *GroupUpdate {
	gu.mutation.SetName(s)
	return gu
}
// SetNillableName sets the "name" field if the given value is not nil.
func (gu *GroupUpdate) SetNillableName(s *string) *GroupUpdate {
	if s != nil {
		gu.SetName(*s)
	}
	return gu
}
// SetMaxStorage sets the "max_storage" field.
func (gu *GroupUpdate) SetMaxStorage(i int64) *GroupUpdate {
	// Reset discards any pending AddMaxStorage delta so Set is absolute.
	gu.mutation.ResetMaxStorage()
	gu.mutation.SetMaxStorage(i)
	return gu
}
// SetNillableMaxStorage sets the "max_storage" field if the given value is not nil.
func (gu *GroupUpdate) SetNillableMaxStorage(i *int64) *GroupUpdate {
	if i != nil {
		gu.SetMaxStorage(*i)
	}
	return gu
}
// AddMaxStorage adds i to the "max_storage" field.
func (gu *GroupUpdate) AddMaxStorage(i int64) *GroupUpdate {
	gu.mutation.AddMaxStorage(i)
	return gu
}
// ClearMaxStorage clears the value of the "max_storage" field.
func (gu *GroupUpdate) ClearMaxStorage() *GroupUpdate {
	gu.mutation.ClearMaxStorage()
	return gu
}
// SetSpeedLimit sets the "speed_limit" field.
func (gu *GroupUpdate) SetSpeedLimit(i int) *GroupUpdate {
	// Reset discards any pending AddSpeedLimit delta so Set is absolute.
	gu.mutation.ResetSpeedLimit()
	gu.mutation.SetSpeedLimit(i)
	return gu
}
// SetNillableSpeedLimit sets the "speed_limit" field if the given value is not nil.
func (gu *GroupUpdate) SetNillableSpeedLimit(i *int) *GroupUpdate {
	if i != nil {
		gu.SetSpeedLimit(*i)
	}
	return gu
}
// AddSpeedLimit adds i to the "speed_limit" field.
func (gu *GroupUpdate) AddSpeedLimit(i int) *GroupUpdate {
	gu.mutation.AddSpeedLimit(i)
	return gu
}
// ClearSpeedLimit clears the value of the "speed_limit" field.
func (gu *GroupUpdate) ClearSpeedLimit() *GroupUpdate {
	gu.mutation.ClearSpeedLimit()
	return gu
}
// SetPermissions sets the "permissions" field.
func (gu *GroupUpdate) SetPermissions(bs *boolset.BooleanSet) *GroupUpdate {
	gu.mutation.SetPermissions(bs)
	return gu
}
// SetSettings sets the "settings" field.
func (gu *GroupUpdate) SetSettings(ts *types.GroupSetting) *GroupUpdate {
	gu.mutation.SetSettings(ts)
	return gu
}
// ClearSettings clears the value of the "settings" field.
func (gu *GroupUpdate) ClearSettings() *GroupUpdate {
	gu.mutation.ClearSettings()
	return gu
}
// SetStoragePolicyID sets the "storage_policy_id" field.
func (gu *GroupUpdate) SetStoragePolicyID(i int) *GroupUpdate {
	gu.mutation.SetStoragePolicyID(i)
	return gu
}
// SetNillableStoragePolicyID sets the "storage_policy_id" field if the given value is not nil.
func (gu *GroupUpdate) SetNillableStoragePolicyID(i *int) *GroupUpdate {
	if i != nil {
		gu.SetStoragePolicyID(*i)
	}
	return gu
}
// ClearStoragePolicyID clears the value of the "storage_policy_id" field.
func (gu *GroupUpdate) ClearStoragePolicyID() *GroupUpdate {
	gu.mutation.ClearStoragePolicyID()
	return gu
}
// AddUserIDs adds the "users" edge to the User entity by IDs.
func (gu *GroupUpdate) AddUserIDs(ids ...int) *GroupUpdate {
	gu.mutation.AddUserIDs(ids...)
	return gu
}
// AddUsers adds the "users" edges to the User entity.
func (gu *GroupUpdate) AddUsers(u ...*User) *GroupUpdate {
	ids := make([]int, len(u))
	for i := range u {
		ids[i] = u[i].ID
	}
	return gu.AddUserIDs(ids...)
}
// SetStoragePoliciesID sets the "storage_policies" edge to the StoragePolicy entity by ID.
func (gu *GroupUpdate) SetStoragePoliciesID(id int) *GroupUpdate {
	gu.mutation.SetStoragePoliciesID(id)
	return gu
}
// SetNillableStoragePoliciesID sets the "storage_policies" edge to the StoragePolicy entity by ID if the given value is not nil.
func (gu *GroupUpdate) SetNillableStoragePoliciesID(id *int) *GroupUpdate {
	if id != nil {
		gu = gu.SetStoragePoliciesID(*id)
	}
	return gu
}
// SetStoragePolicies sets the "storage_policies" edge to the StoragePolicy entity.
func (gu *GroupUpdate) SetStoragePolicies(s *StoragePolicy) *GroupUpdate {
	return gu.SetStoragePoliciesID(s.ID)
}
// Mutation returns the GroupMutation object of the builder.
func (gu *GroupUpdate) Mutation() *GroupMutation {
	return gu.mutation
}
// ClearUsers clears all "users" edges to the User entity.
func (gu *GroupUpdate) ClearUsers() *GroupUpdate {
	gu.mutation.ClearUsers()
	return gu
}
// RemoveUserIDs removes the "users" edge to User entities by IDs.
func (gu *GroupUpdate) RemoveUserIDs(ids ...int) *GroupUpdate {
	gu.mutation.RemoveUserIDs(ids...)
	return gu
}
// RemoveUsers removes "users" edges to User entities.
func (gu *GroupUpdate) RemoveUsers(u ...*User) *GroupUpdate {
	ids := make([]int, len(u))
	for i := range u {
		ids[i] = u[i].ID
	}
	return gu.RemoveUserIDs(ids...)
}
// ClearStoragePolicies clears the "storage_policies" edge to the StoragePolicy entity.
func (gu *GroupUpdate) ClearStoragePolicies() *GroupUpdate {
	gu.mutation.ClearStoragePolicies()
	return gu
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (gu *GroupUpdate) Save(ctx context.Context) (int, error) {
	if err := gu.defaults(); err != nil {
		return 0, err
	}
	// withHooks runs the registered mutation hooks around sqlSave.
	return withHooks(ctx, gu.sqlSave, gu.mutation, gu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (gu *GroupUpdate) SaveX(ctx context.Context) int {
	affected, err := gu.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}
// Exec executes the query.
func (gu *GroupUpdate) Exec(ctx context.Context) error {
	_, err := gu.Save(ctx)
	return err
}
// ExecX is like Exec, but panics if an error occurs.
func (gu *GroupUpdate) ExecX(ctx context.Context) {
	if err := gu.Exec(ctx); err != nil {
		panic(err)
	}
}
// defaults sets the default values of the builder before save.
// It fills "updated_at" from the schema's UpdateDefault when unset.
func (gu *GroupUpdate) defaults() error {
	if _, ok := gu.mutation.UpdatedAt(); !ok {
		if group.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized group.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := group.UpdateDefaultUpdatedAt()
		gu.mutation.SetUpdatedAt(v)
	}
	return nil
}
// sqlSave translates the accumulated mutation into an UPDATE spec (scalar
// fields, then edge clear/remove/add operations) and executes it, returning
// the number of affected rows.
func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
	_spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt))
	if ps := gu.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar field set/add/clear operations.
	if value, ok := gu.mutation.UpdatedAt(); ok {
		_spec.SetField(group.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := gu.mutation.DeletedAt(); ok {
		_spec.SetField(group.FieldDeletedAt, field.TypeTime, value)
	}
	if gu.mutation.DeletedAtCleared() {
		_spec.ClearField(group.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := gu.mutation.Name(); ok {
		_spec.SetField(group.FieldName, field.TypeString, value)
	}
	if value, ok := gu.mutation.MaxStorage(); ok {
		_spec.SetField(group.FieldMaxStorage, field.TypeInt64, value)
	}
	if value, ok := gu.mutation.AddedMaxStorage(); ok {
		_spec.AddField(group.FieldMaxStorage, field.TypeInt64, value)
	}
	if gu.mutation.MaxStorageCleared() {
		_spec.ClearField(group.FieldMaxStorage, field.TypeInt64)
	}
	if value, ok := gu.mutation.SpeedLimit(); ok {
		_spec.SetField(group.FieldSpeedLimit, field.TypeInt, value)
	}
	if value, ok := gu.mutation.AddedSpeedLimit(); ok {
		_spec.AddField(group.FieldSpeedLimit, field.TypeInt, value)
	}
	if gu.mutation.SpeedLimitCleared() {
		_spec.ClearField(group.FieldSpeedLimit, field.TypeInt)
	}
	if value, ok := gu.mutation.Permissions(); ok {
		_spec.SetField(group.FieldPermissions, field.TypeBytes, value)
	}
	if value, ok := gu.mutation.Settings(); ok {
		_spec.SetField(group.FieldSettings, field.TypeJSON, value)
	}
	if gu.mutation.SettingsCleared() {
		_spec.ClearField(group.FieldSettings, field.TypeJSON)
	}
	// O2M "users" edge: clear-all, then selective removals, then additions.
	if gu.mutation.UsersCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.UsersTable,
			Columns: []string{group.UsersColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	// Selective removals are skipped when the edge is cleared entirely.
	if nodes := gu.mutation.RemovedUsersIDs(); len(nodes) > 0 && !gu.mutation.UsersCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.UsersTable,
			Columns: []string{group.UsersColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := gu.mutation.UsersIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.UsersTable,
			Columns: []string{group.UsersColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// M2O "storage_policies" edge: clear then (re)assign.
	if gu.mutation.StoragePoliciesCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   group.StoragePoliciesTable,
			Columns: []string{group.StoragePoliciesColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(storagepolicy.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := gu.mutation.StoragePoliciesIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   group.StoragePoliciesTable,
			Columns: []string{group.StoragePoliciesColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(storagepolicy.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if n, err = sqlgraph.UpdateNodes(ctx, gu.driver, _spec); err != nil {
		// Normalize driver errors into ent's typed errors for callers.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{group.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	gu.mutation.done = true
	return n, nil
}
// GroupUpdateOne is the builder for updating a single Group entity.
type GroupUpdateOne struct {
	config
	fields   []string
	hooks    []Hook
	mutation *GroupMutation
}
// SetUpdatedAt sets the "updated_at" field.
func (guo *GroupUpdateOne) SetUpdatedAt(t time.Time) *GroupUpdateOne {
	guo.mutation.SetUpdatedAt(t)
	return guo
}
// SetDeletedAt sets the "deleted_at" field.
func (guo *GroupUpdateOne) SetDeletedAt(t time.Time) *GroupUpdateOne {
	guo.mutation.SetDeletedAt(t)
	return guo
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (guo *GroupUpdateOne) SetNillableDeletedAt(t *time.Time) *GroupUpdateOne {
	if t != nil {
		guo.SetDeletedAt(*t)
	}
	return guo
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (guo *GroupUpdateOne) ClearDeletedAt() *GroupUpdateOne {
	guo.mutation.ClearDeletedAt()
	return guo
}
// SetName sets the "name" field.
func (guo *GroupUpdateOne) SetName(s string) *GroupUpdateOne {
	guo.mutation.SetName(s)
	return guo
}
// SetNillableName sets the "name" field if the given value is not nil.
func (guo *GroupUpdateOne) SetNillableName(s *string) *GroupUpdateOne {
	if s != nil {
		guo.SetName(*s)
	}
	return guo
}
// SetMaxStorage sets the "max_storage" field.
func (guo *GroupUpdateOne) SetMaxStorage(i int64) *GroupUpdateOne {
	// Reset discards any pending AddMaxStorage delta so Set is absolute.
	guo.mutation.ResetMaxStorage()
	guo.mutation.SetMaxStorage(i)
	return guo
}
// SetNillableMaxStorage sets the "max_storage" field if the given value is not nil.
func (guo *GroupUpdateOne) SetNillableMaxStorage(i *int64) *GroupUpdateOne {
	if i != nil {
		guo.SetMaxStorage(*i)
	}
	return guo
}
// AddMaxStorage adds i to the "max_storage" field.
func (guo *GroupUpdateOne) AddMaxStorage(i int64) *GroupUpdateOne {
	guo.mutation.AddMaxStorage(i)
	return guo
}
// ClearMaxStorage clears the value of the "max_storage" field.
func (guo *GroupUpdateOne) ClearMaxStorage() *GroupUpdateOne {
	guo.mutation.ClearMaxStorage()
	return guo
}
// SetSpeedLimit sets the "speed_limit" field.
func (guo *GroupUpdateOne) SetSpeedLimit(i int) *GroupUpdateOne {
	// Reset discards any pending AddSpeedLimit delta so Set is absolute.
	guo.mutation.ResetSpeedLimit()
	guo.mutation.SetSpeedLimit(i)
	return guo
}
// SetNillableSpeedLimit sets the "speed_limit" field if the given value is not nil.
func (guo *GroupUpdateOne) SetNillableSpeedLimit(i *int) *GroupUpdateOne {
	if i != nil {
		guo.SetSpeedLimit(*i)
	}
	return guo
}
// AddSpeedLimit adds i to the "speed_limit" field.
func (guo *GroupUpdateOne) AddSpeedLimit(i int) *GroupUpdateOne {
	guo.mutation.AddSpeedLimit(i)
	return guo
}
// ClearSpeedLimit clears the value of the "speed_limit" field.
func (guo *GroupUpdateOne) ClearSpeedLimit() *GroupUpdateOne {
	guo.mutation.ClearSpeedLimit()
	return guo
}
// SetPermissions sets the "permissions" field.
func (guo *GroupUpdateOne) SetPermissions(bs *boolset.BooleanSet) *GroupUpdateOne {
	guo.mutation.SetPermissions(bs)
	return guo
}
// SetSettings sets the "settings" field.
func (guo *GroupUpdateOne) SetSettings(ts *types.GroupSetting) *GroupUpdateOne {
	guo.mutation.SetSettings(ts)
	return guo
}
// ClearSettings clears the value of the "settings" field.
func (guo *GroupUpdateOne) ClearSettings() *GroupUpdateOne {
	guo.mutation.ClearSettings()
	return guo
}
// SetStoragePolicyID sets the "storage_policy_id" field.
func (guo *GroupUpdateOne) SetStoragePolicyID(i int) *GroupUpdateOne {
	guo.mutation.SetStoragePolicyID(i)
	return guo
}
// SetNillableStoragePolicyID sets the "storage_policy_id" field if the given value is not nil.
func (guo *GroupUpdateOne) SetNillableStoragePolicyID(i *int) *GroupUpdateOne {
	if i != nil {
		guo.SetStoragePolicyID(*i)
	}
	return guo
}
// ClearStoragePolicyID clears the value of the "storage_policy_id" field.
func (guo *GroupUpdateOne) ClearStoragePolicyID() *GroupUpdateOne {
	guo.mutation.ClearStoragePolicyID()
	return guo
}
// AddUserIDs adds the "users" edge to the User entity by IDs.
func (guo *GroupUpdateOne) AddUserIDs(ids ...int) *GroupUpdateOne {
	guo.mutation.AddUserIDs(ids...)
	return guo
}
// AddUsers adds the "users" edges to the User entity.
func (guo *GroupUpdateOne) AddUsers(u ...*User) *GroupUpdateOne {
	ids := make([]int, len(u))
	for i := range u {
		ids[i] = u[i].ID
	}
	return guo.AddUserIDs(ids...)
}
// SetStoragePoliciesID sets the "storage_policies" edge to the StoragePolicy entity by ID.
func (guo *GroupUpdateOne) SetStoragePoliciesID(id int) *GroupUpdateOne {
	guo.mutation.SetStoragePoliciesID(id)
	return guo
}
// SetNillableStoragePoliciesID sets the "storage_policies" edge to the StoragePolicy entity by ID if the given value is not nil.
func (guo *GroupUpdateOne) SetNillableStoragePoliciesID(id *int) *GroupUpdateOne {
	if id != nil {
		guo = guo.SetStoragePoliciesID(*id)
	}
	return guo
}
// SetStoragePolicies sets the "storage_policies" edge to the StoragePolicy entity.
func (guo *GroupUpdateOne) SetStoragePolicies(s *StoragePolicy) *GroupUpdateOne {
	return guo.SetStoragePoliciesID(s.ID)
}
// Mutation returns the GroupMutation object of the builder.
func (guo *GroupUpdateOne) Mutation() *GroupMutation {
	return guo.mutation
}
// ClearUsers clears all "users" edges to the User entity.
func (guo *GroupUpdateOne) ClearUsers() *GroupUpdateOne {
	guo.mutation.ClearUsers()
	return guo
}
// RemoveUserIDs removes the "users" edge to User entities by IDs.
func (guo *GroupUpdateOne) RemoveUserIDs(ids ...int) *GroupUpdateOne {
	guo.mutation.RemoveUserIDs(ids...)
	return guo
}
// RemoveUsers removes "users" edges to User entities.
func (guo *GroupUpdateOne) RemoveUsers(u ...*User) *GroupUpdateOne {
	ids := make([]int, len(u))
	for i := range u {
		ids[i] = u[i].ID
	}
	return guo.RemoveUserIDs(ids...)
}
// ClearStoragePolicies clears the "storage_policies" edge to the StoragePolicy entity.
func (guo *GroupUpdateOne) ClearStoragePolicies() *GroupUpdateOne {
	guo.mutation.ClearStoragePolicies()
	return guo
}
// Where appends a list predicates to the GroupUpdateOne builder.
func (guo *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne {
	guo.mutation.Where(ps...)
	return guo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne {
	guo.fields = append([]string{field}, fields...)
	return guo
}
// Save executes the query and returns the updated Group entity.
func (guo *GroupUpdateOne) Save(ctx context.Context) (*Group, error) {
	if err := guo.defaults(); err != nil {
		return nil, err
	}
	// withHooks runs the registered mutation hooks around sqlSave.
	return withHooks(ctx, guo.sqlSave, guo.mutation, guo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (guo *GroupUpdateOne) SaveX(ctx context.Context) *Group {
	node, err := guo.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
// Exec executes the query on the entity.
func (guo *GroupUpdateOne) Exec(ctx context.Context) error {
	_, err := guo.Save(ctx)
	return err
}
// ExecX is like Exec, but panics if an error occurs.
func (guo *GroupUpdateOne) ExecX(ctx context.Context) {
	if err := guo.Exec(ctx); err != nil {
		panic(err)
	}
}
// defaults sets the default values of the builder before save.
// It fills "updated_at" from the schema's UpdateDefault when unset.
func (guo *GroupUpdateOne) defaults() error {
	if _, ok := guo.mutation.UpdatedAt(); !ok {
		if group.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized group.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := group.UpdateDefaultUpdatedAt()
		guo.mutation.SetUpdatedAt(v)
	}
	return nil
}
// sqlSave assembles an sqlgraph update spec from the mutation state and
// executes it, returning the freshly scanned Group entity.
func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) {
	_spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt))
	// UpdateOne requires an explicit entity ID on the mutation.
	id, ok := guo.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Group.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor a Select(...) projection; the ID column is always kept so the
	// returned entity can be identified.
	if fields := guo.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, group.FieldID)
		for _, f := range fields {
			if !group.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != group.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply any Where(...) predicates accumulated on the mutation.
	if ps := guo.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar fields: set / add / clear according to the mutation state.
	if value, ok := guo.mutation.UpdatedAt(); ok {
		_spec.SetField(group.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := guo.mutation.DeletedAt(); ok {
		_spec.SetField(group.FieldDeletedAt, field.TypeTime, value)
	}
	if guo.mutation.DeletedAtCleared() {
		_spec.ClearField(group.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := guo.mutation.Name(); ok {
		_spec.SetField(group.FieldName, field.TypeString, value)
	}
	if value, ok := guo.mutation.MaxStorage(); ok {
		_spec.SetField(group.FieldMaxStorage, field.TypeInt64, value)
	}
	if value, ok := guo.mutation.AddedMaxStorage(); ok {
		_spec.AddField(group.FieldMaxStorage, field.TypeInt64, value)
	}
	if guo.mutation.MaxStorageCleared() {
		_spec.ClearField(group.FieldMaxStorage, field.TypeInt64)
	}
	if value, ok := guo.mutation.SpeedLimit(); ok {
		_spec.SetField(group.FieldSpeedLimit, field.TypeInt, value)
	}
	if value, ok := guo.mutation.AddedSpeedLimit(); ok {
		_spec.AddField(group.FieldSpeedLimit, field.TypeInt, value)
	}
	if guo.mutation.SpeedLimitCleared() {
		_spec.ClearField(group.FieldSpeedLimit, field.TypeInt)
	}
	if value, ok := guo.mutation.Permissions(); ok {
		_spec.SetField(group.FieldPermissions, field.TypeBytes, value)
	}
	if value, ok := guo.mutation.Settings(); ok {
		_spec.SetField(group.FieldSettings, field.TypeJSON, value)
	}
	if guo.mutation.SettingsCleared() {
		_spec.ClearField(group.FieldSettings, field.TypeJSON)
	}
	// "users" O2M edge: clear-all, then removals (skipped if cleared), then additions.
	if guo.mutation.UsersCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.UsersTable,
			Columns: []string{group.UsersColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := guo.mutation.RemovedUsersIDs(); len(nodes) > 0 && !guo.mutation.UsersCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.UsersTable,
			Columns: []string{group.UsersColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := guo.mutation.UsersIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.UsersTable,
			Columns: []string{group.UsersColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// "storage_policies" M2O edge (inverse side): clear then add.
	if guo.mutation.StoragePoliciesCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   group.StoragePoliciesTable,
			Columns: []string{group.StoragePoliciesColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(storagepolicy.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := guo.mutation.StoragePoliciesIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   group.StoragePoliciesTable,
			Columns: []string{group.StoragePoliciesColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(storagepolicy.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Execute the update and scan the row back; map driver errors to the
	// generated NotFoundError/ConstraintError types callers expect.
	_node = &Group{config: guo.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, guo.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{group.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	guo.mutation.done = true
	return _node, nil
}

@ -0,0 +1,343 @@
// Code generated by ent, DO NOT EDIT.
package hook

import (
	"context"
	"fmt"

	"github.com/cloudreve/Cloudreve/v4/ent"
)

// NOTE(review): every XxxFunc below follows the same generated pattern —
// type-assert the generic ent.Mutation to the concrete mutation type and
// dispatch, or return a descriptive error. Edits here would be lost on the
// next `ent generate` run.

// The DavAccountFunc type is an adapter to allow the use of ordinary
// function as DavAccount mutator.
type DavAccountFunc func(context.Context, *ent.DavAccountMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f DavAccountFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.DavAccountMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DavAccountMutation", m)
}

// The DirectLinkFunc type is an adapter to allow the use of ordinary
// function as DirectLink mutator.
type DirectLinkFunc func(context.Context, *ent.DirectLinkMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f DirectLinkFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.DirectLinkMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DirectLinkMutation", m)
}

// The EntityFunc type is an adapter to allow the use of ordinary
// function as Entity mutator.
type EntityFunc func(context.Context, *ent.EntityMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f EntityFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.EntityMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EntityMutation", m)
}

// The FileFunc type is an adapter to allow the use of ordinary
// function as File mutator.
type FileFunc func(context.Context, *ent.FileMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f FileFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.FileMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileMutation", m)
}

// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.GroupMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
}

// The MetadataFunc type is an adapter to allow the use of ordinary
// function as Metadata mutator.
type MetadataFunc func(context.Context, *ent.MetadataMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f MetadataFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.MetadataMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetadataMutation", m)
}

// The NodeFunc type is an adapter to allow the use of ordinary
// function as Node mutator.
type NodeFunc func(context.Context, *ent.NodeMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f NodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.NodeMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NodeMutation", m)
}

// The PasskeyFunc type is an adapter to allow the use of ordinary
// function as Passkey mutator.
type PasskeyFunc func(context.Context, *ent.PasskeyMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f PasskeyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.PasskeyMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PasskeyMutation", m)
}

// The SettingFunc type is an adapter to allow the use of ordinary
// function as Setting mutator.
type SettingFunc func(context.Context, *ent.SettingMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f SettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.SettingMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SettingMutation", m)
}

// The ShareFunc type is an adapter to allow the use of ordinary
// function as Share mutator.
type ShareFunc func(context.Context, *ent.ShareMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f ShareFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.ShareMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ShareMutation", m)
}

// The StoragePolicyFunc type is an adapter to allow the use of ordinary
// function as StoragePolicy mutator.
type StoragePolicyFunc func(context.Context, *ent.StoragePolicyMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f StoragePolicyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.StoragePolicyMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.StoragePolicyMutation", m)
}

// The TaskFunc type is an adapter to allow the use of ordinary
// function as Task mutator.
type TaskFunc func(context.Context, *ent.TaskMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f TaskFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.TaskMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TaskMutation", m)
}

// The UserFunc type is an adapter to allow the use of ordinary
// function as User mutator.
type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m)
}
// Condition is a hook condition function. It reports whether a hook should
// run for the given mutation; see If/On/Unless below.
type Condition func(context.Context, ent.Mutation) bool
// And groups conditions with the AND operator: the returned condition is
// true only when every given condition is true. Evaluation short-circuits
// on the first false, in argument order.
func And(first, second Condition, rest ...Condition) Condition {
	conds := append([]Condition{first, second}, rest...)
	return func(ctx context.Context, m ent.Mutation) bool {
		for _, cond := range conds {
			if !cond(ctx, m) {
				return false
			}
		}
		return true
	}
}
// Or groups conditions with the OR operator: the returned condition is
// true when any given condition is true. Evaluation short-circuits on the
// first true, in argument order.
func Or(first, second Condition, rest ...Condition) Condition {
	conds := append([]Condition{first, second}, rest...)
	return func(ctx context.Context, m ent.Mutation) bool {
		for _, cond := range conds {
			if cond(ctx, m) {
				return true
			}
		}
		return false
	}
}
// Not negates a given condition.
func Not(cond Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		return !cond(ctx, m)
	}
}

// HasOp is a condition testing mutation operation (create/update/delete...).
func HasOp(op ent.Op) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		return m.Op().Is(op)
	}
}
// HasAddedFields is a condition validating `.AddedField` on fields.
// All listed fields must be present as numeric additions on the mutation.
func HasAddedFields(field string, fields ...string) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		if _, exists := m.AddedField(field); !exists {
			return false
		}
		for _, field := range fields {
			if _, exists := m.AddedField(field); !exists {
				return false
			}
		}
		return true
	}
}

// HasClearedFields is a condition validating `.FieldCleared` on fields.
// All listed fields must be cleared on the mutation.
func HasClearedFields(field string, fields ...string) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		if exists := m.FieldCleared(field); !exists {
			return false
		}
		for _, field := range fields {
			if exists := m.FieldCleared(field); !exists {
				return false
			}
		}
		return true
	}
}
// HasFields is a condition validating `.Field` on fields: every listed
// field must be set on the mutation for the condition to hold.
func HasFields(field string, fields ...string) Condition {
	all := append([]string{field}, fields...)
	return func(_ context.Context, m ent.Mutation) bool {
		for _, name := range all {
			if _, exists := m.Field(name); !exists {
				return false
			}
		}
		return true
	}
}
// If executes the given hook under condition.
//
//	hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
//
// When the condition is false, the hook is skipped and the mutation
// proceeds to the next mutator unchanged.
func If(hk ent.Hook, cond Condition) ent.Hook {
	return func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			if cond(ctx, m) {
				return hk(next).Mutate(ctx, m)
			}
			return next.Mutate(ctx, m)
		})
	}
}

// On executes the given hook only for the given operation.
//
//	hook.On(Log, ent.Delete|ent.Create)
func On(hk ent.Hook, op ent.Op) ent.Hook {
	return If(hk, HasOp(op))
}

// Unless skips the given hook only for the given operation.
//
//	hook.Unless(Log, ent.Update|ent.UpdateOne)
func Unless(hk ent.Hook, op ent.Op) ent.Hook {
	return If(hk, Not(HasOp(op)))
}

// FixedError is a hook returning a fixed error for every mutation.
func FixedError(err error) ent.Hook {
	return func(ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) {
			return nil, err
		})
	}
}

// Reject returns a hook that rejects all operations that match op.
//
//	func (T) Hooks() []ent.Hook {
//		return []ent.Hook{
//			Reject(ent.Delete|ent.Update),
//		}
//	}
func Reject(op ent.Op) ent.Hook {
	hk := FixedError(fmt.Errorf("%s operation is not allowed", op))
	return On(hk, op)
}
// Chain acts as a list of hooks and is effectively immutable.
// Once created, it will always hold the same set of hooks in the same order.
type Chain struct {
	hooks []ent.Hook
}

// NewChain creates a new chain of hooks. The input slice is copied so
// later mutation of the caller's slice cannot affect the chain.
func NewChain(hooks ...ent.Hook) Chain {
	return Chain{append([]ent.Hook(nil), hooks...)}
}

// Hook chains the list of hooks and returns the final hook.
// Hooks are composed in reverse so that c.hooks[0] runs first.
func (c Chain) Hook() ent.Hook {
	return func(mutator ent.Mutator) ent.Mutator {
		for i := len(c.hooks) - 1; i >= 0; i-- {
			mutator = c.hooks[i](mutator)
		}
		return mutator
	}
}

// Append extends a chain, adding the specified hook
// as the last ones in the mutation flow. The receiver is not modified.
func (c Chain) Append(hooks ...ent.Hook) Chain {
	newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
	newHooks = append(newHooks, c.hooks...)
	newHooks = append(newHooks, hooks...)
	return Chain{newHooks}
}

// Extend extends a chain, adding the specified chain
// as the last ones in the mutation flow.
func (c Chain) Extend(chain Chain) Chain {
	return c.Append(chain.hooks...)
}

@ -0,0 +1,509 @@
// Code generated by ent, DO NOT EDIT.
package intercept

import (
	"context"
	"fmt"

	"entgo.io/ent/dialect/sql"
	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
	"github.com/cloudreve/Cloudreve/v4/ent/entity"
	"github.com/cloudreve/Cloudreve/v4/ent/file"
	"github.com/cloudreve/Cloudreve/v4/ent/group"
	"github.com/cloudreve/Cloudreve/v4/ent/metadata"
	"github.com/cloudreve/Cloudreve/v4/ent/node"
	"github.com/cloudreve/Cloudreve/v4/ent/passkey"
	"github.com/cloudreve/Cloudreve/v4/ent/predicate"
	"github.com/cloudreve/Cloudreve/v4/ent/setting"
	"github.com/cloudreve/Cloudreve/v4/ent/share"
	"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
	"github.com/cloudreve/Cloudreve/v4/ent/task"
	"github.com/cloudreve/Cloudreve/v4/ent/user"
)

// The Query interface represents an operation that queries a graph.
// By using this interface, users can write generic code that manipulates
// query builders of different types.
type Query interface {
	// Type returns the string representation of the query type.
	Type() string
	// Limit the number of records to be returned by this query.
	Limit(int)
	// Offset to start from.
	Offset(int)
	// Unique configures the query builder to filter duplicate records.
	Unique(bool)
	// Order specifies how the records should be ordered.
	Order(...func(*sql.Selector))
	// WhereP appends storage-level predicates to the query builder. Using this method, users
	// can use type-assertion to append predicates that do not depend on any generated package.
	WhereP(...func(*sql.Selector))
}

// The Func type is an adapter that allows ordinary functions to be used as interceptors.
// Unlike traversal functions, interceptors are skipped during graph traversals. Note that the
// implementation of Func is different from the one defined in entgo.io/ent.InterceptFunc.
type Func func(context.Context, Query) error

// Intercept calls f(ctx, q) and then applied the next Querier.
// The typed query is wrapped in the generic Query interface via NewQuery
// before f sees it; an unknown query type aborts the pipeline.
func (f Func) Intercept(next ent.Querier) ent.Querier {
	return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
		query, err := NewQuery(q)
		if err != nil {
			return nil, err
		}
		if err := f(ctx, query); err != nil {
			return nil, err
		}
		return next.Query(ctx, q)
	})
}

// The TraverseFunc type is an adapter to allow the use of ordinary function as Traverser.
// If f is a function with the appropriate signature, TraverseFunc(f) is a Traverser that calls f.
type TraverseFunc func(context.Context, Query) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseFunc) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q) with the generic Query wrapper.
func (f TraverseFunc) Traverse(ctx context.Context, q ent.Query) error {
	query, err := NewQuery(q)
	if err != nil {
		return err
	}
	return f(ctx, query)
}
// NOTE(review): each XxxFunc/TraverseXxx pair below is the generated
// per-entity adapter: type-assert the generic ent.Query to the concrete
// query type and dispatch, or return a descriptive error. Generated code —
// changes here would be lost on regeneration.

// The DavAccountFunc type is an adapter to allow the use of ordinary function as a Querier.
type DavAccountFunc func(context.Context, *ent.DavAccountQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f DavAccountFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.DavAccountQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.DavAccountQuery", q)
}

// The TraverseDavAccount type is an adapter to allow the use of ordinary function as Traverser.
type TraverseDavAccount func(context.Context, *ent.DavAccountQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseDavAccount) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseDavAccount) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.DavAccountQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.DavAccountQuery", q)
}

// The DirectLinkFunc type is an adapter to allow the use of ordinary function as a Querier.
type DirectLinkFunc func(context.Context, *ent.DirectLinkQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f DirectLinkFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.DirectLinkQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.DirectLinkQuery", q)
}

// The TraverseDirectLink type is an adapter to allow the use of ordinary function as Traverser.
type TraverseDirectLink func(context.Context, *ent.DirectLinkQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseDirectLink) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseDirectLink) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.DirectLinkQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.DirectLinkQuery", q)
}

// The EntityFunc type is an adapter to allow the use of ordinary function as a Querier.
type EntityFunc func(context.Context, *ent.EntityQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f EntityFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.EntityQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.EntityQuery", q)
}

// The TraverseEntity type is an adapter to allow the use of ordinary function as Traverser.
type TraverseEntity func(context.Context, *ent.EntityQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseEntity) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseEntity) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.EntityQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.EntityQuery", q)
}

// The FileFunc type is an adapter to allow the use of ordinary function as a Querier.
type FileFunc func(context.Context, *ent.FileQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f FileFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.FileQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.FileQuery", q)
}

// The TraverseFile type is an adapter to allow the use of ordinary function as Traverser.
type TraverseFile func(context.Context, *ent.FileQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseFile) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseFile) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.FileQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.FileQuery", q)
}

// The GroupFunc type is an adapter to allow the use of ordinary function as a Querier.
type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f GroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.GroupQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q)
}

// The TraverseGroup type is an adapter to allow the use of ordinary function as Traverser.
type TraverseGroup func(context.Context, *ent.GroupQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseGroup) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.GroupQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q)
}

// The MetadataFunc type is an adapter to allow the use of ordinary function as a Querier.
type MetadataFunc func(context.Context, *ent.MetadataQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f MetadataFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.MetadataQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.MetadataQuery", q)
}

// The TraverseMetadata type is an adapter to allow the use of ordinary function as Traverser.
type TraverseMetadata func(context.Context, *ent.MetadataQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseMetadata) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseMetadata) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.MetadataQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.MetadataQuery", q)
}

// The NodeFunc type is an adapter to allow the use of ordinary function as a Querier.
type NodeFunc func(context.Context, *ent.NodeQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f NodeFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.NodeQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.NodeQuery", q)
}

// The TraverseNode type is an adapter to allow the use of ordinary function as Traverser.
type TraverseNode func(context.Context, *ent.NodeQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseNode) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseNode) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.NodeQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.NodeQuery", q)
}

// The PasskeyFunc type is an adapter to allow the use of ordinary function as a Querier.
type PasskeyFunc func(context.Context, *ent.PasskeyQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f PasskeyFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.PasskeyQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.PasskeyQuery", q)
}

// The TraversePasskey type is an adapter to allow the use of ordinary function as Traverser.
type TraversePasskey func(context.Context, *ent.PasskeyQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraversePasskey) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraversePasskey) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.PasskeyQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.PasskeyQuery", q)
}

// The SettingFunc type is an adapter to allow the use of ordinary function as a Querier.
type SettingFunc func(context.Context, *ent.SettingQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f SettingFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.SettingQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q)
}

// The TraverseSetting type is an adapter to allow the use of ordinary function as Traverser.
type TraverseSetting func(context.Context, *ent.SettingQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseSetting) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseSetting) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.SettingQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q)
}

// The ShareFunc type is an adapter to allow the use of ordinary function as a Querier.
type ShareFunc func(context.Context, *ent.ShareQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f ShareFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.ShareQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.ShareQuery", q)
}

// The TraverseShare type is an adapter to allow the use of ordinary function as Traverser.
type TraverseShare func(context.Context, *ent.ShareQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseShare) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseShare) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.ShareQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.ShareQuery", q)
}

// The StoragePolicyFunc type is an adapter to allow the use of ordinary function as a Querier.
type StoragePolicyFunc func(context.Context, *ent.StoragePolicyQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f StoragePolicyFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.StoragePolicyQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.StoragePolicyQuery", q)
}

// The TraverseStoragePolicy type is an adapter to allow the use of ordinary function as Traverser.
type TraverseStoragePolicy func(context.Context, *ent.StoragePolicyQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseStoragePolicy) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseStoragePolicy) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.StoragePolicyQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.StoragePolicyQuery", q)
}

// The TaskFunc type is an adapter to allow the use of ordinary function as a Querier.
type TaskFunc func(context.Context, *ent.TaskQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f TaskFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.TaskQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.TaskQuery", q)
}

// The TraverseTask type is an adapter to allow the use of ordinary function as Traverser.
type TraverseTask func(context.Context, *ent.TaskQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseTask) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseTask) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.TaskQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.TaskQuery", q)
}

// The UserFunc type is an adapter to allow the use of ordinary function as a Querier.
type UserFunc func(context.Context, *ent.UserQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f UserFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	if q, ok := q.(*ent.UserQuery); ok {
		return f(ctx, q)
	}
	return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserQuery", q)
}

// The TraverseUser type is an adapter to allow the use of ordinary function as Traverser.
type TraverseUser func(context.Context, *ent.UserQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseUser) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseUser) Traverse(ctx context.Context, q ent.Query) error {
	if q, ok := q.(*ent.UserQuery); ok {
		return f(ctx, q)
	}
	return fmt.Errorf("unexpected query type %T. expect *ent.UserQuery", q)
}
// NewQuery returns the generic Query interface for the given typed query,
// wrapping it in the generic `query` adapter parameterized by the entity's
// predicate and order-option types. Unknown query types yield an error.
func NewQuery(q ent.Query) (Query, error) {
	switch q := q.(type) {
	case *ent.DavAccountQuery:
		return &query[*ent.DavAccountQuery, predicate.DavAccount, davaccount.OrderOption]{typ: ent.TypeDavAccount, tq: q}, nil
	case *ent.DirectLinkQuery:
		return &query[*ent.DirectLinkQuery, predicate.DirectLink, directlink.OrderOption]{typ: ent.TypeDirectLink, tq: q}, nil
	case *ent.EntityQuery:
		return &query[*ent.EntityQuery, predicate.Entity, entity.OrderOption]{typ: ent.TypeEntity, tq: q}, nil
	case *ent.FileQuery:
		return &query[*ent.FileQuery, predicate.File, file.OrderOption]{typ: ent.TypeFile, tq: q}, nil
	case *ent.GroupQuery:
		return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
	case *ent.MetadataQuery:
		return &query[*ent.MetadataQuery, predicate.Metadata, metadata.OrderOption]{typ: ent.TypeMetadata, tq: q}, nil
	case *ent.NodeQuery:
		return &query[*ent.NodeQuery, predicate.Node, node.OrderOption]{typ: ent.TypeNode, tq: q}, nil
	case *ent.PasskeyQuery:
		return &query[*ent.PasskeyQuery, predicate.Passkey, passkey.OrderOption]{typ: ent.TypePasskey, tq: q}, nil
	case *ent.SettingQuery:
		return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil
	case *ent.ShareQuery:
		return &query[*ent.ShareQuery, predicate.Share, share.OrderOption]{typ: ent.TypeShare, tq: q}, nil
	case *ent.StoragePolicyQuery:
		return &query[*ent.StoragePolicyQuery, predicate.StoragePolicy, storagepolicy.OrderOption]{typ: ent.TypeStoragePolicy, tq: q}, nil
	case *ent.TaskQuery:
		return &query[*ent.TaskQuery, predicate.Task, task.OrderOption]{typ: ent.TypeTask, tq: q}, nil
	case *ent.UserQuery:
		return &query[*ent.UserQuery, predicate.User, user.OrderOption]{typ: ent.TypeUser, tq: q}, nil
	default:
		return nil, fmt.Errorf("unknown query type %T", q)
	}
}
// query adapts any typed ent query builder to the generic Query interface.
// T is the concrete builder type, P its predicate function type, and R its
// order-option type; both P and R are function types over *sql.Selector.
type query[T any, P ~func(*sql.Selector), R ~func(*sql.Selector)] struct {
	typ string // entity type name, e.g. ent.TypeGroup
	tq  interface {
		Limit(int) T
		Offset(int) T
		Unique(bool) T
		Order(...R) T
		Where(...P) T
	}
}
// Type returns the string representation of the query type.
func (q query[T, P, R]) Type() string {
	return q.typ
}

// Limit delegates to the underlying typed builder.
func (q query[T, P, R]) Limit(limit int) {
	q.tq.Limit(limit)
}

// Offset delegates to the underlying typed builder.
func (q query[T, P, R]) Offset(offset int) {
	q.tq.Offset(offset)
}

// Unique delegates to the underlying typed builder.
func (q query[T, P, R]) Unique(unique bool) {
	q.tq.Unique(unique)
}
// Order converts the generic selector functions to the builder's typed
// order options and delegates to the underlying typed builder.
func (q query[T, P, R]) Order(orders ...func(*sql.Selector)) {
	converted := make([]R, 0, len(orders))
	for _, o := range orders {
		converted = append(converted, R(o))
	}
	q.tq.Order(converted...)
}

// WhereP converts the generic selector functions to the builder's typed
// predicates and delegates to the underlying typed builder.
func (q query[T, P, R]) WhereP(ps ...func(*sql.Selector)) {
	preds := make([]P, 0, len(ps))
	for _, fn := range ps {
		preds = append(preds, P(fn))
	}
	q.tq.Where(preds...)
}

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save