Compare commits

...

20 Commits

Author SHA1 Message Date
Aaron Liu 32632db36f feat(fs): change event debounce before emitting to subscriber 2025-12-13 14:48:01 +08:00
Aaron Liu c01b748dfc feat(fs): fs change event notification via SSE / show panic stack trace in task queue 2025-12-13 14:48:01 +08:00
Darren Yu 05c68b4062
fix(thumb blob path): separators be wrongly modified (#3062) (#3116)
* fix(thumb blob path): separators be wrongly modified

* Update common.go
2025-12-05 15:57:58 +08:00
Darren Yu a08c796e3f
fix(ks3): fix content disposition format for download filename (#3040) (#3057) 2025-12-05 15:33:18 +08:00
Aaron Liu fec4dec3ac feat(upload): etag check in client-side upload / support empty policy ID 2025-12-05 15:17:07 +08:00
Aaron Liu 67c6f937c9 fix(oss): disable RSA min key size check for OSS callback (#3038) 2025-11-15 11:59:09 +08:00
Aaron Liu 6ad72e07f4 update submodule 2025-11-14 11:18:39 +08:00
Aaron Liu 994ef7af81 fix(search): multiple metadata search does not work (#3027) 2025-11-12 13:57:38 +08:00
Darren Yu b507c1b893
docs: update feature description (#3023)
* docs: update feature description

* Apply suggestion from @HFO4

---------

Co-authored-by: AaronLiu <abslant.liu@gmail.com>
2025-11-12 13:55:38 +08:00
Darren Yu deecc5c20b
feat(thumb blob path): support magic variables in thumb blob path (#3030) 2025-11-12 13:49:32 +08:00
dependabot[bot] 6085f2090f
chore(deps): bump golang.org/x/image (#2093)
Bumps [golang.org/x/image](https://github.com/golang/image) from 0.0.0-20211028202545-6944b10bf410 to 0.18.0.
- [Commits](https://github.com/golang/image/commits/v0.18.0)

---
updated-dependencies:
- dependency-name: golang.org/x/image
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:56:01 +08:00
dependabot[bot] 670b79eef3
chore(deps): bump github.com/gin-contrib/cors from 1.3.0 to 1.6.0 (#2097)
Bumps [github.com/gin-contrib/cors](https://github.com/gin-contrib/cors) from 1.3.0 to 1.6.0.
- [Release notes](https://github.com/gin-contrib/cors/releases)
- [Changelog](https://github.com/gin-contrib/cors/blob/master/.goreleaser.yaml)
- [Commits](https://github.com/gin-contrib/cors/compare/v1.3.0...v1.6.0)

---
updated-dependencies:
- dependency-name: github.com/gin-contrib/cors
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:53:40 +08:00
dependabot[bot] 4785be81c2
chore(deps): bump github.com/wneessen/go-mail from 0.6.2 to 0.7.1 (#2939)
Bumps [github.com/wneessen/go-mail](https://github.com/wneessen/go-mail) from 0.6.2 to 0.7.1.
- [Release notes](https://github.com/wneessen/go-mail/releases)
- [Commits](https://github.com/wneessen/go-mail/compare/v0.6.2...v0.7.1)

---
updated-dependencies:
- dependency-name: github.com/wneessen/go-mail
  dependency-version: 0.7.1
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:50:54 +08:00
Aaron Liu f27969d74f chore: update required golang version and gzip middleware 2025-10-24 15:07:12 +08:00
Aaron Liu e3580d9351 feat(encryption): add UI and settings for file encryption 2025-10-24 15:04:54 +08:00
Aaron Liu 16b02b1fb3 feat: file blob encryption 2025-10-21 14:54:13 +08:00
Darren Yu 6bd30a8af7
fix(oss): change default expire ttl and sign param to adapt SDK v2 (#2979)
* fix(oss): change default expire ttl and sign param to adapt SDK v2

* fix(oss): add expire ttl limit
2025-10-16 11:49:21 +08:00
Aaron Liu 21cdafb2af fix(oss): traffic limit should be in query instead of headers (#2977) 2025-10-16 07:46:22 +08:00
Aaron Liu e29237d593 fix(webdav): error code for missing parent in mkcol should be `409` instead of `404` (#2953) 2025-10-15 10:28:31 +08:00
Aaron Liu 46897e2880 fix(oss): presigned multipart upload mismatch 2025-10-14 10:21:43 +08:00
95 changed files with 7110 additions and 615 deletions

View File

@ -38,12 +38,12 @@
## :sparkles: Features
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu, Aliyun OSS, Tencent COS, Upyun.
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu Kodo, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Kingsoft Cloud KS3, Upyun.
- :outbox_tray: Upload/Download with direct transmission from client to storage providers.
- 💾 Integrate with Aria2/qBittorrent to download files in background, use multiple download nodes to share the load.
- 📚 Compress/Extract files, download files in batch.
- 📚 Compress/Extract/Preview archived files, download files in batch.
- 💻 WebDAV support covering all storage providers.
- :zap:Drag&Drop to upload files or folders, with resumable upload support.
- :zap: Drag&Drop to upload files or folders, with parallel resumable upload support.
- :card_file_box: Extract media metadata from files, search files by metadata or tags.
- :family_woman_girl_boy: Multi-users with multi-groups.
- :link: Create share links for files and folders with expiration date.

View File

@ -39,12 +39,12 @@
## :sparkles: 特性
- :cloud: 支持本机、从机、七牛、阿里云 OSS、腾讯云 COS、华为云 OBS、又拍云、OneDrive (包括世纪互联版) 、S3 兼容协议 作为存储端
- :cloud: 支持本机、从机、七牛 Kodo、阿里云 OSS、腾讯云 COS、华为云 OBS、金山云 KS3、又拍云、OneDrive (包括世纪互联版) 、S3 兼容协议 作为存储端
- :outbox_tray: 上传/下载 支持客户端直传,支持下载限速
- 💾 可对接 Aria2 离线下载,可使用多个从机节点分担下载任务
- 📚 在线 压缩/解压缩、多文件打包下载
- 💾 可对接 Aria2/qBittorrent 离线下载,可使用多个从机节点分担下载任务
- 📚 在线 压缩/解压缩/压缩包预览、多文件打包下载
- 💻 覆盖全部存储策略的 WebDAV 协议支持
- :zap: 拖拽上传、目录上传、分片上传
- :zap: 拖拽上传、目录上传、并行分片上传
- :card_file_box: 提取媒体元数据,通过元数据或标签搜索文件
- :family_woman_girl_boy: 多用户、用户组、多存储策略
- :link: 创建文件、目录的分享链接,可设定自动过期

View File

@ -178,6 +178,8 @@ func (s *server) Close() {
defer cancel()
}
s.dep.EventHub().Close()
// Shutdown http server
if s.server != nil {
err := s.server.Shutdown(ctx)

View File

@ -17,6 +17,8 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
"github.com/cloudreve/Cloudreve/v4/pkg/email"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@ -129,55 +131,63 @@ type Dep interface {
WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error)
// UAParser Get a singleton uaparser.Parser instance for user agent parsing.
UAParser() *uaparser.Parser
// MasterEncryptKeyVault Get a singleton encrypt.MasterEncryptKeyVault instance for master encrypt key vault.
MasterEncryptKeyVault(ctx context.Context) encrypt.MasterEncryptKeyVault
// EncryptorFactory Get a new encrypt.CryptorFactory instance.
EncryptorFactory(ctx context.Context) encrypt.CryptorFactory
// EventHub Get a singleton eventhub.EventHub instance for event publishing.
EventHub() eventhub.EventHub
}
type dependency struct {
configProvider conf.ConfigProvider
logger logging.Logger
statics iofs.FS
serverStaticFS static.ServeFileSystem
dbClient *ent.Client
rawEntClient *ent.Client
kv cache.Driver
navigatorStateKv cache.Driver
settingClient inventory.SettingClient
fileClient inventory.FileClient
shareClient inventory.ShareClient
settingProvider setting.Provider
userClient inventory.UserClient
groupClient inventory.GroupClient
storagePolicyClient inventory.StoragePolicyClient
taskClient inventory.TaskClient
nodeClient inventory.NodeClient
davAccountClient inventory.DavAccountClient
directLinkClient inventory.DirectLinkClient
emailClient email.Driver
generalAuth auth.Auth
hashidEncoder hashid.Encoder
tokenAuth auth.TokenAuth
lockSystem lock.LockSystem
requestClient request.Client
ioIntenseQueue queue.Queue
thumbQueue queue.Queue
mediaMetaQueue queue.Queue
entityRecycleQueue queue.Queue
slaveQueue queue.Queue
remoteDownloadQueue queue.Queue
ioIntenseQueueTask queue.Task
mediaMeta mediameta.Extractor
thumbPipeline thumb.Generator
mimeDetector mime.MimeDetector
credManager credmanager.CredManager
nodePool cluster.NodePool
taskRegistry queue.TaskRegistry
webauthn *webauthn.WebAuthn
parser *uaparser.Parser
cron *cron.Cron
configProvider conf.ConfigProvider
logger logging.Logger
statics iofs.FS
serverStaticFS static.ServeFileSystem
dbClient *ent.Client
rawEntClient *ent.Client
kv cache.Driver
navigatorStateKv cache.Driver
settingClient inventory.SettingClient
fileClient inventory.FileClient
shareClient inventory.ShareClient
settingProvider setting.Provider
userClient inventory.UserClient
groupClient inventory.GroupClient
storagePolicyClient inventory.StoragePolicyClient
taskClient inventory.TaskClient
nodeClient inventory.NodeClient
davAccountClient inventory.DavAccountClient
directLinkClient inventory.DirectLinkClient
fsEventClient inventory.FsEventClient
emailClient email.Driver
generalAuth auth.Auth
hashidEncoder hashid.Encoder
tokenAuth auth.TokenAuth
lockSystem lock.LockSystem
requestClient request.Client
ioIntenseQueue queue.Queue
thumbQueue queue.Queue
mediaMetaQueue queue.Queue
entityRecycleQueue queue.Queue
slaveQueue queue.Queue
remoteDownloadQueue queue.Queue
ioIntenseQueueTask queue.Task
mediaMeta mediameta.Extractor
thumbPipeline thumb.Generator
mimeDetector mime.MimeDetector
credManager credmanager.CredManager
nodePool cluster.NodePool
taskRegistry queue.TaskRegistry
webauthn *webauthn.WebAuthn
parser *uaparser.Parser
cron *cron.Cron
masterEncryptKeyVault encrypt.MasterEncryptKeyVault
eventHub eventhub.EventHub
configPath string
isPro bool
requiredDbVersion string
licenseKey string
// Protects inner deps that can be reloaded at runtime.
mu sync.Mutex
@ -206,6 +216,19 @@ func (d *dependency) RequestClient(opts ...request.Option) request.Client {
return request.NewClient(d.ConfigProvider(), opts...)
}
// MasterEncryptKeyVault lazily constructs and caches the singleton master
// encrypt key vault, backed by the setting provider.
func (d *dependency) MasterEncryptKeyVault(ctx context.Context) encrypt.MasterEncryptKeyVault {
	if d.masterEncryptKeyVault == nil {
		d.masterEncryptKeyVault = encrypt.NewMasterEncryptKeyVault(ctx, d.SettingProvider())
	}
	return d.masterEncryptKeyVault
}
// EncryptorFactory builds a fresh encrypt.CryptorFactory on every call,
// sharing the singleton master encrypt key vault.
func (d *dependency) EncryptorFactory(ctx context.Context) encrypt.CryptorFactory {
	vault := d.MasterEncryptKeyVault(ctx)
	return encrypt.NewCryptorFactory(vault)
}
func (d *dependency) WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error) {
if d.webauthn != nil {
return d.webauthn, nil
@ -346,6 +369,21 @@ func (d *dependency) NavigatorStateKV() cache.Driver {
return d.navigatorStateKv
}
// EventHub lazily constructs and caches the singleton event hub used to
// publish filesystem change events.
func (d *dependency) EventHub() eventhub.EventHub {
	if d.eventHub == nil {
		d.eventHub = eventhub.NewEventHub(d.UserClient(), d.FsEventClient())
	}
	return d.eventHub
}
// FsEventClient returns the lazily-initialized singleton inventory.FsEventClient.
func (d *dependency) FsEventClient() inventory.FsEventClient {
	if d.fsEventClient != nil {
		return d.fsEventClient
	}
	// Cache the constructed client so subsequent calls reuse the same
	// instance. The original returned a new client every time, so the
	// nil-guard above never fired and the d.fsEventClient field stayed nil.
	d.fsEventClient = inventory.NewFsEventClient(d.DBClient(), d.ConfigProvider().Database().Type)
	return d.fsEventClient
}
func (d *dependency) SettingClient() inventory.SettingClient {
if d.settingClient != nil {
return d.settingClient
@ -843,6 +881,14 @@ func (d *dependency) Shutdown(ctx context.Context) error {
}()
}
if d.eventHub != nil {
wg.Add(1)
go func() {
d.eventHub.Close()
defer wg.Done()
}()
}
d.mu.Unlock()
wg.Wait()

View File

@ -1,6 +1,8 @@
package dependency
import (
"io/fs"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
@ -11,7 +13,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/gin-contrib/static"
"io/fs"
)
// Option 发送请求的额外设置
@ -67,12 +68,6 @@ func WithProFlag(c bool) Option {
})
}
func WithLicenseKey(c string) Option {
return optionFunc(func(o *dependency) {
o.licenseKey = c
})
}
// WithRawEntClient Set the default raw ent client.
func WithRawEntClient(c *ent.Client) Option {
return optionFunc(func(o *dependency) {

2
assets

@ -1 +1 @@
Subproject commit 71e5fbd240824ad0b6e8ebe5e47d25704a82d7c4
Subproject commit 0b388cc50a6c8e67f645d1b7d569bd9e58ae2c30

230
cmd/masterkey.go Normal file
View File

@ -0,0 +1,230 @@
package cmd
import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/cloudreve/Cloudreve/v4/application/dependency"
	"github.com/cloudreve/Cloudreve/v4/ent/entity"
	"github.com/cloudreve/Cloudreve/v4/inventory/types"
	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
	"github.com/cloudreve/Cloudreve/v4/pkg/setting"
	"github.com/spf13/cobra"
)
var (
outputToFile string
newMasterKeyFile string
)
func init() {
	// Register the master-key command tree on the root command.
	rootCmd.AddCommand(masterKeyCmd)
	for _, sub := range []*cobra.Command{masterKeyGenerateCmd, masterKeyGetCmd, masterKeyRotateCmd} {
		masterKeyCmd.AddCommand(sub)
	}

	// Subcommand flags.
	masterKeyGenerateCmd.Flags().StringVarP(&outputToFile, "output", "o", "", "Output master key to file instead of stdout")
	masterKeyRotateCmd.Flags().StringVarP(&newMasterKeyFile, "new-key", "n", "", "Path to file containing the new master key (base64 encoded).")
}
// masterKeyCmd is the parent "master-key" command. It performs no work
// itself and only prints help; the generate/get/rotate subcommands carry
// the actual functionality.
var masterKeyCmd = &cobra.Command{
	Use:   "master-key",
	Short: "Master encryption key management",
	Long:  "Manage master encryption keys for file encryption. Use subcommands to generate, get, or rotate keys.",
	Run: func(cmd *cobra.Command, args []string) {
		_ = cmd.Help()
	},
}
// masterKeyGenerateCmd creates a random 256-bit master key and emits it as
// base64, either to stdout or — when --output is set — to a file.
var masterKeyGenerateCmd = &cobra.Command{
	Use:   "generate",
	Short: "Generate a new master encryption key",
	Long:  "Generate a new random 32-byte (256-bit) master encryption key and output it in base64 format.",
	Run: func(cmd *cobra.Command, args []string) {
		// Draw 32 bytes from the CSPRNG.
		raw := make([]byte, 32)
		if _, err := io.ReadFull(rand.Reader, raw); err != nil {
			fmt.Fprintf(os.Stderr, "Error: Failed to generate random key: %v\n", err)
			os.Exit(1)
		}

		encoded := base64.StdEncoding.EncodeToString(raw)

		if outputToFile == "" {
			// No destination file requested: print the key to stdout.
			fmt.Println(encoded)
			return
		}

		// Persist with owner-only permissions; the key is a secret.
		if err := os.WriteFile(outputToFile, []byte(encoded), 0600); err != nil {
			fmt.Fprintf(os.Stderr, "Error: Failed to write key to file: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("Master key generated and saved to: %s\n", outputToFile)
	},
}
// masterKeyGetCmd reads the current master encryption key from whichever
// vault backend is configured (setting, env, or file) and prints it in
// base64 to stdout.
var masterKeyGetCmd = &cobra.Command{
	Use:   "get",
	Short: "Get the current master encryption key",
	Long:  "Retrieve and display the current master encryption key from the configured vault (setting, env, or file).",
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		// Build a dependency container from the config path; only the
		// setting provider and logger are used here.
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
		)
		logger := dep.Logger()

		// Get the master key vault
		vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())

		// Retrieve the master key
		key, err := vault.GetMasterKey(ctx)
		if err != nil {
			logger.Error("Failed to get master key: %s", err)
			os.Exit(1)
		}

		// Encode to base64 and display.
		// NOTE(review): the leading empty Println emits a blank line before
		// the key — presumably to separate it from startup log output; confirm.
		encodedKey := base64.StdEncoding.EncodeToString(key)
		fmt.Println("")
		fmt.Println(encodedKey)
	},
}
// masterKeyRotateCmd re-encrypts every per-entity file key with a new
// master key loaded from --new-key, then (when the key lives in the
// settings database) persists the new master key. For env/file vaults it
// instructs the operator to update the key manually.
var masterKeyRotateCmd = &cobra.Command{
	Use:   "rotate",
	Short: "Rotate the master encryption key",
	Long: `Rotate the master encryption key by re-encrypting all encrypted file keys with a new master key.
This operation:
1. Retrieves the current master key
2. Loads a new master key from file
3. Re-encrypts all file encryption keys with the new master key
4. Updates the master key in the settings database
Warning: This is a critical operation. Make sure to backup your database before proceeding.`,
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
		)
		logger := dep.Logger()

		logger.Info("Starting master key rotation...")

		// Fail fast with a clear message when --new-key is missing; otherwise
		// os.ReadFile("") would surface a confusing `open : no such file` error.
		if newMasterKeyFile == "" {
			logger.Error("Missing required flag --new-key: path to file containing the new master key (base64 encoded)")
			os.Exit(1)
		}

		// Get the old master key
		vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
		oldMasterKey, err := vault.GetMasterKey(ctx)
		if err != nil {
			logger.Error("Failed to get current master key: %s", err)
			os.Exit(1)
		}
		logger.Info("Retrieved current master key")

		// Load the new master key from file. Trim surrounding whitespace so a
		// trailing newline (commonly appended by editors) does not break the
		// base64 decode.
		keyData, err := os.ReadFile(newMasterKeyFile)
		if err != nil {
			logger.Error("Failed to read new master key file: %s", err)
			os.Exit(1)
		}
		newMasterKey, err := base64.StdEncoding.DecodeString(strings.TrimSpace(string(keyData)))
		if err != nil {
			logger.Error("Failed to decode new master key: %s", err)
			os.Exit(1)
		}
		if len(newMasterKey) != 32 {
			logger.Error("Invalid new master key: must be 32 bytes (256 bits), got %d bytes", len(newMasterKey))
			os.Exit(1)
		}
		logger.Info("Loaded new master key from file: %s", newMasterKeyFile)

		// Query all entities with encryption metadata
		db := dep.DBClient()
		entities, err := db.Entity.Query().
			Where(entity.Not(entity.PropsIsNil())).
			All(ctx)
		if err != nil {
			logger.Error("Failed to query entities: %s", err)
			os.Exit(1)
		}
		logger.Info("Found %d entities to check for encryption", len(entities))

		// Re-encrypt each entity's encryption key. Any failure aborts so the
		// operator can restore from backup; partially rotated state is unsafe.
		encryptedCount := 0
		for _, e := range entities {
			if e.Props == nil || e.Props.EncryptMetadata == nil {
				continue
			}
			encMeta := e.Props.EncryptMetadata

			// Decrypt the file key with old master key
			decryptedFileKey, err := encrypt.DecryptWithMasterKey(oldMasterKey, encMeta.Key)
			if err != nil {
				logger.Error("Failed to decrypt key for entity %d: %s", e.ID, err)
				os.Exit(1)
			}

			// Re-encrypt the file key with new master key
			newEncryptedKey, err := encrypt.EncryptWithMasterKey(newMasterKey, decryptedFileKey)
			if err != nil {
				logger.Error("Failed to re-encrypt key for entity %d: %s", e.ID, err)
				os.Exit(1)
			}

			// Update the entity
			newProps := *e.Props
			newProps.EncryptMetadata = &types.EncryptMetadata{
				Algorithm:    encMeta.Algorithm,
				Key:          newEncryptedKey,
				KeyPlainText: nil, // Don't store plaintext
				IV:           encMeta.IV,
			}
			if err := db.Entity.UpdateOne(e).
				SetProps(&newProps).
				Exec(ctx); err != nil {
				logger.Error("Failed to update entity %d: %s", e.ID, err)
				os.Exit(1)
			}
			encryptedCount++
		}
		logger.Info("Re-encrypted %d file keys", encryptedCount)

		// Update the master key in settings when it is stored there; for env
		// or file vaults the operator must update the key source themselves.
		keyStore := dep.SettingProvider().MasterEncryptKeyVault(ctx)
		if keyStore == setting.MasterEncryptKeyVaultTypeSetting {
			encodedNewKey := base64.StdEncoding.EncodeToString(newMasterKey)
			err = dep.SettingClient().Set(ctx, map[string]string{
				"encrypt_master_key": encodedNewKey,
			})
			if err != nil {
				logger.Error("Failed to update master key in settings: %s", err)
				logger.Error("WARNING: File keys have been re-encrypted but master key update failed!")
				logger.Error("Please manually update the encrypt_master_key setting.")
				os.Exit(1)
			}
		} else {
			logger.Info("Current master key is stored in %q", keyStore)
			if keyStore == setting.MasterEncryptKeyVaultTypeEnv {
				logger.Info("Please update the new master encryption key in your \"CR_ENCRYPT_MASTER_KEY\" environment variable.")
			} else if keyStore == setting.MasterEncryptKeyVaultTypeFile {
				logger.Info("Please update the new master encryption key in your key file: %q", dep.SettingProvider().MasterEncryptKeyFile(ctx))
			}
			logger.Info("Last step: Please manually update the new master encryption key in your ENV or key file.")
		}

		logger.Info("Master key rotation completed successfully")
	},
}

View File

@ -2,14 +2,16 @@ package cmd
import (
"fmt"
"os"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"os"
)
var (
confPath string
confPath string
licenseKey string
)
func init() {

View File

@ -12,10 +12,6 @@ import (
"github.com/spf13/cobra"
)
var (
licenseKey string
)
func init() {
rootCmd.AddCommand(serverCmd)
serverCmd.PersistentFlags().StringVarP(&licenseKey, "license-key", "l", "", "License key of your Cloudreve Pro")
@ -29,7 +25,6 @@ var serverCmd = &cobra.Command{
dependency.WithConfigPath(confPath),
dependency.WithProFlag(constants.IsProBool),
dependency.WithRequiredDbVersion(constants.BackendVersion),
dependency.WithLicenseKey(licenseKey),
)
server := application.NewServer(dep)
logger := dep.Logger()

View File

@ -19,6 +19,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
@ -45,6 +46,8 @@ type Client struct {
Entity *EntityClient
// File is the client for interacting with the File builders.
File *FileClient
// FsEvent is the client for interacting with the FsEvent builders.
FsEvent *FsEventClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// Metadata is the client for interacting with the Metadata builders.
@ -78,6 +81,7 @@ func (c *Client) init() {
c.DirectLink = NewDirectLinkClient(c.config)
c.Entity = NewEntityClient(c.config)
c.File = NewFileClient(c.config)
c.FsEvent = NewFsEventClient(c.config)
c.Group = NewGroupClient(c.config)
c.Metadata = NewMetadataClient(c.config)
c.Node = NewNodeClient(c.config)
@ -183,6 +187,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
DirectLink: NewDirectLinkClient(cfg),
Entity: NewEntityClient(cfg),
File: NewFileClient(cfg),
FsEvent: NewFsEventClient(cfg),
Group: NewGroupClient(cfg),
Metadata: NewMetadataClient(cfg),
Node: NewNodeClient(cfg),
@ -215,6 +220,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
DirectLink: NewDirectLinkClient(cfg),
Entity: NewEntityClient(cfg),
File: NewFileClient(cfg),
FsEvent: NewFsEventClient(cfg),
Group: NewGroupClient(cfg),
Metadata: NewMetadataClient(cfg),
Node: NewNodeClient(cfg),
@ -253,8 +259,8 @@ func (c *Client) Close() error {
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
for _, n := range []interface{ Use(...Hook) }{
c.DavAccount, c.DirectLink, c.Entity, c.File, c.Group, c.Metadata, c.Node,
c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
c.DavAccount, c.DirectLink, c.Entity, c.File, c.FsEvent, c.Group, c.Metadata,
c.Node, c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
} {
n.Use(hooks...)
}
@ -264,8 +270,8 @@ func (c *Client) Use(hooks ...Hook) {
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
func (c *Client) Intercept(interceptors ...Interceptor) {
for _, n := range []interface{ Intercept(...Interceptor) }{
c.DavAccount, c.DirectLink, c.Entity, c.File, c.Group, c.Metadata, c.Node,
c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
c.DavAccount, c.DirectLink, c.Entity, c.File, c.FsEvent, c.Group, c.Metadata,
c.Node, c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
} {
n.Intercept(interceptors...)
}
@ -282,6 +288,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
return c.Entity.mutate(ctx, m)
case *FileMutation:
return c.File.mutate(ctx, m)
case *FsEventMutation:
return c.FsEvent.mutate(ctx, m)
case *GroupMutation:
return c.Group.mutate(ctx, m)
case *MetadataMutation:
@ -1052,6 +1060,157 @@ func (c *FileClient) mutate(ctx context.Context, m *FileMutation) (Value, error)
}
}
// FsEventClient is a client for the FsEvent schema.
type FsEventClient struct {
config
}
// NewFsEventClient returns a client for the FsEvent from the given config.
func NewFsEventClient(c config) *FsEventClient {
return &FsEventClient{config: c}
}
// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `fsevent.Hooks(f(g(h())))`.
func (c *FsEventClient) Use(hooks ...Hook) {
c.hooks.FsEvent = append(c.hooks.FsEvent, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` equals to `fsevent.Intercept(f(g(h())))`.
func (c *FsEventClient) Intercept(interceptors ...Interceptor) {
c.inters.FsEvent = append(c.inters.FsEvent, interceptors...)
}
// Create returns a builder for creating a FsEvent entity.
func (c *FsEventClient) Create() *FsEventCreate {
mutation := newFsEventMutation(c.config, OpCreate)
return &FsEventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// CreateBulk returns a builder for creating a bulk of FsEvent entities.
func (c *FsEventClient) CreateBulk(builders ...*FsEventCreate) *FsEventCreateBulk {
return &FsEventCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *FsEventClient) MapCreateBulk(slice any, setFunc func(*FsEventCreate, int)) *FsEventCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &FsEventCreateBulk{err: fmt.Errorf("calling to FsEventClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*FsEventCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &FsEventCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for FsEvent.
func (c *FsEventClient) Update() *FsEventUpdate {
mutation := newFsEventMutation(c.config, OpUpdate)
return &FsEventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// UpdateOne returns an update builder for the given entity.
func (c *FsEventClient) UpdateOne(fe *FsEvent) *FsEventUpdateOne {
mutation := newFsEventMutation(c.config, OpUpdateOne, withFsEvent(fe))
return &FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// UpdateOneID returns an update builder for the given id.
func (c *FsEventClient) UpdateOneID(id int) *FsEventUpdateOne {
mutation := newFsEventMutation(c.config, OpUpdateOne, withFsEventID(id))
return &FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// Delete returns a delete builder for FsEvent.
func (c *FsEventClient) Delete() *FsEventDelete {
mutation := newFsEventMutation(c.config, OpDelete)
return &FsEventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// DeleteOne returns a builder for deleting the given entity.
func (c *FsEventClient) DeleteOne(fe *FsEvent) *FsEventDeleteOne {
return c.DeleteOneID(fe.ID)
}
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *FsEventClient) DeleteOneID(id int) *FsEventDeleteOne {
builder := c.Delete().Where(fsevent.ID(id))
builder.mutation.id = &id
builder.mutation.op = OpDeleteOne
return &FsEventDeleteOne{builder}
}
// Query returns a query builder for FsEvent.
func (c *FsEventClient) Query() *FsEventQuery {
return &FsEventQuery{
config: c.config,
ctx: &QueryContext{Type: TypeFsEvent},
inters: c.Interceptors(),
}
}
// Get returns a FsEvent entity by its id.
func (c *FsEventClient) Get(ctx context.Context, id int) (*FsEvent, error) {
return c.Query().Where(fsevent.ID(id)).Only(ctx)
}
// GetX is like Get, but panics if an error occurs.
func (c *FsEventClient) GetX(ctx context.Context, id int) *FsEvent {
obj, err := c.Get(ctx, id)
if err != nil {
panic(err)
}
return obj
}
// QueryUser queries the user edge of a FsEvent.
func (c *FsEventClient) QueryUser(fe *FsEvent) *UserQuery {
query := (&UserClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := fe.ID
step := sqlgraph.NewStep(
sqlgraph.From(fsevent.Table, fsevent.FieldID, id),
sqlgraph.To(user.Table, user.FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, fsevent.UserTable, fsevent.UserColumn),
)
fromV = sqlgraph.Neighbors(fe.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks.
func (c *FsEventClient) Hooks() []Hook {
hooks := c.hooks.FsEvent
return append(hooks[:len(hooks):len(hooks)], fsevent.Hooks[:]...)
}
// Interceptors returns the client interceptors.
func (c *FsEventClient) Interceptors() []Interceptor {
inters := c.inters.FsEvent
return append(inters[:len(inters):len(inters)], fsevent.Interceptors[:]...)
}
func (c *FsEventClient) mutate(ctx context.Context, m *FsEventMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&FsEventCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&FsEventUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&FsEventDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown FsEvent mutation op: %q", m.Op())
}
}
// GroupClient is a client for the Group schema.
type GroupClient struct {
config
@ -2528,6 +2687,22 @@ func (c *UserClient) QueryTasks(u *User) *TaskQuery {
return query
}
// QueryFsevents queries the fsevents edge of a User.
func (c *UserClient) QueryFsevents(u *User) *FsEventQuery {
query := (&FsEventClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := u.ID
step := sqlgraph.NewStep(
sqlgraph.From(user.Table, user.FieldID, id),
sqlgraph.To(fsevent.Table, fsevent.FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, user.FseventsTable, user.FseventsColumn),
)
fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
return fromV, nil
}
return query
}
// QueryEntities queries the entities edge of a User.
func (c *UserClient) QueryEntities(u *User) *EntityQuery {
query := (&EntityClient{config: c.config}).Query()
@ -2574,12 +2749,12 @@ func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error)
// hooks and interceptors per client, for fast access.
type (
hooks struct {
DavAccount, DirectLink, Entity, File, Group, Metadata, Node, Passkey, Setting,
Share, StoragePolicy, Task, User []ent.Hook
DavAccount, DirectLink, Entity, File, FsEvent, Group, Metadata, Node, Passkey,
Setting, Share, StoragePolicy, Task, User []ent.Hook
}
inters struct {
DavAccount, DirectLink, Entity, File, Group, Metadata, Node, Passkey, Setting,
Share, StoragePolicy, Task, User []ent.Interceptor
DavAccount, DirectLink, Entity, File, FsEvent, Group, Metadata, Node, Passkey,
Setting, Share, StoragePolicy, Task, User []ent.Interceptor
}
)

View File

@ -16,6 +16,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
@ -89,6 +90,7 @@ func checkColumn(table, column string) error {
directlink.Table: directlink.ValidColumn,
entity.Table: entity.ValidColumn,
file.Table: file.ValidColumn,
fsevent.Table: fsevent.ValidColumn,
group.Table: group.ValidColumn,
metadata.Table: metadata.ValidColumn,
node.Table: node.ValidColumn,

View File

@ -42,8 +42,8 @@ type Entity struct {
CreatedBy int `json:"created_by,omitempty"`
// UploadSessionID holds the value of the "upload_session_id" field.
UploadSessionID *uuid.UUID `json:"upload_session_id,omitempty"`
// RecycleOptions holds the value of the "recycle_options" field.
RecycleOptions *types.EntityRecycleOption `json:"recycle_options,omitempty"`
// Props holds the value of the "props" field.
Props *types.EntityProps `json:"props,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the EntityQuery when eager-loading is set.
Edges EntityEdges `json:"edges"`
@ -105,7 +105,7 @@ func (*Entity) scanValues(columns []string) ([]any, error) {
switch columns[i] {
case entity.FieldUploadSessionID:
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
case entity.FieldRecycleOptions:
case entity.FieldProps:
values[i] = new([]byte)
case entity.FieldID, entity.FieldType, entity.FieldSize, entity.FieldReferenceCount, entity.FieldStoragePolicyEntities, entity.FieldCreatedBy:
values[i] = new(sql.NullInt64)
@ -196,12 +196,12 @@ func (e *Entity) assignValues(columns []string, values []any) error {
e.UploadSessionID = new(uuid.UUID)
*e.UploadSessionID = *value.S.(*uuid.UUID)
}
case entity.FieldRecycleOptions:
case entity.FieldProps:
if value, ok := values[i].(*[]byte); !ok {
return fmt.Errorf("unexpected type %T for field recycle_options", values[i])
return fmt.Errorf("unexpected type %T for field props", values[i])
} else if value != nil && len(*value) > 0 {
if err := json.Unmarshal(*value, &e.RecycleOptions); err != nil {
return fmt.Errorf("unmarshal field recycle_options: %w", err)
if err := json.Unmarshal(*value, &e.Props); err != nil {
return fmt.Errorf("unmarshal field props: %w", err)
}
}
default:
@ -289,8 +289,8 @@ func (e *Entity) String() string {
builder.WriteString(fmt.Sprintf("%v", *v))
}
builder.WriteString(", ")
builder.WriteString("recycle_options=")
builder.WriteString(fmt.Sprintf("%v", e.RecycleOptions))
builder.WriteString("props=")
builder.WriteString(fmt.Sprintf("%v", e.Props))
builder.WriteByte(')')
return builder.String()
}

View File

@ -35,8 +35,8 @@ const (
FieldCreatedBy = "created_by"
// FieldUploadSessionID holds the string denoting the upload_session_id field in the database.
FieldUploadSessionID = "upload_session_id"
// FieldRecycleOptions holds the string denoting the recycle_options field in the database.
FieldRecycleOptions = "recycle_options"
// FieldProps holds the string denoting the props field in the database.
FieldProps = "recycle_options"
// EdgeFile holds the string denoting the file edge name in mutations.
EdgeFile = "file"
// EdgeUser holds the string denoting the user edge name in mutations.
@ -79,7 +79,7 @@ var Columns = []string{
FieldStoragePolicyEntities,
FieldCreatedBy,
FieldUploadSessionID,
FieldRecycleOptions,
FieldProps,
}
var (

View File

@ -521,14 +521,14 @@ func UploadSessionIDNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldUploadSessionID))
}
// RecycleOptionsIsNil applies the IsNil predicate on the "recycle_options" field.
func RecycleOptionsIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldRecycleOptions))
// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldProps))
}
// RecycleOptionsNotNil applies the NotNil predicate on the "recycle_options" field.
func RecycleOptionsNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldRecycleOptions))
// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldProps))
}
// HasFile applies the HasEdge predicate on the "file" edge.

View File

@ -135,9 +135,9 @@ func (ec *EntityCreate) SetNillableUploadSessionID(u *uuid.UUID) *EntityCreate {
return ec
}
// SetRecycleOptions sets the "recycle_options" field.
func (ec *EntityCreate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityCreate {
ec.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (ec *EntityCreate) SetProps(tp *types.EntityProps) *EntityCreate {
ec.mutation.SetProps(tp)
return ec
}
@ -336,9 +336,9 @@ func (ec *EntityCreate) createSpec() (*Entity, *sqlgraph.CreateSpec) {
_spec.SetField(entity.FieldUploadSessionID, field.TypeUUID, value)
_node.UploadSessionID = &value
}
if value, ok := ec.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
_node.RecycleOptions = value
if value, ok := ec.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
_node.Props = value
}
if nodes := ec.mutation.FileIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
@ -586,21 +586,21 @@ func (u *EntityUpsert) ClearUploadSessionID() *EntityUpsert {
return u
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsert) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsert {
u.Set(entity.FieldRecycleOptions, v)
// SetProps sets the "props" field.
func (u *EntityUpsert) SetProps(v *types.EntityProps) *EntityUpsert {
u.Set(entity.FieldProps, v)
return u
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsert) UpdateRecycleOptions() *EntityUpsert {
u.SetExcluded(entity.FieldRecycleOptions)
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsert) UpdateProps() *EntityUpsert {
u.SetExcluded(entity.FieldProps)
return u
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsert) ClearRecycleOptions() *EntityUpsert {
u.SetNull(entity.FieldRecycleOptions)
// ClearProps clears the value of the "props" field.
func (u *EntityUpsert) ClearProps() *EntityUpsert {
u.SetNull(entity.FieldProps)
return u
}
@ -817,24 +817,24 @@ func (u *EntityUpsertOne) ClearUploadSessionID() *EntityUpsertOne {
})
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsertOne) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertOne {
// SetProps sets the "props" field.
func (u *EntityUpsertOne) SetProps(v *types.EntityProps) *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.SetRecycleOptions(v)
s.SetProps(v)
})
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsertOne) UpdateRecycleOptions() *EntityUpsertOne {
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsertOne) UpdateProps() *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.UpdateRecycleOptions()
s.UpdateProps()
})
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsertOne) ClearRecycleOptions() *EntityUpsertOne {
// ClearProps clears the value of the "props" field.
func (u *EntityUpsertOne) ClearProps() *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.ClearRecycleOptions()
s.ClearProps()
})
}
@ -1222,24 +1222,24 @@ func (u *EntityUpsertBulk) ClearUploadSessionID() *EntityUpsertBulk {
})
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsertBulk) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertBulk {
// SetProps sets the "props" field.
func (u *EntityUpsertBulk) SetProps(v *types.EntityProps) *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.SetRecycleOptions(v)
s.SetProps(v)
})
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsertBulk) UpdateRecycleOptions() *EntityUpsertBulk {
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsertBulk) UpdateProps() *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.UpdateRecycleOptions()
s.UpdateProps()
})
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsertBulk) ClearRecycleOptions() *EntityUpsertBulk {
// ClearProps clears the value of the "props" field.
func (u *EntityUpsertBulk) ClearProps() *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.ClearRecycleOptions()
s.ClearProps()
})
}

View File

@ -190,15 +190,15 @@ func (eu *EntityUpdate) ClearUploadSessionID() *EntityUpdate {
return eu
}
// SetRecycleOptions sets the "recycle_options" field.
func (eu *EntityUpdate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdate {
eu.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (eu *EntityUpdate) SetProps(tp *types.EntityProps) *EntityUpdate {
eu.mutation.SetProps(tp)
return eu
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (eu *EntityUpdate) ClearRecycleOptions() *EntityUpdate {
eu.mutation.ClearRecycleOptions()
// ClearProps clears the value of the "props" field.
func (eu *EntityUpdate) ClearProps() *EntityUpdate {
eu.mutation.ClearProps()
return eu
}
@ -383,11 +383,11 @@ func (eu *EntityUpdate) sqlSave(ctx context.Context) (n int, err error) {
if eu.mutation.UploadSessionIDCleared() {
_spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
}
if value, ok := eu.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
if value, ok := eu.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
}
if eu.mutation.RecycleOptionsCleared() {
_spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
if eu.mutation.PropsCleared() {
_spec.ClearField(entity.FieldProps, field.TypeJSON)
}
if eu.mutation.FileCleared() {
edge := &sqlgraph.EdgeSpec{
@ -669,15 +669,15 @@ func (euo *EntityUpdateOne) ClearUploadSessionID() *EntityUpdateOne {
return euo
}
// SetRecycleOptions sets the "recycle_options" field.
func (euo *EntityUpdateOne) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdateOne {
euo.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (euo *EntityUpdateOne) SetProps(tp *types.EntityProps) *EntityUpdateOne {
euo.mutation.SetProps(tp)
return euo
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (euo *EntityUpdateOne) ClearRecycleOptions() *EntityUpdateOne {
euo.mutation.ClearRecycleOptions()
// ClearProps clears the value of the "props" field.
func (euo *EntityUpdateOne) ClearProps() *EntityUpdateOne {
euo.mutation.ClearProps()
return euo
}
@ -892,11 +892,11 @@ func (euo *EntityUpdateOne) sqlSave(ctx context.Context) (_node *Entity, err err
if euo.mutation.UploadSessionIDCleared() {
_spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
}
if value, ok := euo.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
if value, ok := euo.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
}
if euo.mutation.RecycleOptionsCleared() {
_spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
if euo.mutation.PropsCleared() {
_spec.ClearField(entity.FieldProps, field.TypeJSON)
}
if euo.mutation.FileCleared() {
edge := &sqlgraph.EdgeSpec{

204
ent/fsevent.go Normal file
View File

@ -0,0 +1,204 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/gofrs/uuid"
)
// FsEvent is the model entity for the FsEvent schema.
//
// It records a filesystem change event to be delivered to a subscriber
// (NOTE(review): presumably the SSE-based fs change notification feature —
// confirm against the schema definition).
type FsEvent struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	// Nil when the row has not been deleted (soft-delete style nilable field).
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Event holds the value of the "event" field.
	// NOTE(review): stored as a plain string — presumably a serialized event
	// payload; confirm the encoding against the producing code.
	Event string `json:"event,omitempty"`
	// Subscriber holds the value of the "subscriber" field.
	Subscriber uuid.UUID `json:"subscriber,omitempty"`
	// UserFsevent holds the value of the "user_fsevent" field.
	// Foreign-key column backing the "user" edge (see UserColumn in the fsevent package).
	UserFsevent int `json:"user_fsevent,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the FsEventQuery when eager-loading is set.
	Edges FsEventEdges `json:"edges"`
	// selectValues stores dynamically selected values (modifiers, order terms).
	selectValues sql.SelectValues
}

// FsEventEdges holds the relations/edges for other nodes in the graph.
type FsEventEdges struct {
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}

// UserOrErr returns the User value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FsEventEdges) UserOrErr() (*User, error) {
	if e.loadedTypes[0] {
		if e.User == nil {
			// Edge was loaded but was not found.
			return nil, &NotFoundError{label: user.Label}
		}
		return e.User, nil
	}
	return nil, &NotLoadedError{edge: "user"}
}
// scanValues returns the types for scanning values from sql.Rows.
// Each known column gets a scanner matching its Go type; unknown columns
// fall through to sql.UnknownType so dynamically selected values still scan.
func (*FsEvent) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case fsevent.FieldID, fsevent.FieldUserFsevent:
			values[i] = new(sql.NullInt64)
		case fsevent.FieldEvent:
			values[i] = new(sql.NullString)
		case fsevent.FieldCreatedAt, fsevent.FieldUpdatedAt, fsevent.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		case fsevent.FieldSubscriber:
			// uuid.UUID is used directly as the scan target here.
			values[i] = new(uuid.UUID)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the FsEvent fields.
func (fe *FsEvent) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case fsevent.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				// Note: formats the asserted target (nil on failure) rather than
				// values[i] — this matches standard ent codegen output for the ID field.
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			fe.ID = int(value.Int64)
		case fsevent.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				fe.CreatedAt = value.Time
			}
		case fsevent.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				fe.UpdatedAt = value.Time
			}
		case fsevent.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// Nilable field: allocate only when the DB value is non-NULL.
				fe.DeletedAt = new(time.Time)
				*fe.DeletedAt = value.Time
			}
		case fsevent.FieldEvent:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field event", values[i])
			} else if value.Valid {
				fe.Event = value.String
			}
		case fsevent.FieldSubscriber:
			if value, ok := values[i].(*uuid.UUID); !ok {
				return fmt.Errorf("unexpected type %T for field subscriber", values[i])
			} else if value != nil {
				fe.Subscriber = *value
			}
		case fsevent.FieldUserFsevent:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field user_fsevent", values[i])
			} else if value.Valid {
				fe.UserFsevent = int(value.Int64)
			}
		default:
			// Unknown columns were scanned via sql.UnknownType; keep them
			// retrievable through FsEvent.Value.
			fe.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the FsEvent.
// This includes values selected through modifiers, order, etc.
func (fe *FsEvent) Value(name string) (ent.Value, error) {
	return fe.selectValues.Get(name)
}

// QueryUser queries the "user" edge of the FsEvent entity.
func (fe *FsEvent) QueryUser() *UserQuery {
	return NewFsEventClient(fe.config).QueryUser(fe)
}

// Update returns a builder for updating this FsEvent.
// Note that you need to call FsEvent.Unwrap() before calling this method if this FsEvent
// was returned from a transaction, and the transaction was committed or rolled back.
func (fe *FsEvent) Update() *FsEventUpdateOne {
	return NewFsEventClient(fe.config).UpdateOne(fe)
}

// Unwrap unwraps the FsEvent entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity was not loaded through a transaction.
func (fe *FsEvent) Unwrap() *FsEvent {
	_tx, ok := fe.config.driver.(*txDriver)
	if !ok {
		panic("ent: FsEvent is not a transactional entity")
	}
	fe.config.driver = _tx.drv
	return fe
}
// String implements the fmt.Stringer.
func (fe *FsEvent) String() string {
	var builder strings.Builder
	builder.WriteString("FsEvent(")
	builder.WriteString(fmt.Sprintf("id=%v, ", fe.ID))
	builder.WriteString("created_at=")
	builder.WriteString(fe.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(fe.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	// deleted_at is omitted when nil, but the trailing separator below is
	// still written — this is the generated layout, kept as-is.
	if v := fe.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("event=")
	builder.WriteString(fe.Event)
	builder.WriteString(", ")
	builder.WriteString("subscriber=")
	builder.WriteString(fmt.Sprintf("%v", fe.Subscriber))
	builder.WriteString(", ")
	builder.WriteString("user_fsevent=")
	builder.WriteString(fmt.Sprintf("%v", fe.UserFsevent))
	builder.WriteByte(')')
	return builder.String()
}
// SetUser manually sets the "user" edge as loaded with the given value,
// marking the edge as eagerly loaded so UserOrErr succeeds.
//
// Receiver renamed e -> fe for consistency with every other *FsEvent method
// in this file (Go convention: one receiver name per type). Note this file is
// produced by a custom codegen template — apply the same rename there so it
// survives regeneration.
func (fe *FsEvent) SetUser(v *User) {
	fe.Edges.User = v
	fe.Edges.loadedTypes[0] = true
}

// FsEvents is a parsable slice of FsEvent.
type FsEvents []*FsEvent

130
ent/fsevent/fsevent.go Normal file
View File

@ -0,0 +1,130 @@
// Code generated by ent, DO NOT EDIT.
package fsevent
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
	// Label holds the string label denoting the fsevent type in the database.
	Label = "fs_event"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldEvent holds the string denoting the event field in the database.
	FieldEvent = "event"
	// FieldSubscriber holds the string denoting the subscriber field in the database.
	FieldSubscriber = "subscriber"
	// FieldUserFsevent holds the string denoting the user_fsevent field in the database.
	FieldUserFsevent = "user_fsevent"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the fsevent in the database.
	Table = "fs_events"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "fs_events"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_fsevent"
)

// Columns holds all SQL columns for fsevent fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldEvent,
	FieldSubscriber,
	FieldUserFsevent,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	// Linear scan over the fixed, small column set.
	for _, col := range Columns {
		if col == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks holds the mutation hooks registered by the runtime package for FsEvent.
	Hooks [1]ent.Hook
	// Interceptors holds the query interceptors registered by the runtime package for FsEvent.
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
// OrderOption defines the ordering options for the FsEvent queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByEvent orders the results by the event field.
func ByEvent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEvent, opts...).ToFunc()
}

// BySubscriber orders the results by the subscriber field.
func BySubscriber(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSubscriber, opts...).ToFunc()
}

// ByUserFsevent orders the results by the user_fsevent field.
func ByUserFsevent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserFsevent, opts...).ToFunc()
}

// ByUserField orders the results by user field.
// It orders by a field of the neighboring User row via the "user" edge.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}
// newUserStep returns the sqlgraph traversal step for the "user" edge:
// an inverse M2O relation whose foreign key (UserColumn) lives on the
// fs_events table itself.
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}

390
ent/fsevent/where.go Normal file
View File

@ -0,0 +1,390 @@
// Code generated by ent, DO NOT EDIT.
package fsevent
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/gofrs/uuid"
)
// ID predicates: each helper wraps an entgo sql field predicate over FieldID.

// ID filters vertices based on their ID field.
func ID(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldID, id))
}
// Field shortcut predicates: X(v) is shorthand for XEQ(v) on each field.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUpdatedAt, v))
}

// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldDeletedAt, v))
}

// Event applies equality check predicate on the "event" field. It's identical to EventEQ.
func Event(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldEvent, v))
}

// Subscriber applies equality check predicate on the "subscriber" field. It's identical to SubscriberEQ.
func Subscriber(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldSubscriber, v))
}

// UserFsevent applies equality check predicate on the "user_fsevent" field. It's identical to UserFseventEQ.
func UserFsevent(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUserFsevent, v))
}
// Timestamp range predicates for "created_at" and "updated_at".

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldUpdatedAt, v))
}
// Predicates for the nilable "deleted_at" field. IsNil/NotNil distinguish
// live rows from deleted ones (NOTE(review): presumably used by a soft-delete
// interceptor — confirm against the ent runtime wiring).

// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldDeletedAt, v))
}

// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldDeletedAt, v))
}

// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldDeletedAt, vs...))
}

// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldDeletedAt, vs...))
}

// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldDeletedAt, v))
}

// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldDeletedAt, v))
}

// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldDeletedAt, v))
}

// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldDeletedAt, v))
}

// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIsNull(FieldDeletedAt))
}

// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotNull(FieldDeletedAt))
}
// String predicates for the "event" field. Contains/HasPrefix/HasSuffix map to
// SQL LIKE patterns; the *Fold variants are case-insensitive.

// EventEQ applies the EQ predicate on the "event" field.
func EventEQ(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldEvent, v))
}

// EventNEQ applies the NEQ predicate on the "event" field.
func EventNEQ(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldEvent, v))
}

// EventIn applies the In predicate on the "event" field.
func EventIn(vs ...string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldEvent, vs...))
}

// EventNotIn applies the NotIn predicate on the "event" field.
func EventNotIn(vs ...string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldEvent, vs...))
}

// EventGT applies the GT predicate on the "event" field.
func EventGT(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldEvent, v))
}

// EventGTE applies the GTE predicate on the "event" field.
func EventGTE(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldEvent, v))
}

// EventLT applies the LT predicate on the "event" field.
func EventLT(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldEvent, v))
}

// EventLTE applies the LTE predicate on the "event" field.
func EventLTE(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldEvent, v))
}

// EventContains applies the Contains predicate on the "event" field.
func EventContains(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldContains(FieldEvent, v))
}

// EventHasPrefix applies the HasPrefix predicate on the "event" field.
func EventHasPrefix(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldHasPrefix(FieldEvent, v))
}

// EventHasSuffix applies the HasSuffix predicate on the "event" field.
func EventHasSuffix(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldHasSuffix(FieldEvent, v))
}

// EventEqualFold applies the EqualFold predicate on the "event" field.
func EventEqualFold(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEqualFold(FieldEvent, v))
}

// EventContainsFold applies the ContainsFold predicate on the "event" field.
func EventContainsFold(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldContainsFold(FieldEvent, v))
}
// SubscriberEQ applies the EQ predicate on the "subscriber" field.
func SubscriberEQ(v uuid.UUID) predicate.FsEvent {
return predicate.FsEvent(sql.FieldEQ(FieldSubscriber, v))
}
// SubscriberNEQ applies the NEQ predicate on the "subscriber" field.
func SubscriberNEQ(v uuid.UUID) predicate.FsEvent {
return predicate.FsEvent(sql.FieldNEQ(FieldSubscriber, v))
}
// SubscriberIn applies the In predicate on the "subscriber" field.
func SubscriberIn(vs ...uuid.UUID) predicate.FsEvent {
return predicate.FsEvent(sql.FieldIn(FieldSubscriber, vs...))
}
// SubscriberNotIn applies the NotIn predicate on the "subscriber" field.
func SubscriberNotIn(vs ...uuid.UUID) predicate.FsEvent {
return predicate.FsEvent(sql.FieldNotIn(FieldSubscriber, vs...))
}
// SubscriberGT applies the GT predicate on the "subscriber" field.
func SubscriberGT(v uuid.UUID) predicate.FsEvent {
return predicate.FsEvent(sql.FieldGT(FieldSubscriber, v))
}
// SubscriberGTE applies the GTE predicate on the "subscriber" field.
func SubscriberGTE(v uuid.UUID) predicate.FsEvent {
return predicate.FsEvent(sql.FieldGTE(FieldSubscriber, v))
}
// SubscriberLT applies the LT predicate on the "subscriber" field.
func SubscriberLT(v uuid.UUID) predicate.FsEvent {
return predicate.FsEvent(sql.FieldLT(FieldSubscriber, v))
}
// SubscriberLTE applies the LTE predicate on the "subscriber" field.
func SubscriberLTE(v uuid.UUID) predicate.FsEvent {
return predicate.FsEvent(sql.FieldLTE(FieldSubscriber, v))
}
// Predicates for the optional "user_fsevent" foreign-key field.

// UserFseventEQ applies the EQ predicate on the "user_fsevent" field.
func UserFseventEQ(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUserFsevent, v))
}

// UserFseventNEQ applies the NEQ predicate on the "user_fsevent" field.
func UserFseventNEQ(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldUserFsevent, v))
}

// UserFseventIn applies the In predicate on the "user_fsevent" field.
func UserFseventIn(vs ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldUserFsevent, vs...))
}

// UserFseventNotIn applies the NotIn predicate on the "user_fsevent" field.
func UserFseventNotIn(vs ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldUserFsevent, vs...))
}

// UserFseventIsNil applies the IsNil predicate on the "user_fsevent" field
// (translates to SQL "IS NULL").
func UserFseventIsNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIsNull(FieldUserFsevent))
}

// UserFseventNotNil applies the NotNil predicate on the "user_fsevent" field
// (translates to SQL "IS NOT NULL").
func UserFseventNotNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotNull(FieldUserFsevent))
}
// HasUser applies the HasEdge predicate on the "user" edge.
func HasUser() predicate.FsEvent {
	return predicate.FsEvent(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			// M2O inverse edge: each FsEvent points at (at most) one User.
			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasUserWith applies the HasEdge predicate on the "user" edge with given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.FsEvent {
	return predicate.FsEvent(func(s *sql.Selector) {
		step := newUserStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.NotPredicates(p))
}

827
ent/fsevent_create.go Normal file
View File

@ -0,0 +1,827 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/gofrs/uuid"
)
// FsEventCreate is the builder for creating a FsEvent entity.
type FsEventCreate struct {
config
mutation *FsEventMutation
hooks []Hook
conflict []sql.ConflictOption
}
// SetCreatedAt sets the "created_at" field.
func (fec *FsEventCreate) SetCreatedAt(t time.Time) *FsEventCreate {
fec.mutation.SetCreatedAt(t)
return fec
}
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableCreatedAt(t *time.Time) *FsEventCreate {
if t != nil {
fec.SetCreatedAt(*t)
}
return fec
}
// SetUpdatedAt sets the "updated_at" field.
func (fec *FsEventCreate) SetUpdatedAt(t time.Time) *FsEventCreate {
fec.mutation.SetUpdatedAt(t)
return fec
}
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableUpdatedAt(t *time.Time) *FsEventCreate {
if t != nil {
fec.SetUpdatedAt(*t)
}
return fec
}
// SetDeletedAt sets the "deleted_at" field.
func (fec *FsEventCreate) SetDeletedAt(t time.Time) *FsEventCreate {
fec.mutation.SetDeletedAt(t)
return fec
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableDeletedAt(t *time.Time) *FsEventCreate {
if t != nil {
fec.SetDeletedAt(*t)
}
return fec
}
// SetEvent sets the "event" field.
func (fec *FsEventCreate) SetEvent(s string) *FsEventCreate {
fec.mutation.SetEvent(s)
return fec
}
// SetSubscriber sets the "subscriber" field.
func (fec *FsEventCreate) SetSubscriber(u uuid.UUID) *FsEventCreate {
fec.mutation.SetSubscriber(u)
return fec
}
// SetUserFsevent sets the "user_fsevent" field.
func (fec *FsEventCreate) SetUserFsevent(i int) *FsEventCreate {
fec.mutation.SetUserFsevent(i)
return fec
}
// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableUserFsevent(i *int) *FsEventCreate {
if i != nil {
fec.SetUserFsevent(*i)
}
return fec
}
// SetUserID sets the "user" edge to the User entity by ID.
func (fec *FsEventCreate) SetUserID(id int) *FsEventCreate {
fec.mutation.SetUserID(id)
return fec
}
// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (fec *FsEventCreate) SetNillableUserID(id *int) *FsEventCreate {
if id != nil {
fec = fec.SetUserID(*id)
}
return fec
}
// SetUser sets the "user" edge to the User entity.
func (fec *FsEventCreate) SetUser(u *User) *FsEventCreate {
return fec.SetUserID(u.ID)
}
// Mutation returns the FsEventMutation object of the builder.
func (fec *FsEventCreate) Mutation() *FsEventMutation {
return fec.mutation
}
// Save creates the FsEvent in the database.
func (fec *FsEventCreate) Save(ctx context.Context) (*FsEvent, error) {
	if err := fec.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, fec.sqlSave, fec.mutation, fec.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (fec *FsEventCreate) SaveX(ctx context.Context) *FsEvent {
	v, err := fec.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (fec *FsEventCreate) Exec(ctx context.Context) error {
	_, err := fec.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (fec *FsEventCreate) ExecX(ctx context.Context) {
	if err := fec.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
// A nil default func means the ent/runtime package was not imported;
// it fails loudly rather than inserting zero values.
func (fec *FsEventCreate) defaults() error {
	if _, ok := fec.mutation.CreatedAt(); !ok {
		if fsevent.DefaultCreatedAt == nil {
			return fmt.Errorf("ent: uninitialized fsevent.DefaultCreatedAt (forgotten import ent/runtime?)")
		}
		v := fsevent.DefaultCreatedAt()
		fec.mutation.SetCreatedAt(v)
	}
	if _, ok := fec.mutation.UpdatedAt(); !ok {
		if fsevent.DefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized fsevent.DefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := fsevent.DefaultUpdatedAt()
		fec.mutation.SetUpdatedAt(v)
	}
	return nil
}
// check runs all checks and user-defined validators on the builder.
func (fec *FsEventCreate) check() error {
	if _, ok := fec.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "FsEvent.created_at"`)}
	}
	if _, ok := fec.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "FsEvent.updated_at"`)}
	}
	if _, ok := fec.mutation.Event(); !ok {
		return &ValidationError{Name: "event", err: errors.New(`ent: missing required field "FsEvent.event"`)}
	}
	if _, ok := fec.mutation.Subscriber(); !ok {
		return &ValidationError{Name: "subscriber", err: errors.New(`ent: missing required field "FsEvent.subscriber"`)}
	}
	return nil
}

// sqlSave validates the builder, issues the INSERT and back-fills the
// auto-generated ID into the returned node and the mutation.
func (fec *FsEventCreate) sqlSave(ctx context.Context) (*FsEvent, error) {
	if err := fec.check(); err != nil {
		return nil, err
	}
	_node, _spec := fec.createSpec()
	if err := sqlgraph.CreateNode(ctx, fec.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Drivers report the new ID as int64; the entity ID is int.
	id := _spec.ID.Value.(int64)
	_node.ID = int(id)
	fec.mutation.id = &_node.ID
	fec.mutation.done = true
	return _node, nil
}
// createSpec builds the entity node and the sqlgraph create-spec from the
// staged mutation values.
func (fec *FsEventCreate) createSpec() (*FsEvent, *sqlgraph.CreateSpec) {
	var (
		_node = &FsEvent{config: fec.config}
		_spec = sqlgraph.NewCreateSpec(fsevent.Table, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
	)
	if id, ok := fec.mutation.ID(); ok {
		_node.ID = id
		id64 := int64(id)
		_spec.ID.Value = id64
	}
	_spec.OnConflict = fec.conflict
	if value, ok := fec.mutation.CreatedAt(); ok {
		_spec.SetField(fsevent.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := fec.mutation.UpdatedAt(); ok {
		_spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := fec.mutation.DeletedAt(); ok {
		_spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
		_node.DeletedAt = &value
	}
	if value, ok := fec.mutation.Event(); ok {
		_spec.SetField(fsevent.FieldEvent, field.TypeString, value)
		_node.Event = value
	}
	if value, ok := fec.mutation.Subscriber(); ok {
		_spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
		_node.Subscriber = value
	}
	if nodes := fec.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   fsevent.UserTable,
			Columns: []string{fsevent.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O edge: the foreign-key field mirrors the first (only) target ID.
		_node.UserFsevent = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.FsEvent.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.FsEventUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (fec *FsEventCreate) OnConflict(opts ...sql.ConflictOption) *FsEventUpsertOne {
	fec.conflict = opts
	return &FsEventUpsertOne{
		create: fec,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (fec *FsEventCreate) OnConflictColumns(columns ...string) *FsEventUpsertOne {
	fec.conflict = append(fec.conflict, sql.ConflictColumns(columns...))
	return &FsEventUpsertOne{
		create: fec,
	}
}

type (
	// FsEventUpsertOne is the builder for "upsert"-ing
	// one FsEvent node.
	FsEventUpsertOne struct {
		create *FsEventCreate
	}

	// FsEventUpsert is the "OnConflict" setter.
	FsEventUpsert struct {
		*sql.UpdateSet
	}
)
// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsert) SetUpdatedAt(v time.Time) *FsEventUpsert {
	u.Set(fsevent.FieldUpdatedAt, v)
	return u
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateUpdatedAt() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldUpdatedAt)
	return u
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsert) SetDeletedAt(v time.Time) *FsEventUpsert {
	u.Set(fsevent.FieldDeletedAt, v)
	return u
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateDeletedAt() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldDeletedAt)
	return u
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsert) ClearDeletedAt() *FsEventUpsert {
	u.SetNull(fsevent.FieldDeletedAt)
	return u
}

// SetEvent sets the "event" field.
func (u *FsEventUpsert) SetEvent(v string) *FsEventUpsert {
	u.Set(fsevent.FieldEvent, v)
	return u
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateEvent() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldEvent)
	return u
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsert) SetSubscriber(v uuid.UUID) *FsEventUpsert {
	u.Set(fsevent.FieldSubscriber, v)
	return u
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateSubscriber() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldSubscriber)
	return u
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsert) SetUserFsevent(v int) *FsEventUpsert {
	u.Set(fsevent.FieldUserFsevent, v)
	return u
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateUserFsevent() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldUserFsevent)
	return u
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsert) ClearUserFsevent() *FsEventUpsert {
	u.SetNull(fsevent.FieldUserFsevent)
	return u
}
// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *FsEventUpsertOne) UpdateNewValues() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: keep the original row value on conflict.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(fsevent.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *FsEventUpsertOne) Ignore() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *FsEventUpsertOne) DoNothing() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the FsEventCreate.OnConflict
// documentation for more info.
func (u *FsEventUpsertOne) Update(set func(*FsEventUpsert)) *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&FsEventUpsert{UpdateSet: update})
	}))
	return u
}
// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsertOne) SetUpdatedAt(v time.Time) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateUpdatedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsertOne) SetDeletedAt(v time.Time) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateDeletedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsertOne) ClearDeletedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearDeletedAt()
	})
}

// SetEvent sets the "event" field.
func (u *FsEventUpsertOne) SetEvent(v string) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetEvent(v)
	})
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateEvent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateEvent()
	})
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsertOne) SetSubscriber(v uuid.UUID) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetSubscriber(v)
	})
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateSubscriber() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateSubscriber()
	})
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsertOne) SetUserFsevent(v int) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUserFsevent(v)
	})
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateUserFsevent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUserFsevent()
	})
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsertOne) ClearUserFsevent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearUserFsevent()
	})
}
// Exec executes the query.
func (u *FsEventUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for FsEventCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *FsEventUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *FsEventUpsertOne) ID(ctx context.Context) (id int, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *FsEventUpsertOne) IDX(ctx context.Context) int {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// SetRawID forces the entity ID instead of relying on auto-increment.
func (m *FsEventCreate) SetRawID(t int) *FsEventCreate {
	m.mutation.SetRawID(t)
	return m
}
// FsEventCreateBulk is the builder for creating many FsEvent entities in bulk.
type FsEventCreateBulk struct {
	config
	// err defers a construction-time error until Save/Exec is called.
	err      error
	builders []*FsEventCreate
	conflict []sql.ConflictOption
}
// Save creates the FsEvent entities in the database.
//
// Each builder's hooks are chained so that mutators run in order and the
// final mutator performs a single batched INSERT. Returns the created nodes
// with their IDs back-filled, or the first error encountered.
func (fecb *FsEventCreateBulk) Save(ctx context.Context) ([]*FsEvent, error) {
	if fecb.err != nil {
		return nil, fecb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(fecb.builders))
	nodes := make([]*FsEvent, len(fecb.builders))
	mutators := make([]Mutator, len(fecb.builders))
	// Apply defaults up front and propagate failures, matching the
	// single-entity Save. Previously the error returned by defaults() was
	// silently dropped, so a misconfigured schema (e.g. an uninitialized
	// fsevent.DefaultCreatedAt) surfaced later as a misleading
	// "missing required field" validation error. In the original code all
	// defaults already ran before any mutator executed (the closures below
	// are invoked immediately), so a dedicated pass preserves ordering.
	for _, builder := range fecb.builders {
		if err := builder.defaults(); err != nil {
			return nil, err
		}
	}
	for i := range fecb.builders {
		func(i int, root context.Context) {
			builder := fecb.builders[i]
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*FsEventMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					_, err = mutators[i+1].Mutate(root, fecb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = fecb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, fecb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		if _, err := mutators[0].Mutate(ctx, fecb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (fecb *FsEventCreateBulk) SaveX(ctx context.Context) []*FsEvent {
	v, err := fecb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (fecb *FsEventCreateBulk) Exec(ctx context.Context) error {
	_, err := fecb.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (fecb *FsEventCreateBulk) ExecX(ctx context.Context) {
	if err := fecb.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.FsEvent.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.FsEventUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (fecb *FsEventCreateBulk) OnConflict(opts ...sql.ConflictOption) *FsEventUpsertBulk {
	fecb.conflict = opts
	return &FsEventUpsertBulk{
		create: fecb,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (fecb *FsEventCreateBulk) OnConflictColumns(columns ...string) *FsEventUpsertBulk {
	fecb.conflict = append(fecb.conflict, sql.ConflictColumns(columns...))
	return &FsEventUpsertBulk{
		create: fecb,
	}
}
// FsEventUpsertBulk is the builder for "upsert"-ing
// a bulk of FsEvent nodes.
type FsEventUpsertBulk struct {
	create *FsEventCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *FsEventUpsertBulk) UpdateNewValues() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: keep the original row value on conflict.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(fsevent.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *FsEventUpsertBulk) Ignore() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *FsEventUpsertBulk) DoNothing() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the FsEventCreateBulk.OnConflict
// documentation for more info.
func (u *FsEventUpsertBulk) Update(set func(*FsEventUpsert)) *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&FsEventUpsert{UpdateSet: update})
	}))
	return u
}
// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsertBulk) SetUpdatedAt(v time.Time) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateUpdatedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsertBulk) SetDeletedAt(v time.Time) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateDeletedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsertBulk) ClearDeletedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearDeletedAt()
	})
}

// SetEvent sets the "event" field.
func (u *FsEventUpsertBulk) SetEvent(v string) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetEvent(v)
	})
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateEvent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateEvent()
	})
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsertBulk) SetSubscriber(v uuid.UUID) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetSubscriber(v)
	})
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateSubscriber() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateSubscriber()
	})
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsertBulk) SetUserFsevent(v int) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUserFsevent(v)
	})
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateUserFsevent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUserFsevent()
	})
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsertBulk) ClearUserFsevent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearUserFsevent()
	})
}
// Exec executes the query.
func (u *FsEventUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	// Conflict options must be set on the bulk builder, not on the
	// individual create builders it wraps.
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the FsEventCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for FsEventCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *FsEventUpsertBulk) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

88
ent/fsevent_delete.go Normal file
View File

@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// FsEventDelete is the builder for deleting a FsEvent entity.
type FsEventDelete struct {
	config
	hooks    []Hook
	mutation *FsEventMutation
}

// Where appends a list of predicates to the FsEventDelete builder.
func (fed *FsEventDelete) Where(ps ...predicate.FsEvent) *FsEventDelete {
	fed.mutation.Where(ps...)
	return fed
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (fed *FsEventDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, fed.sqlExec, fed.mutation, fed.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (fed *FsEventDelete) ExecX(ctx context.Context) int {
	n, err := fed.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}
// sqlExec builds the DELETE spec from the accumulated predicates, runs it
// through the sqlgraph layer, and reports the number of deleted rows.
func (fed *FsEventDelete) sqlExec(ctx context.Context) (int, error) {
	spec := sqlgraph.NewDeleteSpec(fsevent.Table, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
	if preds := fed.mutation.predicates; len(preds) > 0 {
		spec.Predicate = func(selector *sql.Selector) {
			for _, pred := range preds {
				pred(selector)
			}
		}
	}
	n, err := sqlgraph.DeleteNodes(ctx, fed.driver, spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	fed.mutation.done = true
	return n, err
}
// FsEventDeleteOne is the builder for deleting a single FsEvent entity.
type FsEventDeleteOne struct {
	fed *FsEventDelete
}

// Where appends a list of predicates to the FsEventDeleteOne builder.
func (fedo *FsEventDeleteOne) Where(ps ...predicate.FsEvent) *FsEventDeleteOne {
	fedo.fed.mutation.Where(ps...)
	return fedo
}

// Exec executes the deletion query.
func (fedo *FsEventDeleteOne) Exec(ctx context.Context) error {
	n, err := fedo.fed.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		// No rows matched the predicates: report not-found rather than success.
		return &NotFoundError{fsevent.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (fedo *FsEventDeleteOne) ExecX(ctx context.Context) {
	if err := fedo.Exec(ctx); err != nil {
		panic(err)
	}
}

605
ent/fsevent_query.go Normal file
View File

@ -0,0 +1,605 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// FsEventQuery is the builder for querying FsEvent entities.
type FsEventQuery struct {
	config
	ctx        *QueryContext
	order      []fsevent.OrderOption
	inters     []Interceptor
	predicates []predicate.FsEvent
	// withUser, when non-nil, eager-loads the "user" edge.
	withUser *UserQuery
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the FsEventQuery builder.
func (feq *FsEventQuery) Where(ps ...predicate.FsEvent) *FsEventQuery {
	feq.predicates = append(feq.predicates, ps...)
	return feq
}

// Limit the number of records to be returned by this query.
func (feq *FsEventQuery) Limit(limit int) *FsEventQuery {
	feq.ctx.Limit = &limit
	return feq
}

// Offset to start from.
func (feq *FsEventQuery) Offset(offset int) *FsEventQuery {
	feq.ctx.Offset = &offset
	return feq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (feq *FsEventQuery) Unique(unique bool) *FsEventQuery {
	feq.ctx.Unique = &unique
	return feq
}

// Order specifies how the records should be ordered.
func (feq *FsEventQuery) Order(o ...fsevent.OrderOption) *FsEventQuery {
	feq.order = append(feq.order, o...)
	return feq
}
// QueryUser chains the current query on the "user" edge.
// The traversal path is resolved lazily when the returned UserQuery executes.
func (feq *FsEventQuery) QueryUser() *UserQuery {
	query := (&UserClient{config: feq.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := feq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := feq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(fsevent.Table, fsevent.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, fsevent.UserTable, fsevent.UserColumn),
		)
		fromU = sqlgraph.SetNeighbors(feq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// First returns the first FsEvent entity from the query.
// Returns a *NotFoundError when no FsEvent was found.
func (feq *FsEventQuery) First(ctx context.Context) (*FsEvent, error) {
	nodes, err := feq.Limit(1).All(setContextOp(ctx, feq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{fsevent.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil instead of panicking.
func (feq *FsEventQuery) FirstX(ctx context.Context) *FsEvent {
	node, err := feq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first FsEvent ID from the query.
// Returns a *NotFoundError when no FsEvent ID was found.
func (feq *FsEventQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = feq.Limit(1).IDs(setContextOp(ctx, feq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{fsevent.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result returns the zero ID instead of panicking.
func (feq *FsEventQuery) FirstIDX(ctx context.Context) int {
	id, err := feq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
// Only returns a single FsEvent entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one FsEvent entity is found.
// Returns a *NotFoundError when no FsEvent entities are found.
func (feq *FsEventQuery) Only(ctx context.Context) (*FsEvent, error) {
	// Limit(2) is enough to distinguish "exactly one" from "more than one".
	nodes, err := feq.Limit(2).All(setContextOp(ctx, feq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{fsevent.Label}
	default:
		return nil, &NotSingularError{fsevent.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (feq *FsEventQuery) OnlyX(ctx context.Context) *FsEvent {
	node, err := feq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only FsEvent ID in the query.
// Returns a *NotSingularError when more than one FsEvent ID is found.
// Returns a *NotFoundError when no entities are found.
func (feq *FsEventQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = feq.Limit(2).IDs(setContextOp(ctx, feq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{fsevent.Label}
	default:
		err = &NotSingularError{fsevent.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (feq *FsEventQuery) OnlyIDX(ctx context.Context) int {
	id, err := feq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of FsEvents.
func (feq *FsEventQuery) All(ctx context.Context) ([]*FsEvent, error) {
ctx = setContextOp(ctx, feq.ctx, "All")
if err := feq.prepareQuery(ctx); err != nil {
return nil, err
}
// The actual SQL execution (sqlAll) runs at the end of the interceptor chain.
qr := querierAll[[]*FsEvent, *FsEventQuery]()
return withInterceptors[[]*FsEvent](ctx, feq, qr, feq.inters)
}
// AllX is like All, but panics if an error occurs.
func (feq *FsEventQuery) AllX(ctx context.Context) []*FsEvent {
nodes, err := feq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of FsEvent IDs.
func (feq *FsEventQuery) IDs(ctx context.Context) (ids []int, err error) {
// When the query was built from an edge traversal (path != nil) and the caller
// did not decide on uniqueness, default to DISTINCT to avoid duplicate IDs.
if feq.ctx.Unique == nil && feq.path != nil {
feq.Unique(true)
}
ctx = setContextOp(ctx, feq.ctx, "IDs")
if err = feq.Select(fsevent.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (feq *FsEventQuery) IDsX(ctx context.Context) []int {
ids, err := feq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (feq *FsEventQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, feq.ctx, "Count")
if err := feq.prepareQuery(ctx); err != nil {
return 0, err
}
// Counting runs through the same interceptor pipeline as All.
return withInterceptors[int](ctx, feq, querierCount[*FsEventQuery](), feq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (feq *FsEventQuery) CountX(ctx context.Context) int {
count, err := feq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (feq *FsEventQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, feq.ctx, "Exist")
// Existence is implemented as a LIMIT-1 ID lookup: not-found maps to
// (false, nil), any other error is surfaced with context.
switch _, err := feq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
// ExistX is like Exist, but panics if an error occurs.
func (feq *FsEventQuery) ExistX(ctx context.Context) bool {
exist, err := feq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the FsEventQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (feq *FsEventQuery) Clone() *FsEventQuery {
// Cloning a nil builder is a no-op, keeping chained clones nil-safe.
if feq == nil {
return nil
}
return &FsEventQuery{
config: feq.config,
ctx: feq.ctx.Clone(),
// append onto fresh empty slices so the clone owns its own backing arrays.
order: append([]fsevent.OrderOption{}, feq.order...),
inters: append([]Interceptor{}, feq.inters...),
predicates: append([]predicate.FsEvent{}, feq.predicates...),
withUser: feq.withUser.Clone(),
// clone intermediate query.
sql: feq.sql.Clone(),
path: feq.path,
}
}
// WithUser tells the query-builder to eager-load the nodes that are connected to
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
func (feq *FsEventQuery) WithUser(opts ...func(*UserQuery)) *FsEventQuery {
// Build the edge query up-front; sqlAll later uses it to load the edge.
query := (&UserClient{config: feq.config}).Query()
for _, opt := range opts {
opt(query)
}
feq.withUser = query
return feq
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// CreatedAt time.Time `json:"created_at,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.FsEvent.Query().
// GroupBy(fsevent.FieldCreatedAt).
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (feq *FsEventQuery) GroupBy(field string, fields ...string) *FsEventGroupBy {
// The group-by columns replace any previously selected fields on the query.
feq.ctx.Fields = append([]string{field}, fields...)
grbuild := &FsEventGroupBy{build: feq}
// flds points at the query's field list so later additions stay visible.
grbuild.flds = &feq.ctx.Fields
grbuild.label = fsevent.Label
grbuild.scan = grbuild.Scan
return grbuild
}
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
// var v []struct {
// CreatedAt time.Time `json:"created_at,omitempty"`
// }
//
// client.FsEvent.Query().
// Select(fsevent.FieldCreatedAt).
// Scan(ctx, &v)
func (feq *FsEventQuery) Select(fields ...string) *FsEventSelect {
// Unlike GroupBy, Select appends: repeated calls accumulate columns.
feq.ctx.Fields = append(feq.ctx.Fields, fields...)
sbuild := &FsEventSelect{FsEventQuery: feq}
sbuild.label = fsevent.Label
sbuild.flds, sbuild.scan = &feq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a FsEventSelect configured with the given aggregations.
func (feq *FsEventQuery) Aggregate(fns ...AggregateFunc) *FsEventSelect {
return feq.Select().Aggregate(fns...)
}
// prepareQuery validates the builder before execution: it runs Traverser
// interceptors, checks that all selected columns exist on the fsevent schema,
// and resolves the edge-traversal path (if any) into the intermediate SQL selector.
func (feq *FsEventQuery) prepareQuery(ctx context.Context) error {
for _, inter := range feq.inters {
// A nil interceptor indicates the runtime package was never imported.
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, feq); err != nil {
return err
}
}
}
for _, f := range feq.ctx.Fields {
if !fsevent.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
}
if feq.path != nil {
// Materialize the traversal chain into a selector used as the FROM clause.
prev, err := feq.path(ctx)
if err != nil {
return err
}
feq.sql = prev
}
return nil
}
// sqlAll executes the query against the driver, scans all rows into FsEvent
// nodes, and eager-loads the "user" edge when requested via WithUser.
func (feq *FsEventQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*FsEvent, error) {
var (
nodes = []*FsEvent{}
_spec = feq.querySpec()
// One flag per edge; tells each node which edges were eager-loaded.
loadedTypes = [1]bool{
feq.withUser != nil,
}
)
_spec.ScanValues = func(columns []string) ([]any, error) {
return (*FsEvent).scanValues(nil, columns)
}
// Assign is invoked once per row: it appends a new node and fills its fields.
_spec.Assign = func(columns []string, values []any) error {
node := &FsEvent{config: feq.config}
nodes = append(nodes, node)
node.Edges.loadedTypes = loadedTypes
return node.assignValues(columns, values)
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, feq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
if query := feq.withUser; query != nil {
if err := feq.loadUser(ctx, query, nodes, nil,
func(n *FsEvent, e *User) { n.Edges.User = e }); err != nil {
return nil, err
}
}
return nodes, nil
}
// loadUser eager-loads the M2O "user" edge for the given nodes: it groups the
// nodes by their user_fsevent foreign key, fetches the matching users in a
// single IN query, and wires each user back onto its nodes via assign.
// init is unused for this edge (no collection to initialize) and may be nil.
func (feq *FsEventQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*FsEvent, init func(*FsEvent), assign func(*FsEvent, *User)) error {
ids := make([]int, 0, len(nodes))
nodeids := make(map[int][]*FsEvent)
for i := range nodes {
fk := nodes[i].UserFsevent
// ids keeps only distinct FK values; nodeids maps FK -> owning nodes.
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
if len(ids) == 0 {
return nil
}
query.Where(user.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
return err
}
for _, n := range neighbors {
nodes, ok := nodeids[n.ID]
if !ok {
return fmt.Errorf(`unexpected foreign-key "user_fsevent" returned %v`, n.ID)
}
for i := range nodes {
assign(nodes[i], n)
}
}
return nil
}
func (feq *FsEventQuery) sqlCount(ctx context.Context) (int, error) {
_spec := feq.querySpec()
_spec.Node.Columns = feq.ctx.Fields
if len(feq.ctx.Fields) > 0 {
_spec.Unique = feq.ctx.Unique != nil && *feq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, feq.driver, _spec)
}
// querySpec translates the builder's state (selected columns, predicates,
// limit/offset, ordering) into a sqlgraph.QuerySpec consumed by the driver.
func (feq *FsEventQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
_spec.From = feq.sql
if unique := feq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if feq.path != nil {
// Edge traversals default to DISTINCT to avoid duplicate rows.
_spec.Unique = true
}
if fields := feq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
// The ID column is always selected so nodes can be identified.
_spec.Node.Columns = append(_spec.Node.Columns, fsevent.FieldID)
for i := range fields {
if fields[i] != fsevent.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
// Eager-loading "user" needs the FK column even if not explicitly selected.
if feq.withUser != nil {
_spec.Node.AddColumnOnce(fsevent.FieldUserFsevent)
}
}
if ps := feq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := feq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := feq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := feq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
// sqlQuery builds the raw SQL selector used by GroupBy/Select scans, applying
// column selection, DISTINCT, predicates, ordering, and limit/offset.
func (feq *FsEventQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(feq.driver.Dialect())
t1 := builder.Table(fsevent.Table)
columns := feq.ctx.Fields
if len(columns) == 0 {
columns = fsevent.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
// An intermediate selector from an edge traversal replaces the plain table scan.
if feq.sql != nil {
selector = feq.sql
selector.Select(selector.Columns(columns...)...)
}
if feq.ctx.Unique != nil && *feq.ctx.Unique {
selector.Distinct()
}
for _, p := range feq.predicates {
p(selector)
}
for _, p := range feq.order {
p(selector)
}
if offset := feq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := feq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// FsEventGroupBy is the group-by builder for FsEvent entities.
type FsEventGroupBy struct {
selector
build *FsEventQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (fegb *FsEventGroupBy) Aggregate(fns ...AggregateFunc) *FsEventGroupBy {
fegb.fns = append(fegb.fns, fns...)
return fegb
}
// Scan applies the selector query and scans the result into the given value.
func (fegb *FsEventGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, fegb.build.ctx, "GroupBy")
if err := fegb.build.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*FsEventQuery, *FsEventGroupBy](ctx, fegb.build, fegb, fegb.build.inters, v)
}
// sqlScan builds and executes the GROUP BY query, scanning the result rows
// (group-by columns followed by aggregation columns) into v.
func (fegb *FsEventGroupBy) sqlScan(ctx context.Context, root *FsEventQuery, v any) error {
// Start from the root query's selector with its column list cleared.
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(fegb.fns))
for _, fn := range fegb.fns {
aggregation = append(aggregation, fn(selector))
}
if len(selector.SelectedColumns()) == 0 {
// Default projection: the group-by fields first, then the aggregations.
columns := make([]string, 0, len(*fegb.flds)+len(fegb.fns))
for _, f := range *fegb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
selector.GroupBy(selector.Columns(*fegb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := fegb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// FsEventSelect is the builder for selecting fields of FsEvent entities.
type FsEventSelect struct {
*FsEventQuery
selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (fes *FsEventSelect) Aggregate(fns ...AggregateFunc) *FsEventSelect {
fes.fns = append(fes.fns, fns...)
return fes
}
// Scan applies the selector query and scans the result into the given value.
func (fes *FsEventSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, fes.ctx, "Select")
if err := fes.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*FsEventQuery, *FsEventSelect](ctx, fes.FsEventQuery, fes, fes.inters, v)
}
// sqlScan executes the field-selection query, optionally appending aggregation
// expressions to the projection, and scans the rows into v.
func (fes *FsEventSelect) sqlScan(ctx context.Context, root *FsEventQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(fes.fns))
for _, fn := range fes.fns {
aggregation = append(aggregation, fn(selector))
}
// Aggregations either form the whole projection (no fields selected)
// or are appended after the explicitly selected fields.
switch n := len(*fes.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := fes.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}

494
ent/fsevent_update.go Normal file
View File

@ -0,0 +1,494 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/gofrs/uuid"
)
// FsEventUpdate is the builder for updating FsEvent entities.
type FsEventUpdate struct {
config
hooks []Hook
mutation *FsEventMutation
}
// Where appends a list predicates to the FsEventUpdate builder.
func (feu *FsEventUpdate) Where(ps ...predicate.FsEvent) *FsEventUpdate {
feu.mutation.Where(ps...)
return feu
}
// SetUpdatedAt sets the "updated_at" field.
func (feu *FsEventUpdate) SetUpdatedAt(t time.Time) *FsEventUpdate {
feu.mutation.SetUpdatedAt(t)
return feu
}
// SetDeletedAt sets the "deleted_at" field.
func (feu *FsEventUpdate) SetDeletedAt(t time.Time) *FsEventUpdate {
feu.mutation.SetDeletedAt(t)
return feu
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableDeletedAt(t *time.Time) *FsEventUpdate {
if t != nil {
feu.SetDeletedAt(*t)
}
return feu
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (feu *FsEventUpdate) ClearDeletedAt() *FsEventUpdate {
feu.mutation.ClearDeletedAt()
return feu
}
// SetEvent sets the "event" field.
func (feu *FsEventUpdate) SetEvent(s string) *FsEventUpdate {
feu.mutation.SetEvent(s)
return feu
}
// SetNillableEvent sets the "event" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableEvent(s *string) *FsEventUpdate {
if s != nil {
feu.SetEvent(*s)
}
return feu
}
// SetSubscriber sets the "subscriber" field.
func (feu *FsEventUpdate) SetSubscriber(u uuid.UUID) *FsEventUpdate {
feu.mutation.SetSubscriber(u)
return feu
}
// SetNillableSubscriber sets the "subscriber" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableSubscriber(u *uuid.UUID) *FsEventUpdate {
if u != nil {
feu.SetSubscriber(*u)
}
return feu
}
// SetUserFsevent sets the "user_fsevent" field.
func (feu *FsEventUpdate) SetUserFsevent(i int) *FsEventUpdate {
feu.mutation.SetUserFsevent(i)
return feu
}
// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableUserFsevent(i *int) *FsEventUpdate {
if i != nil {
feu.SetUserFsevent(*i)
}
return feu
}
// ClearUserFsevent clears the value of the "user_fsevent" field.
func (feu *FsEventUpdate) ClearUserFsevent() *FsEventUpdate {
feu.mutation.ClearUserFsevent()
return feu
}
// SetUserID sets the "user" edge to the User entity by ID.
func (feu *FsEventUpdate) SetUserID(id int) *FsEventUpdate {
feu.mutation.SetUserID(id)
return feu
}
// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (feu *FsEventUpdate) SetNillableUserID(id *int) *FsEventUpdate {
if id != nil {
feu = feu.SetUserID(*id)
}
return feu
}
// SetUser sets the "user" edge to the User entity.
func (feu *FsEventUpdate) SetUser(u *User) *FsEventUpdate {
return feu.SetUserID(u.ID)
}
// Mutation returns the FsEventMutation object of the builder.
func (feu *FsEventUpdate) Mutation() *FsEventMutation {
return feu.mutation
}
// ClearUser clears the "user" edge to the User entity.
func (feu *FsEventUpdate) ClearUser() *FsEventUpdate {
feu.mutation.ClearUser()
return feu
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (feu *FsEventUpdate) Save(ctx context.Context) (int, error) {
if err := feu.defaults(); err != nil {
return 0, err
}
return withHooks(ctx, feu.sqlSave, feu.mutation, feu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (feu *FsEventUpdate) SaveX(ctx context.Context) int {
affected, err := feu.Save(ctx)
if err != nil {
panic(err)
}
return affected
}
// Exec executes the query.
func (feu *FsEventUpdate) Exec(ctx context.Context) error {
_, err := feu.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (feu *FsEventUpdate) ExecX(ctx context.Context) {
if err := feu.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (feu *FsEventUpdate) defaults() error {
// Auto-populate updated_at unless the caller set it explicitly.
if _, ok := feu.mutation.UpdatedAt(); !ok {
// The default func is wired in by ent/runtime init; nil means it wasn't imported.
if fsevent.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized fsevent.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := fsevent.UpdateDefaultUpdatedAt()
feu.mutation.SetUpdatedAt(v)
}
return nil
}
// sqlSave translates the accumulated mutation into an UpdateSpec (predicates,
// changed scalar fields, user-edge clear/add) and executes it, returning the
// number of affected rows. sqlgraph errors are mapped to ent error types.
func (feu *FsEventUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := sqlgraph.NewUpdateSpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
if ps := feu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
// Only fields recorded on the mutation are written; untouched columns stay as-is.
if value, ok := feu.mutation.UpdatedAt(); ok {
_spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := feu.mutation.DeletedAt(); ok {
_spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
}
if feu.mutation.DeletedAtCleared() {
_spec.ClearField(fsevent.FieldDeletedAt, field.TypeTime)
}
if value, ok := feu.mutation.Event(); ok {
_spec.SetField(fsevent.FieldEvent, field.TypeString, value)
}
if value, ok := feu.mutation.Subscriber(); ok {
_spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
}
// The "user" edge is an inverse M2O stored in the user_fsevent FK column:
// clearing NULLs it, adding sets it to the new user ID.
if feu.mutation.UserCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: fsevent.UserTable,
Columns: []string{fsevent.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := feu.mutation.UserIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: fsevent.UserTable,
Columns: []string{fsevent.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if n, err = sqlgraph.UpdateNodes(ctx, feu.driver, _spec); err != nil {
// Normalize driver-level errors into the package's error types.
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{fsevent.Label}
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return 0, err
}
feu.mutation.done = true
return n, nil
}
// FsEventUpdateOne is the builder for updating a single FsEvent entity.
type FsEventUpdateOne struct {
config
fields []string
hooks []Hook
mutation *FsEventMutation
}
// SetUpdatedAt sets the "updated_at" field.
func (feuo *FsEventUpdateOne) SetUpdatedAt(t time.Time) *FsEventUpdateOne {
feuo.mutation.SetUpdatedAt(t)
return feuo
}
// SetDeletedAt sets the "deleted_at" field.
func (feuo *FsEventUpdateOne) SetDeletedAt(t time.Time) *FsEventUpdateOne {
feuo.mutation.SetDeletedAt(t)
return feuo
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableDeletedAt(t *time.Time) *FsEventUpdateOne {
if t != nil {
feuo.SetDeletedAt(*t)
}
return feuo
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (feuo *FsEventUpdateOne) ClearDeletedAt() *FsEventUpdateOne {
feuo.mutation.ClearDeletedAt()
return feuo
}
// SetEvent sets the "event" field.
func (feuo *FsEventUpdateOne) SetEvent(s string) *FsEventUpdateOne {
feuo.mutation.SetEvent(s)
return feuo
}
// SetNillableEvent sets the "event" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableEvent(s *string) *FsEventUpdateOne {
if s != nil {
feuo.SetEvent(*s)
}
return feuo
}
// SetSubscriber sets the "subscriber" field.
func (feuo *FsEventUpdateOne) SetSubscriber(u uuid.UUID) *FsEventUpdateOne {
feuo.mutation.SetSubscriber(u)
return feuo
}
// SetNillableSubscriber sets the "subscriber" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableSubscriber(u *uuid.UUID) *FsEventUpdateOne {
if u != nil {
feuo.SetSubscriber(*u)
}
return feuo
}
// SetUserFsevent sets the "user_fsevent" field.
func (feuo *FsEventUpdateOne) SetUserFsevent(i int) *FsEventUpdateOne {
feuo.mutation.SetUserFsevent(i)
return feuo
}
// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableUserFsevent(i *int) *FsEventUpdateOne {
if i != nil {
feuo.SetUserFsevent(*i)
}
return feuo
}
// ClearUserFsevent clears the value of the "user_fsevent" field.
func (feuo *FsEventUpdateOne) ClearUserFsevent() *FsEventUpdateOne {
feuo.mutation.ClearUserFsevent()
return feuo
}
// SetUserID sets the "user" edge to the User entity by ID.
func (feuo *FsEventUpdateOne) SetUserID(id int) *FsEventUpdateOne {
feuo.mutation.SetUserID(id)
return feuo
}
// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableUserID(id *int) *FsEventUpdateOne {
if id != nil {
feuo = feuo.SetUserID(*id)
}
return feuo
}
// SetUser sets the "user" edge to the User entity.
func (feuo *FsEventUpdateOne) SetUser(u *User) *FsEventUpdateOne {
return feuo.SetUserID(u.ID)
}
// Mutation returns the FsEventMutation object of the builder.
func (feuo *FsEventUpdateOne) Mutation() *FsEventMutation {
return feuo.mutation
}
// ClearUser clears the "user" edge to the User entity.
func (feuo *FsEventUpdateOne) ClearUser() *FsEventUpdateOne {
feuo.mutation.ClearUser()
return feuo
}
// Where appends a list predicates to the FsEventUpdate builder.
func (feuo *FsEventUpdateOne) Where(ps ...predicate.FsEvent) *FsEventUpdateOne {
feuo.mutation.Where(ps...)
return feuo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (feuo *FsEventUpdateOne) Select(field string, fields ...string) *FsEventUpdateOne {
feuo.fields = append([]string{field}, fields...)
return feuo
}
// Save executes the query and returns the updated FsEvent entity.
func (feuo *FsEventUpdateOne) Save(ctx context.Context) (*FsEvent, error) {
if err := feuo.defaults(); err != nil {
return nil, err
}
return withHooks(ctx, feuo.sqlSave, feuo.mutation, feuo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (feuo *FsEventUpdateOne) SaveX(ctx context.Context) *FsEvent {
node, err := feuo.Save(ctx)
if err != nil {
panic(err)
}
return node
}
// Exec executes the query on the entity.
func (feuo *FsEventUpdateOne) Exec(ctx context.Context) error {
_, err := feuo.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (feuo *FsEventUpdateOne) ExecX(ctx context.Context) {
if err := feuo.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (feuo *FsEventUpdateOne) defaults() error {
if _, ok := feuo.mutation.UpdatedAt(); !ok {
if fsevent.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized fsevent.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := fsevent.UpdateDefaultUpdatedAt()
feuo.mutation.SetUpdatedAt(v)
}
return nil
}
// sqlSave updates a single FsEvent identified by the mutation's ID and returns
// the updated entity. Unlike FsEventUpdate.sqlSave it requires an ID, supports
// a caller-selected column subset for the returned row, and scans the result
// back into a node via Assign/ScanValues.
func (feuo *FsEventUpdateOne) sqlSave(ctx context.Context) (_node *FsEvent, err error) {
_spec := sqlgraph.NewUpdateSpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
id, ok := feuo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "FsEvent.id" for update`)}
}
_spec.Node.ID.Value = id
if fields := feuo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
// The ID column is always returned regardless of the selection.
_spec.Node.Columns = append(_spec.Node.Columns, fsevent.FieldID)
for _, f := range fields {
if !fsevent.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != fsevent.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := feuo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
// Only fields recorded on the mutation are written.
if value, ok := feuo.mutation.UpdatedAt(); ok {
_spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := feuo.mutation.DeletedAt(); ok {
_spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
}
if feuo.mutation.DeletedAtCleared() {
_spec.ClearField(fsevent.FieldDeletedAt, field.TypeTime)
}
if value, ok := feuo.mutation.Event(); ok {
_spec.SetField(fsevent.FieldEvent, field.TypeString, value)
}
if value, ok := feuo.mutation.Subscriber(); ok {
_spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
}
// Inverse M2O "user" edge backed by the user_fsevent FK column.
if feuo.mutation.UserCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: fsevent.UserTable,
Columns: []string{fsevent.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := feuo.mutation.UserIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: fsevent.UserTable,
Columns: []string{fsevent.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
// The updated row is scanned back into _node via these callbacks.
_node = &FsEvent{config: feuo.config}
_spec.Assign = _node.assignValues
_spec.ScanValues = _node.scanValues
if err = sqlgraph.UpdateNode(ctx, feuo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{fsevent.Label}
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return nil, err
}
feuo.mutation.done = true
return _node, nil
}

View File

@ -57,6 +57,18 @@ func (f FileFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileMutation", m)
}
// The FsEventFunc type is an adapter to allow the use of ordinary
// function as FsEvent mutator.
type FsEventFunc func(context.Context, *ent.FsEventMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f FsEventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if mv, ok := m.(*ent.FsEventMutation); ok {
return f(ctx, mv)
}
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FsEventMutation", m)
}
// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)

View File

@ -12,6 +12,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
@ -188,6 +189,33 @@ func (f TraverseFile) Traverse(ctx context.Context, q ent.Query) error {
return fmt.Errorf("unexpected query type %T. expect *ent.FileQuery", q)
}
// The FsEventFunc type is an adapter to allow the use of ordinary function as a Querier.
type FsEventFunc func(context.Context, *ent.FsEventQuery) (ent.Value, error)
// Query calls f(ctx, q).
func (f FsEventFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
if q, ok := q.(*ent.FsEventQuery); ok {
return f(ctx, q)
}
return nil, fmt.Errorf("unexpected query type %T. expect *ent.FsEventQuery", q)
}
// The TraverseFsEvent type is an adapter to allow the use of ordinary function as Traverser.
type TraverseFsEvent func(context.Context, *ent.FsEventQuery) error
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseFsEvent) Intercept(next ent.Querier) ent.Querier {
return next
}
// Traverse calls f(ctx, q).
func (f TraverseFsEvent) Traverse(ctx context.Context, q ent.Query) error {
if q, ok := q.(*ent.FsEventQuery); ok {
return f(ctx, q)
}
return fmt.Errorf("unexpected query type %T. expect *ent.FsEventQuery", q)
}
// The GroupFunc type is an adapter to allow the use of ordinary function as a Querier.
type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error)
@ -442,6 +470,8 @@ func NewQuery(q ent.Query) (Query, error) {
return &query[*ent.EntityQuery, predicate.Entity, entity.OrderOption]{typ: ent.TypeEntity, tq: q}, nil
case *ent.FileQuery:
return &query[*ent.FileQuery, predicate.File, file.OrderOption]{typ: ent.TypeFile, tq: q}, nil
case *ent.FsEventQuery:
return &query[*ent.FsEventQuery, predicate.FsEvent, fsevent.OrderOption]{typ: ent.TypeFsEvent, tq: q}, nil
case *ent.GroupQuery:
return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
case *ent.MetadataQuery:

File diff suppressed because one or more lines are too long

View File

@ -160,6 +160,30 @@ var (
},
},
}
// FsEventsColumns holds the columns for the "fs_events" table.
FsEventsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "event", Type: field.TypeString, Size: 2147483647},
{Name: "subscriber", Type: field.TypeUUID},
{Name: "user_fsevent", Type: field.TypeInt, Nullable: true},
}
// FsEventsTable holds the schema information for the "fs_events" table.
FsEventsTable = &schema.Table{
Name: "fs_events",
Columns: FsEventsColumns,
PrimaryKey: []*schema.Column{FsEventsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "fs_events_users_fsevents",
Columns: []*schema.Column{FsEventsColumns[6]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// GroupsColumns holds the columns for the "groups" table.
GroupsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
@ -444,6 +468,7 @@ var (
DirectLinksTable,
EntitiesTable,
FilesTable,
FsEventsTable,
GroupsTable,
MetadataTable,
NodesTable,
@ -465,6 +490,7 @@ func init() {
FilesTable.ForeignKeys[0].RefTable = FilesTable
FilesTable.ForeignKeys[1].RefTable = StoragePoliciesTable
FilesTable.ForeignKeys[2].RefTable = UsersTable
FsEventsTable.ForeignKeys[0].RefTable = UsersTable
GroupsTable.ForeignKeys[0].RefTable = StoragePoliciesTable
MetadataTable.ForeignKeys[0].RefTable = FilesTable
PasskeysTable.ForeignKeys[0].RefTable = UsersTable

File diff suppressed because it is too large Load Diff

View File

@ -28,6 +28,12 @@ func (m *FileMutation) SetRawID(t int) {
// SetUpdatedAt sets the "updated_at" field.
func (m *FsEventMutation) SetRawID(t int) {
m.id = &t
}
// SetUpdatedAt sets the "updated_at" field.
func (m *GroupMutation) SetRawID(t int) {
m.id = &t
}

View File

@ -18,6 +18,9 @@ type Entity func(*sql.Selector)
// File is the predicate function for file builders.
type File func(*sql.Selector)
// FsEvent is the predicate function for fsevent builders.
type FsEvent func(*sql.Selector)
// Group is the predicate function for group builders.
type Group func(*sql.Selector)

View File

@ -9,6 +9,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
@ -107,6 +108,25 @@ func init() {
fileDescIsSymbolic := fileFields[8].Descriptor()
// file.DefaultIsSymbolic holds the default value on creation for the is_symbolic field.
file.DefaultIsSymbolic = fileDescIsSymbolic.Default.(bool)
fseventMixin := schema.FsEvent{}.Mixin()
fseventMixinHooks0 := fseventMixin[0].Hooks()
fsevent.Hooks[0] = fseventMixinHooks0[0]
fseventMixinInters0 := fseventMixin[0].Interceptors()
fsevent.Interceptors[0] = fseventMixinInters0[0]
fseventMixinFields0 := fseventMixin[0].Fields()
_ = fseventMixinFields0
fseventFields := schema.FsEvent{}.Fields()
_ = fseventFields
// fseventDescCreatedAt is the schema descriptor for created_at field.
fseventDescCreatedAt := fseventMixinFields0[0].Descriptor()
// fsevent.DefaultCreatedAt holds the default value on creation for the created_at field.
fsevent.DefaultCreatedAt = fseventDescCreatedAt.Default.(func() time.Time)
// fseventDescUpdatedAt is the schema descriptor for updated_at field.
fseventDescUpdatedAt := fseventMixinFields0[1].Descriptor()
// fsevent.DefaultUpdatedAt holds the default value on creation for the updated_at field.
fsevent.DefaultUpdatedAt = fseventDescUpdatedAt.Default.(func() time.Time)
// fsevent.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
fsevent.UpdateDefaultUpdatedAt = fseventDescUpdatedAt.UpdateDefault.(func() time.Time)
groupMixin := schema.Group{}.Mixin()
groupMixinHooks0 := groupMixin[0].Hooks()
group.Hooks[0] = groupMixinHooks0[0]

View File

@ -25,8 +25,9 @@ func (Entity) Fields() []ent.Field {
field.UUID("upload_session_id", uuid.Must(uuid.NewV4())).
Optional().
Nillable(),
field.JSON("recycle_options", &types.EntityRecycleOption{}).
Optional(),
field.JSON("props", &types.EntityProps{}).
Optional().
StorageKey("recycle_options"),
}
}

38
ent/schema/fsevent.go Normal file
View File

@ -0,0 +1,38 @@
package schema

import (
	"entgo.io/ent"
	"entgo.io/ent/schema/edge"
	"entgo.io/ent/schema/field"
	"github.com/gofrs/uuid"
)

// FsEvent holds the schema definition for the FsEvent entity, which
// stores a filesystem change event addressed to a subscriber.
type FsEvent struct {
	ent.Schema
}

// Fields of the FsEvent.
func (FsEvent) Fields() []ent.Field {
	return []ent.Field{
		// Serialized event payload.
		field.Text("event"),
		// Subscriber identifier. Per ent's field.UUID API, the second
		// argument only supplies the Go type for codegen; it is not a
		// stored default value.
		field.UUID("subscriber", uuid.Must(uuid.NewV4())),
		// Explicit FK column backing the user->fsevents edge; Optional
		// so the owner may be absent (the migration uses ON DELETE SET NULL).
		field.Int("user_fsevent").Optional(),
	}
}

// Edges of the FsEvent. (The previous comment said "Edges of the Task",
// a copy-paste error.)
func (FsEvent) Edges() []ent.Edge {
	return []ent.Edge{
		edge.From("user", User.Type).
			Ref("fsevents").
			Field("user_fsevent").
			Unique(),
	}
}

// Mixin of the FsEvent. CommonMixin adds the shared created_at /
// updated_at bookkeeping used by the other entities in this package.
func (FsEvent) Mixin() []ent.Mixin {
	return []ent.Mixin{
		CommonMixin{},
	}
}

View File

@ -51,6 +51,7 @@ func (User) Edges() []ent.Edge {
edge.To("shares", Share.Type),
edge.To("passkey", Passkey.Type),
edge.To("tasks", Task.Type),
edge.To("fsevents", FsEvent.Type),
edge.To("entities", Entity.Type),
}
}

View File

@ -22,6 +22,8 @@ type Tx struct {
Entity *EntityClient
// File is the client for interacting with the File builders.
File *FileClient
// FsEvent is the client for interacting with the FsEvent builders.
FsEvent *FsEventClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// Metadata is the client for interacting with the Metadata builders.
@ -175,6 +177,7 @@ func (tx *Tx) init() {
tx.DirectLink = NewDirectLinkClient(tx.config)
tx.Entity = NewEntityClient(tx.config)
tx.File = NewFileClient(tx.config)
tx.FsEvent = NewFsEventClient(tx.config)
tx.Group = NewGroupClient(tx.config)
tx.Metadata = NewMetadataClient(tx.config)
tx.Node = NewNodeClient(tx.config)

View File

@ -64,11 +64,13 @@ type UserEdges struct {
Passkey []*Passkey `json:"passkey,omitempty"`
// Tasks holds the value of the tasks edge.
Tasks []*Task `json:"tasks,omitempty"`
// Fsevents holds the value of the fsevents edge.
Fsevents []*FsEvent `json:"fsevents,omitempty"`
// Entities holds the value of the entities edge.
Entities []*Entity `json:"entities,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [7]bool
loadedTypes [8]bool
}
// GroupOrErr returns the Group value or an error if the edge
@ -129,10 +131,19 @@ func (e UserEdges) TasksOrErr() ([]*Task, error) {
return nil, &NotLoadedError{edge: "tasks"}
}
// FseventsOrErr returns the Fsevents value or an error if the edge
// was not loaded in eager-loading.
func (e UserEdges) FseventsOrErr() ([]*FsEvent, error) {
	// Index 6 is the fsevents edge's slot in loadedTypes.
	if e.loadedTypes[6] {
		return e.Fsevents, nil
	}
	return nil, &NotLoadedError{edge: "fsevents"}
}
// EntitiesOrErr returns the Entities value or an error if the edge
// was not loaded in eager-loading.
func (e UserEdges) EntitiesOrErr() ([]*Entity, error) {
if e.loadedTypes[6] {
if e.loadedTypes[7] {
return e.Entities, nil
}
return nil, &NotLoadedError{edge: "entities"}
@ -290,6 +301,11 @@ func (u *User) QueryTasks() *TaskQuery {
return NewUserClient(u.config).QueryTasks(u)
}
// QueryFsevents queries the "fsevents" edge of the User entity.
func (u *User) QueryFsevents() *FsEventQuery {
return NewUserClient(u.config).QueryFsevents(u)
}
// QueryEntities queries the "entities" edge of the User entity.
func (u *User) QueryEntities() *EntityQuery {
return NewUserClient(u.config).QueryEntities(u)
@ -393,10 +409,16 @@ func (e *User) SetTasks(v []*Task) {
e.Edges.loadedTypes[5] = true
}
// SetFsevents manually set the edge as loaded state.
func (e *User) SetFsevents(v []*FsEvent) {
	e.Edges.Fsevents = v
	// Mark slot 6 (fsevents) loaded so FseventsOrErr succeeds.
	e.Edges.loadedTypes[6] = true
}
// SetEntities manually set the edge as loaded state.
func (e *User) SetEntities(v []*Entity) {
e.Edges.Entities = v
e.Edges.loadedTypes[6] = true
e.Edges.loadedTypes[7] = true
}
// Users is a parsable slice of User.

View File

@ -53,6 +53,8 @@ const (
EdgePasskey = "passkey"
// EdgeTasks holds the string denoting the tasks edge name in mutations.
EdgeTasks = "tasks"
// EdgeFsevents holds the string denoting the fsevents edge name in mutations.
EdgeFsevents = "fsevents"
// EdgeEntities holds the string denoting the entities edge name in mutations.
EdgeEntities = "entities"
// Table holds the table name of the user in the database.
@ -99,6 +101,13 @@ const (
TasksInverseTable = "tasks"
// TasksColumn is the table column denoting the tasks relation/edge.
TasksColumn = "user_tasks"
// FseventsTable is the table that holds the fsevents relation/edge.
FseventsTable = "fs_events"
// FseventsInverseTable is the table name for the FsEvent entity.
// It exists in this package in order to avoid circular dependency with the "fsevent" package.
FseventsInverseTable = "fs_events"
// FseventsColumn is the table column denoting the fsevents relation/edge.
FseventsColumn = "user_fsevent"
// EntitiesTable is the table that holds the entities relation/edge.
EntitiesTable = "entities"
// EntitiesInverseTable is the table name for the Entity entity.
@ -327,6 +336,20 @@ func ByTasks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
}
}
// ByFseventsCount orders the results by fsevents count.
func ByFseventsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newFseventsStep(), opts...)
	}
}

// ByFsevents orders the results by fsevents terms.
func ByFsevents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newFseventsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}
// ByEntitiesCount orders the results by entities count.
func ByEntitiesCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
@ -382,6 +405,13 @@ func newTasksStep() *sqlgraph.Step {
sqlgraph.Edge(sqlgraph.O2M, false, TasksTable, TasksColumn),
)
}
// newFseventsStep builds the O2M graph step from users.id to the
// fs_events table over the user_fsevent FK column.
func newFseventsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(FseventsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, FseventsTable, FseventsColumn),
	)
}
func newEntitiesStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),

View File

@ -818,6 +818,29 @@ func HasTasksWith(preds ...predicate.Task) predicate.User {
})
}
// HasFsevents applies the HasEdge predicate on the "fsevents" edge.
func HasFsevents() predicate.User {
	return predicate.User(func(s *sql.Selector) {
		// O2M step from users.id over the fs_events.user_fsevent FK.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, FseventsTable, FseventsColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}
// HasFseventsWith applies the HasEdge predicate on the "fsevents" edge with a given conditions (other predicates).
func HasFseventsWith(preds ...predicate.FsEvent) predicate.User {
	return predicate.User(func(s *sql.Selector) {
		step := newFseventsStep()
		// Apply the caller's FsEvent predicates inside the neighbor subquery.
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}
// HasEntities applies the HasEdge predicate on the "entities" edge.
func HasEntities() predicate.User {
return predicate.User(func(s *sql.Selector) {

View File

@ -14,6 +14,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
"github.com/cloudreve/Cloudreve/v4/ent/share"
@ -252,6 +253,21 @@ func (uc *UserCreate) AddTasks(t ...*Task) *UserCreate {
return uc.AddTaskIDs(ids...)
}
// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
func (uc *UserCreate) AddFseventIDs(ids ...int) *UserCreate {
	uc.mutation.AddFseventIDs(ids...)
	return uc
}

// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uc *UserCreate) AddFsevents(f ...*FsEvent) *UserCreate {
	// Collect entity IDs and delegate to the ID-based variant.
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uc.AddFseventIDs(ids...)
}
// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uc *UserCreate) AddEntityIDs(ids ...int) *UserCreate {
uc.mutation.AddEntityIDs(ids...)
@ -549,6 +565,22 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
}
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := uc.mutation.FseventsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := uc.mutation.EntitiesIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,

View File

@ -14,6 +14,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
@ -35,6 +36,7 @@ type UserQuery struct {
withShares *ShareQuery
withPasskey *PasskeyQuery
withTasks *TaskQuery
withFsevents *FsEventQuery
withEntities *EntityQuery
// intermediate query (i.e. traversal path).
sql *sql.Selector
@ -204,6 +206,28 @@ func (uq *UserQuery) QueryTasks() *TaskQuery {
return query
}
// QueryFsevents chains the current query on the "fsevents" edge.
func (uq *UserQuery) QueryFsevents() *FsEventQuery {
	query := (&FsEventClient{config: uq.config}).Query()
	// Building the traversal is deferred to execution time so that the
	// base user query is prepared (and validated) first.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := uq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := uq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(user.Table, user.FieldID, selector),
			sqlgraph.To(fsevent.Table, fsevent.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, user.FseventsTable, user.FseventsColumn),
		)
		fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// QueryEntities chains the current query on the "entities" edge.
func (uq *UserQuery) QueryEntities() *EntityQuery {
query := (&EntityClient{config: uq.config}).Query()
@ -424,6 +448,7 @@ func (uq *UserQuery) Clone() *UserQuery {
withShares: uq.withShares.Clone(),
withPasskey: uq.withPasskey.Clone(),
withTasks: uq.withTasks.Clone(),
withFsevents: uq.withFsevents.Clone(),
withEntities: uq.withEntities.Clone(),
// clone intermediate query.
sql: uq.sql.Clone(),
@ -497,6 +522,17 @@ func (uq *UserQuery) WithTasks(opts ...func(*TaskQuery)) *UserQuery {
return uq
}
// WithFsevents tells the query-builder to eager-load the nodes that are connected to
// the "fsevents" edge. The optional arguments are used to configure the query builder of the edge.
func (uq *UserQuery) WithFsevents(opts ...func(*FsEventQuery)) *UserQuery {
	query := (&FsEventClient{config: uq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	uq.withFsevents = query
	return uq
}
// WithEntities tells the query-builder to eager-load the nodes that are connected to
// the "entities" edge. The optional arguments are used to configure the query builder of the edge.
func (uq *UserQuery) WithEntities(opts ...func(*EntityQuery)) *UserQuery {
@ -586,13 +622,14 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
var (
nodes = []*User{}
_spec = uq.querySpec()
loadedTypes = [7]bool{
loadedTypes = [8]bool{
uq.withGroup != nil,
uq.withFiles != nil,
uq.withDavAccounts != nil,
uq.withShares != nil,
uq.withPasskey != nil,
uq.withTasks != nil,
uq.withFsevents != nil,
uq.withEntities != nil,
}
)
@ -655,6 +692,13 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
return nil, err
}
}
if query := uq.withFsevents; query != nil {
if err := uq.loadFsevents(ctx, query, nodes,
func(n *User) { n.Edges.Fsevents = []*FsEvent{} },
func(n *User, e *FsEvent) { n.Edges.Fsevents = append(n.Edges.Fsevents, e) }); err != nil {
return nil, err
}
}
if query := uq.withEntities; query != nil {
if err := uq.loadEntities(ctx, query, nodes,
func(n *User) { n.Edges.Entities = []*Entity{} },
@ -845,6 +889,36 @@ func (uq *UserQuery) loadTasks(ctx context.Context, query *TaskQuery, nodes []*U
}
return nil
}
// loadFsevents eager-loads the "fsevents" edge for the given users in a
// single query, assigning each fetched FsEvent back to its owner via
// the user_fsevent FK.
func (uq *UserQuery) loadFsevents(ctx context.Context, query *FsEventQuery, nodes []*User, init func(*User), assign func(*User, *FsEvent)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int]*User)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// If the caller narrowed the selected fields, force-include the FK
	// column so rows can still be mapped back to their parent user.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(fsevent.FieldUserFsevent)
	}
	query.Where(predicate.FsEvent(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(user.FseventsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.UserFsevent
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "user_fsevent" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
func (uq *UserQuery) loadEntities(ctx context.Context, query *EntityQuery, nodes []*User, init func(*User), assign func(*User, *Entity)) error {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[int]*User)

View File

@ -14,6 +14,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
@ -297,6 +298,21 @@ func (uu *UserUpdate) AddTasks(t ...*Task) *UserUpdate {
return uu.AddTaskIDs(ids...)
}
// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
func (uu *UserUpdate) AddFseventIDs(ids ...int) *UserUpdate {
	uu.mutation.AddFseventIDs(ids...)
	return uu
}

// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uu *UserUpdate) AddFsevents(f ...*FsEvent) *UserUpdate {
	// Collect entity IDs and delegate to the ID-based variant.
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uu.AddFseventIDs(ids...)
}
// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uu *UserUpdate) AddEntityIDs(ids ...int) *UserUpdate {
uu.mutation.AddEntityIDs(ids...)
@ -428,6 +444,27 @@ func (uu *UserUpdate) RemoveTasks(t ...*Task) *UserUpdate {
return uu.RemoveTaskIDs(ids...)
}
// ClearFsevents clears all "fsevents" edges to the FsEvent entity.
func (uu *UserUpdate) ClearFsevents() *UserUpdate {
	uu.mutation.ClearFsevents()
	return uu
}

// RemoveFseventIDs removes the "fsevents" edge to FsEvent entities by IDs.
func (uu *UserUpdate) RemoveFseventIDs(ids ...int) *UserUpdate {
	uu.mutation.RemoveFseventIDs(ids...)
	return uu
}

// RemoveFsevents removes "fsevents" edges to FsEvent entities.
func (uu *UserUpdate) RemoveFsevents(f ...*FsEvent) *UserUpdate {
	// Collect entity IDs and delegate to the ID-based remover.
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uu.RemoveFseventIDs(ids...)
}
// ClearEntities clears all "entities" edges to the Entity entity.
func (uu *UserUpdate) ClearEntities() *UserUpdate {
uu.mutation.ClearEntities()
@ -828,6 +865,51 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if uu.mutation.FseventsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := uu.mutation.RemovedFseventsIDs(); len(nodes) > 0 && !uu.mutation.FseventsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := uu.mutation.FseventsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if uu.mutation.EntitiesCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
@ -1154,6 +1236,21 @@ func (uuo *UserUpdateOne) AddTasks(t ...*Task) *UserUpdateOne {
return uuo.AddTaskIDs(ids...)
}
// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
func (uuo *UserUpdateOne) AddFseventIDs(ids ...int) *UserUpdateOne {
	uuo.mutation.AddFseventIDs(ids...)
	return uuo
}

// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uuo *UserUpdateOne) AddFsevents(f ...*FsEvent) *UserUpdateOne {
	// Collect entity IDs and delegate to the ID-based variant.
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uuo.AddFseventIDs(ids...)
}
// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uuo *UserUpdateOne) AddEntityIDs(ids ...int) *UserUpdateOne {
uuo.mutation.AddEntityIDs(ids...)
@ -1285,6 +1382,27 @@ func (uuo *UserUpdateOne) RemoveTasks(t ...*Task) *UserUpdateOne {
return uuo.RemoveTaskIDs(ids...)
}
// ClearFsevents clears all "fsevents" edges to the FsEvent entity.
func (uuo *UserUpdateOne) ClearFsevents() *UserUpdateOne {
	uuo.mutation.ClearFsevents()
	return uuo
}

// RemoveFseventIDs removes the "fsevents" edge to FsEvent entities by IDs.
func (uuo *UserUpdateOne) RemoveFseventIDs(ids ...int) *UserUpdateOne {
	uuo.mutation.RemoveFseventIDs(ids...)
	return uuo
}

// RemoveFsevents removes "fsevents" edges to FsEvent entities.
func (uuo *UserUpdateOne) RemoveFsevents(f ...*FsEvent) *UserUpdateOne {
	// Collect entity IDs and delegate to the ID-based remover.
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uuo.RemoveFseventIDs(ids...)
}
// ClearEntities clears all "entities" edges to the Entity entity.
func (uuo *UserUpdateOne) ClearEntities() *UserUpdateOne {
uuo.mutation.ClearEntities()
@ -1715,6 +1833,51 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if uuo.mutation.FseventsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := uuo.mutation.RemovedFseventsIDs(); len(nodes) > 0 && !uuo.mutation.FseventsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := uuo.mutation.FseventsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if uuo.mutation.EntitiesCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,

61
go.mod
View File

@ -1,11 +1,12 @@
module github.com/cloudreve/Cloudreve/v4
go 1.23.0
go 1.24.0
toolchain go1.24.9
require (
entgo.io/ent v0.13.0
github.com/Masterminds/semver/v3 v3.3.1
github.com/abslant/gzip v0.0.9
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0
github.com/aws/aws-sdk-go v1.31.5
github.com/bodgit/sevenzip v1.6.0
@ -18,12 +19,13 @@ require (
github.com/dsoprea/go-tiff-image-structure v0.0.0-20221003165014-8ecc4f52edca
github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf
github.com/fatih/color v1.18.0
github.com/gin-contrib/cors v1.3.0
github.com/gin-contrib/cors v1.6.0
github.com/gin-contrib/gzip v1.2.4
github.com/gin-contrib/sessions v1.0.2
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2
github.com/gin-gonic/gin v1.10.0
github.com/gin-gonic/gin v1.11.0
github.com/go-ini/ini v1.50.0
github.com/go-playground/validator/v10 v10.20.0
github.com/go-playground/validator/v10 v10.28.0
github.com/go-sql-driver/mysql v1.6.0
github.com/go-webauthn/webauthn v0.11.2
github.com/gofrs/uuid v4.0.0+incompatible
@ -50,16 +52,16 @@ require (
github.com/speps/go-hashids v2.0.0+incompatible
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
github.com/stretchr/testify v1.11.1
github.com/tencentyun/cos-go-sdk-v5 v0.7.54
github.com/ua-parser/uap-go v0.0.0-20250213224047-9c035f085b90
github.com/upyun/go-sdk v2.1.0+incompatible
github.com/wneessen/go-mail v0.6.2
github.com/wneessen/go-mail v0.7.1
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
golang.org/x/image v0.0.0-20211028202545-6944b10bf410
golang.org/x/text v0.23.0
golang.org/x/image v0.18.0
golang.org/x/text v0.30.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.24.0
golang.org/x/tools v0.38.0
modernc.org/sqlite v1.30.0
)
@ -73,11 +75,11 @@ require (
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/windows v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/bytedance/sonic v1.11.6 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic v1.14.1 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/clbanning/mxj v1.8.4 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 // indirect
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
@ -88,18 +90,19 @@ require (
github.com/dsoprea/go-utility/v2 v2.0.0-20221003172846-a3e1774ef349 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-openapi/inflect v0.19.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-webauthn/x v0.1.14 // indirect
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-tpm v0.9.1 // indirect
github.com/gorilla/context v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@ -112,7 +115,7 @@ require (
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
@ -127,25 +130,27 @@ require (
github.com/mozillazg/go-httpheader v0.4.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/nwaples/rardecode/v2 v2.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.55.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/sorairolake/lzip-go v0.3.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/ugorji/go/codec v1.3.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/zclconf/go-cty v1.8.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
golang.org/x/arch v0.22.0 // indirect
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.37.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect

167
go.sum
View File

@ -87,8 +87,6 @@ github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFp
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/abslant/gzip v0.0.9 h1:zxuOQ8QmPwni7vwgE3EyOygdmeCo2UkCmO5t+7Ms6cA=
github.com/abslant/gzip v0.0.9/go.mod h1:IcN2c50tZn2y54oysNcIavbTAc1s0B2f5TqTEA+WCas=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
@ -148,10 +146,12 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w=
github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
@ -175,10 +175,8 @@ github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2
github.com/cloudflare/cfssl v1.6.1 h1:aIOUjpeuDJOpWjVJFP2ByplF53OgqG8I1S40Ggdlk3g=
github.com/cloudflare/cfssl v1.6.1/go.mod h1:ENhCj4Z17+bY2XikpxVmTHDg/C2IsG2Q0ZBeXpAqhCk=
github.com/cloudflare/redoctober v0.0.0-20201013214028-99c99a8e7544/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@ -297,24 +295,24 @@ github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZU
github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.3.0 h1:PolezCc89peu+NgkIWt9OB01Kbzt6IP0J/JvkG6xxlg=
github.com/gin-contrib/cors v1.3.0/go.mod h1:artPvLlhkF7oG06nK8v3U8TNz6IeX+w1uzCSEId5/Vc=
github.com/gin-contrib/cors v1.6.0 h1:0Z7D/bVhE6ja07lI8CTjTonp6SB07o8bNuFyRbsBUQg=
github.com/gin-contrib/cors v1.6.0/go.mod h1:cI+h6iOAyxKRtUtC6iF/Si1KSFvGm/gK+kshxlCi8ro=
github.com/gin-contrib/gzip v1.2.4 h1:yNz4EhPC2kHSZJD1oc1zwp7MLEhEZ3goQeGM3a1b6jU=
github.com/gin-contrib/gzip v1.2.4/go.mod h1:aomRgR7ftdZV3uWY0gW/m8rChfxau0n8YVvwlOHONzw=
github.com/gin-contrib/sessions v1.0.2 h1:UaIjUvTH1cMeOdj3in6dl+Xb6It8RiKRF9Z1anbUyCA=
github.com/gin-contrib/sessions v1.0.2/go.mod h1:KxKxWqWP5LJVDCInulOl4WbLzK2KSPlLesfZ66wRvMs=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2 h1:xLG16iua01X7Gzms9045s2Y2niNpvSY/Zb1oBwgNYZY=
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2/go.mod h1:VhW/Ch/3FhimwZb8Oj+qJmdMmoB8r7lmJ5auRjm50oQ=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
@ -348,10 +346,9 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688=
github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -367,8 +364,10 @@ github.com/go-webauthn/x v0.1.14 h1:1wrB8jzXAofojJPAaRxnZhRgagvLGnLjhCAwg3kTpT0=
github.com/go-webauthn/x v0.1.14/go.mod h1:UuVvFZ8/NbOnkDz3y1NaxtUN87pmtpC1PQ+/5BBQRdc=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
@ -441,8 +440,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@ -623,12 +622,10 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@ -649,7 +646,6 @@ github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
@ -675,11 +671,9 @@ github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HN
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@ -783,8 +777,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@ -840,6 +834,10 @@ github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdk
github.com/qiniu/go-sdk/v7 v7.19.0 h1:k3AzDPil8QHIQnki6xXt4YRAjE52oRoBUXQ4bV+Wc5U=
github.com/qiniu/go-sdk/v7 v7.19.0/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w=
github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=
github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U=
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1 h1:leEwA4MD1ew0lNgzz6Q4G76G3AEfeci+TMggN6WuFRs=
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@ -936,9 +934,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0=
@ -961,8 +958,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
@ -980,8 +977,8 @@ github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/weppos/publicsuffix-go v0.13.1-0.20210123135404-5fd73613514e/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/wneessen/go-mail v0.6.2 h1:c6V7c8D2mz868z9WJ+8zDKtUyLfZ1++uAZmo2GRFji8=
github.com/wneessen/go-mail v0.6.2/go.mod h1:L/PYjPK3/2ZlNb2/FjEBIn9n1rUWjW+Toy531oVmeb4=
github.com/wneessen/go-mail v0.7.1 h1:rvy63sp14N06/kdGqCYwW8Na5gDCXjTQM1E7So4PuKk=
github.com/wneessen/go-mail v0.7.1/go.mod h1:+TkW6QP3EVkgTEqHtVmnAE/1MRhmzb8Y9/W3pweuS+k=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
@ -1032,6 +1029,8 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@ -1044,9 +1043,8 @@ go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZM
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/arch v0.22.0 h1:c/Zle32i5ttqRXjdLyyHZESLD/bB90DCU1g9l/0YBDI=
golang.org/x/arch v0.22.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1069,12 +1067,8 @@ golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1091,8 +1085,8 @@ golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeId
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190501045829-6d32002ffd75/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410 h1:hTftEOvwiOq2+O8k2D5/Q7COC7k5Qcrgc2TFURJYnvQ=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1117,12 +1111,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1177,14 +1167,9 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1213,13 +1198,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1251,7 +1231,6 @@ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1294,24 +1273,13 @@ golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1323,13 +1291,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1403,11 +1366,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1539,8 +1499,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1553,7 +1513,6 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@ -1613,10 +1572,8 @@ modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View File

@ -130,6 +130,7 @@ type (
Size int64
UploadSessionID uuid.UUID
Importing bool
EncryptMetadata *types.EncryptMetadata
}
RelocateEntityParameter struct {
@ -188,7 +189,7 @@ type FileClient interface {
// Copy copies a layer of file to its corresponding destination folder. dstMap is a map from src parent ID to dst parent Files.
Copy(ctx context.Context, files []*ent.File, dstMap map[int][]*ent.File) (map[int][]*ent.File, StorageDiff, error)
// Delete deletes a group of files (and related models) with given entity recycle option
Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error)
Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error)
// StaleEntities returns stale entities of a given file. If ID is not provided, all entities
// will be examined.
StaleEntities(ctx context.Context, ids ...int) ([]*ent.Entity, error)
@ -469,7 +470,7 @@ func (f *fileClient) DeleteByUser(ctx context.Context, uid int) error {
return nil
}
func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error) {
func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error) {
// 1. Decrease reference count for all entities;
// entities stores the relation between its reference count in `files` and entity ID.
entities := make(map[int]int)
@ -525,7 +526,7 @@ func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *typ
for _, chunk := range chunks {
if err := f.client.Entity.Update().
Where(entity.IDIn(chunk...)).
SetRecycleOptions(options).
SetProps(options).
Exec(ctx); err != nil {
return nil, nil, fmt.Errorf("failed to update recycle options for entities %v: %w", chunk, err)
}
@ -884,6 +885,17 @@ func (f *fileClient) RemoveStaleEntities(ctx context.Context, file *ent.File) (S
func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *EntityParameters) (*ent.Entity, StorageDiff, error) {
createdBy := UserFromContext(ctx)
var opt *types.EntityProps
if args.EncryptMetadata != nil {
opt = &types.EntityProps{
EncryptMetadata: &types.EncryptMetadata{
Algorithm: args.EncryptMetadata.Algorithm,
Key: args.EncryptMetadata.Key,
IV: args.EncryptMetadata.IV,
},
}
}
stm := f.client.Entity.
Create().
SetType(int(args.EntityType)).
@ -891,6 +903,10 @@ func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *Ent
SetSize(args.Size).
SetStoragePolicyID(args.StoragePolicyID)
if opt != nil {
stm.SetProps(opt)
}
if createdBy != nil && !IsAnonymousUser(createdBy) {
stm.SetUser(createdBy)
}

View File

@ -78,7 +78,7 @@ func (f *fileClient) searchQuery(q *ent.FileQuery, args *SearchFileParameters, p
return metadata.And(metadata.NameEQ(item.Key), metadata.ValueEQ(item.Value))
}
nameEq := metadata.NameEQ(item.Key)
nameEq := metadata.And(metadata.IsPublic(true), metadata.NameEQ(item.Key))
if item.Value == "" {
return nameEq
} else {
@ -86,8 +86,9 @@ func (f *fileClient) searchQuery(q *ent.FileQuery, args *SearchFileParameters, p
return metadata.And(nameEq, valueContain)
}
})
metaPredicates = append(metaPredicates, metadata.IsPublic(true))
q.Where(file.HasMetadataWith(metadata.And(metaPredicates...)))
q.Where(file.And(lo.Map(metaPredicates, func(item predicate.Metadata, index int) predicate.File {
return file.HasMetadataWith(item)
})...))
}
if args.SizeLte > 0 || args.SizeGte > 0 {

81
inventory/fs_event.go Normal file
View File

@ -0,0 +1,81 @@
package inventory
import (
"context"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/schema"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/gofrs/uuid"
"github.com/samber/lo"
)
type FsEventClient interface {
TxOperator
// Create a new FsEvent
Create(ctx context.Context, uid int, subscriberId uuid.UUID, events ...string) error
// Delete all FsEvents by subscriber
DeleteBySubscriber(ctx context.Context, subscriberId uuid.UUID) error
// Delete all FsEvents
DeleteAll(ctx context.Context) error
// Get all FsEvents by subscriber and user
TakeBySubscriber(ctx context.Context, subscriberId uuid.UUID, userId int) ([]*ent.FsEvent, error)
}
// NewFsEventClient builds the default FsEventClient backed by the given ent
// client. The per-statement SQL parameter limit is derived from the database
// type so bulk operations can be chunked safely.
func NewFsEventClient(client *ent.Client, dbType conf.DBType) FsEventClient {
	return &fsEventClient{client: client, maxSQlParam: sqlParamLimit(dbType)}
}

// fsEventClient is the ent-backed implementation of FsEventClient.
type fsEventClient struct {
	maxSQlParam int // max SQL placeholders per statement for this DB type
	client      *ent.Client
}

// SetClient returns a copy of this client bound to newClient (typically a
// transactional client), implementing TxOperator.
func (c *fsEventClient) SetClient(newClient *ent.Client) TxOperator {
	return &fsEventClient{client: newClient, maxSQlParam: c.maxSQlParam}
}

// GetClient returns the underlying ent client, implementing TxOperator.
func (c *fsEventClient) GetClient() *ent.Client {
	return c.client
}
// Create inserts one FsEvent row per event name for the given user and
// subscriber, using a single bulk insert.
func (c *fsEventClient) Create(ctx context.Context, uid int, subscriberId uuid.UUID, events ...string) error {
	stms := lo.Map(events, func(event string, index int) *ent.FsEventCreate {
		return c.client.FsEvent.
			Create().
			SetUserFsevent(uid).
			SetSubscriber(subscriberId).
			SetEvent(event)
	})
	_, err := c.client.FsEvent.CreateBulk(stms...).Save(ctx)
	return err
}
// DeleteBySubscriber hard-deletes all FsEvents belonging to the given
// subscriber. Soft-delete is bypassed since event rows are transient.
func (c *fsEventClient) DeleteBySubscriber(ctx context.Context, subscriberId uuid.UUID) error {
	_, err := c.client.FsEvent.Delete().Where(fsevent.Subscriber(subscriberId)).Exec(schema.SkipSoftDelete(ctx))
	return err
}

// DeleteAll hard-deletes every FsEvent row, bypassing soft-delete.
func (c *fsEventClient) DeleteAll(ctx context.Context) error {
	_, err := c.client.FsEvent.Delete().Exec(schema.SkipSoftDelete(ctx))
	return err
}
// TakeBySubscriber returns all pending FsEvents for the given subscriber and
// user, then hard-deletes them so each event is delivered at most once.
//
// NOTE(review): the query and the delete are two separate statements. Unless
// callers run this inside a transaction, an event created between the two may
// be deleted without ever being returned — confirm call sites wrap this in a
// tx or can tolerate that race.
func (c *fsEventClient) TakeBySubscriber(ctx context.Context, subscriberId uuid.UUID, userId int) ([]*ent.FsEvent, error) {
	res, err := c.client.FsEvent.Query().Where(fsevent.Subscriber(subscriberId), fsevent.UserFsevent(userId)).All(ctx)
	if err != nil {
		return nil, err
	}
	// Delete the FsEvents
	_, err = c.client.FsEvent.Delete().Where(fsevent.Subscriber(subscriberId), fsevent.UserFsevent(userId)).Exec(schema.SkipSoftDelete(ctx))
	if err != nil {
		return nil, err
	}
	return res, nil
}

View File

@ -477,6 +477,23 @@ var patches = []Patch{
return fmt.Errorf("failed to update mail_reset_template setting: %w", err)
}
return nil
},
},
{
Name: "apply_thumb_path_magic_var",
EndVersion: "4.10.0",
Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
thumbSuffixSetting, err := client.Setting.Query().Where(setting.Name("thumb_entity_suffix")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query thumb_entity_suffix setting: %w", err)
}
newThumbSuffix := fmt.Sprintf("{blob_path}/{blob_name}%s", thumbSuffixSetting.Value)
if _, err := client.Setting.UpdateOne(thumbSuffixSetting).SetValue(newThumbSuffix).Save(ctx); err != nil {
return fmt.Errorf("failed to update thumb_entity_suffix setting: %w", err)
}
return nil
},
},

View File

@ -2,8 +2,11 @@ package inventory
import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/ent/setting"
@ -552,7 +555,7 @@ var DefaultSettings = map[string]string{
"captcha_cap_asset_server": "jsdelivr",
"thumb_width": "400",
"thumb_height": "300",
"thumb_entity_suffix": "._thumb",
"thumb_entity_suffix": "{blob_path}/{blob_name}._thumb",
"thumb_slave_sidecar_suffix": "._thumb_sidecar",
"thumb_encode_method": "png",
"thumb_gc_after_gen": "0",
@ -661,6 +664,15 @@ var DefaultSettings = map[string]string{
"headless_footer_html": "",
"headless_bottom_html": "",
"sidebar_bottom_html": "",
"encrypt_master_key": "",
"encrypt_master_key_vault": "setting",
"encrypt_master_key_file": "",
"show_encryption_status": "1",
}
var RedactedSettings = map[string]struct{}{
"encrypt_master_key": {},
"secret_key": {},
}
func init() {
@ -721,4 +733,10 @@ func init() {
panic(err)
}
DefaultSettings["mail_reset_template"] = string(mailResetTemplates)
key := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, key); err != nil {
panic(err)
}
DefaultSettings["encrypt_master_key"] = base64.StdEncoding.EncodeToString(key)
}

View File

@ -103,6 +103,8 @@ type (
QiniuUploadCdn bool `json:"qiniu_upload_cdn,omitempty"`
// ChunkConcurrency the number of chunks to upload concurrently.
ChunkConcurrency int `json:"chunk_concurrency,omitempty"`
// Whether to enable file encryption.
Encryption bool `json:"encryption,omitempty"`
}
FileType int
@ -154,8 +156,18 @@ type (
MasterSiteVersion string `json:"master_site_version,omitempty"`
}
EntityRecycleOption struct {
UnlinkOnly bool `json:"unlink_only,omitempty"`
EntityProps struct {
UnlinkOnly bool `json:"unlink_only,omitempty"`
EncryptMetadata *EncryptMetadata `json:"encrypt_metadata,omitempty"`
}
Cipher string
EncryptMetadata struct {
Algorithm Cipher `json:"algorithm"`
Key []byte `json:"key"`
KeyPlainText []byte `json:"key_plain_text,omitempty"`
IV []byte `json:"iv"`
}
DavAccountProps struct {
@ -347,3 +359,7 @@ const (
ProfileAllShare = ShareLinksInProfileLevel("all_share")
ProfileHideShare = ShareLinksInProfileLevel("hide_share")
)
const (
CipherAES256CTR Cipher = "aes-256-ctr"
)

View File

@ -1,8 +1,10 @@
//go:debug rsa1024min=0
package main
import (
_ "embed"
"flag"
"github.com/cloudreve/Cloudreve/v4/cmd"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
)

View File

@ -103,6 +103,7 @@ func InitializeHandling(dep dependency.Dep) gin.HandlerFunc {
IP: clientIp,
Host: c.Request.Host,
UserAgent: c.Request.UserAgent(),
ClientID: c.GetHeader(request.ClientIDHeader),
}
cid := uuid.FromStringOrNil(c.GetHeader(request.CorrelationHeader))
if cid == uuid.Nil {

View File

@ -22,4 +22,6 @@ type RequestInfo struct {
Host string
IP string
UserAgent string
// ID of sync client
ClientID string
}

View File

@ -347,7 +347,7 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
var contentDescription *string
if args.IsDownload {
encodedFilename := url.PathEscape(args.DisplayName)
contentDescription = aws.String(fmt.Sprintf(`attachment; filename="%s"`, encodedFilename))
contentDescription = aws.String(fmt.Sprintf(`attachment; filename=%s`, encodedFilename))
}
// 确保过期时间不小于 0 ,如果小于则设置为 7 天

View File

@ -1,13 +1,14 @@
package local
import (
"os"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gofrs/uuid"
"os"
"time"
)
// NewLocalFileEntity creates a new local file entity.
@ -73,3 +74,11 @@ func (l *localFileEntity) UploadSessionID() *uuid.UUID {
// Model returns nil: a local file entity is constructed from the filesystem,
// not backed by a database row.
func (l *localFileEntity) Model() *ent.Entity {
	return nil
}

// Props returns nil: local file entities carry no extra entity props.
func (l *localFileEntity) Props() *types.EntityProps {
	return nil
}

// Encrypted reports whether the entity content is encrypted; this
// implementation always reports false.
func (l *localFileEntity) Encrypted() bool {
	return false
}

View File

@ -10,12 +10,13 @@ import (
"encoding/pem"
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"io"
"net/http"
"net/url"
"strings"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
)
const (

View File

@ -65,12 +65,12 @@ type Driver struct {
type key int
const (
chunkRetrySleep = time.Duration(5) * time.Second
uploadIdParam = "uploadId"
partNumberParam = "partNumber"
callbackParam = "callback"
completeAllHeader = "x-oss-complete-all"
maxDeleteBatch = 1000
chunkRetrySleep = time.Duration(5) * time.Second
maxDeleteBatch = 1000
maxSignTTL = time.Duration(24) * time.Hour * 7
completeAllHeader = "x-oss-complete-all"
forbidOverwriteHeader = "x-oss-forbid-overwrite"
trafficLimitHeader = "x-oss-traffic-limit"
// MultiPartUploadThreshold 服务端使用分片上传的阈值
MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB
@ -431,16 +431,22 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
if args.Speed > 838860800 {
args.Speed = 838860800
}
req.TrafficLimit = args.Speed
req.Parameters = map[string]string{
trafficLimitHeader: strconv.FormatInt(args.Speed, 10),
}
}
return handler.signSourceURL(ctx, e.Source(), args.Expire, req, false)
}
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, req *oss.GetObjectRequest, forceSign bool) (string, error) {
ttl := time.Duration(24) * time.Hour * 365 * 20
// V4 Sign 最大过期时间为7天
ttl := maxSignTTL
if expire != nil {
ttl = time.Until(*expire)
if ttl > maxSignTTL {
ttl = maxSignTTL
}
}
if req == nil {
@ -465,10 +471,12 @@ func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *t
// 公有空间替换掉Key及不支持的头
if !handler.policy.IsPrivate && !forceSign {
query := finalURL.Query()
query.Del("OSSAccessKeyId")
query.Del("Signature")
query.Del("x-oss-credential")
query.Del("x-oss-date")
query.Del("x-oss-expires")
query.Del("x-oss-signature")
query.Del("x-oss-signature-version")
query.Del("response-content-disposition")
query.Del("x-oss-traffic-limit")
finalURL.RawQuery = query.Encode()
}
return finalURL.String(), nil
@ -530,6 +538,11 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
UploadId: imur.UploadId,
PartNumber: int32(c.Index() + 1),
Body: chunk,
RequestCommon: oss.RequestCommon{
Headers: map[string]string{
"Content-Type": "application/octet-stream",
},
},
}, oss.PresignExpires(ttl))
if err != nil {
return err
@ -545,12 +558,19 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
// 签名完成分片上传的URL
completeURL, err := handler.client.Presign(ctx, &oss.CompleteMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
CompleteAll: oss.Ptr("yes"),
ForbidOverwrite: oss.Ptr(strconv.FormatBool(true)),
Callback: oss.Ptr(callbackPolicyEncoded),
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
RequestCommon: oss.RequestCommon{
Parameters: map[string]string{
"callback": callbackPolicyEncoded,
},
Headers: map[string]string{
"Content-Type": "application/octet-stream",
completeAllHeader: "yes",
forbidOverwriteHeader: "true",
},
},
}, oss.PresignExpires(ttl))
if err != nil {
return nil, err
@ -562,6 +582,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
CompleteURL: completeURL.URL,
SessionID: uploadSession.Props.UploadSessionID,
ChunkSize: handler.chunkSize,
Callback: callbackPolicyEncoded,
}, nil
}

View File

@ -0,0 +1,360 @@
// Package encrypt provides AES-256-CTR encryption and decryption functionality
// compatible with the JavaScript EncryptedBlob implementation.
//
// # Usage Example
//
// Basic usage with encrypted metadata:
//
//	// Create AES256CTR instance
//	aes := NewAES256CTR(masterKeyVault)
//
//	// Load encrypted metadata (the file key is encrypted with the master key)
//	err := aes.LoadMetadata(ctx, encryptedMetadata)
//	if err != nil {
//		return err
//	}
//
//	// Set encrypted source stream (seeker may be nil; size -1 if unknown)
//	err = aes.SetSource(encryptedStream, seeker, size, 0)
//	if err != nil {
//		return err
//	}
//
//	// Read decrypted data
//	decryptedData, err := io.ReadAll(aes)
//	if err != nil {
//		return err
//	}
//	aes.Close()
//
// Usage with plain metadata (KeyPlainText already populated):
//
//	aes := NewAES256CTR(masterKeyVault)
//	err := aes.LoadMetadata(ctx, plainMetadata)
//	err = aes.SetSource(encryptedStream, seeker, size, 0)
//	// Read decrypted data...
//
// Usage with counter offset (for chunked/sliced streams):
//
//	// If reading from byte offset 1048576 (1MB) of the encrypted file
//	aes := NewAES256CTR(masterKeyVault)
//	err := aes.LoadMetadata(ctx, metadata)
//	err = aes.SetSource(encryptedStreamStartingAt1MB, seeker, size, 1048576)
//	// This ensures proper counter alignment for correct decryption
//
// Using the Seeker interface (requires a seekable source):
//
//	aes := NewAES256CTR(masterKeyVault)
//	err := aes.LoadMetadata(ctx, metadata)
//	// Pass the total size to SetSource; it is required for io.SeekEnd.
//	err = aes.SetSource(seekableEncryptedStream, seeker, totalFileSize, 0)
//
//	// Seek to position 1048576
//	newPos, err := aes.Seek(1048576, io.SeekStart)
//	// Read from that position...
//
//	// Seek relative to current position
//	newPos, err = aes.Seek(100, io.SeekCurrent)
//
//	// Seek from end (requires a known, non-negative size)
//	newPos, err = aes.Seek(-1024, io.SeekEnd)
//
// Using the factory pattern:
//
//	factory := NewCryptorFactory(masterKeyVault)
//	cryptor, err := factory(types.CipherAES256CTR)
//	if err != nil {
//		return err
//	}
//	err = cryptor.LoadMetadata(ctx, encryptedMetadata)
//	err = cryptor.SetSource(encryptedStream, seeker, size, 0)
//	defer cryptor.Close()
package encrypt
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
// AES256CTR provides both encryption and decryption for AES-256-CTR.
// It implements both Cryptor and Decrypter interfaces.
//
// An instance is stateful: LoadMetadata must be called before SetSource,
// and SetSource before Read/Seek. It is not safe for concurrent use.
type AES256CTR struct {
	masterKeyVault MasterEncryptKeyVault

	// Decryption fields
	src           io.ReadCloser          // Source encrypted stream
	seeker        io.Seeker              // Seeker for the source stream
	stream        cipher.Stream          // AES-CTR cipher stream
	metadata      *types.EncryptMetadata // Metadata with plaintext key (set by LoadMetadata)
	counterOffset int64                  // Byte offset for sliced streams
	pos           int64                  // Current read position relative to counterOffset
	size          int64                  // Total size of encrypted data (for SeekEnd support, -1 if unknown)
	eof           bool                   // EOF flag
}

// NewAES256CTR creates an AES256CTR bound to the given master key vault.
// The stream size starts as unknown (-1) until SetSource provides it.
func NewAES256CTR(masterKeyVault MasterEncryptKeyVault) *AES256CTR {
	return &AES256CTR{
		masterKeyVault: masterKeyVault,
		size:           -1, // Unknown by default
	}
}
// GenerateMetadata creates fresh encryption metadata for a new entity: a
// random 32-byte AES-256 key and a random 16-byte CTR IV. The result carries
// both the key encrypted under the master key (Key) and the plaintext key
// (KeyPlainText) for immediate use; presumably only Key is persisted —
// confirm at call sites.
func (e *AES256CTR) GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error) {
	// Generate random 32-byte key for AES-256
	key := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		return nil, err
	}

	// Generate random 16-byte IV for CTR mode
	iv := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return nil, err
	}

	// Get master key from vault
	masterKey, err := e.masterKeyVault.GetMasterKey(ctx)
	if err != nil {
		return nil, err
	}

	// Encrypt the key with master key
	encryptedKey, err := EncryptWithMasterKey(masterKey, key)
	if err != nil {
		return nil, err
	}

	return &types.EncryptMetadata{
		Algorithm:    types.CipherAES256CTR,
		Key:          encryptedKey,
		KeyPlainText: key,
		IV:           iv,
	}, nil
}
// LoadMetadata loads and decrypts the encryption metadata using the master key.
// If KeyPlainText is already populated, the metadata is used as-is without a
// vault round trip; otherwise Key is decrypted via the vault and stored in a
// fresh copy (the caller's metadata struct is never mutated).
func (e *AES256CTR) LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error {
	if encryptedMetadata == nil {
		return fmt.Errorf("encryption metadata is nil")
	}

	if encryptedMetadata.Algorithm != types.CipherAES256CTR {
		return fmt.Errorf("unsupported algorithm: %s", encryptedMetadata.Algorithm)
	}

	// Key is already available in plaintext — nothing to decrypt.
	if len(encryptedMetadata.KeyPlainText) > 0 {
		e.metadata = encryptedMetadata
		return nil
	}

	// Decrypt the encryption key
	decryptedKey, err := DecriptKey(ctx, e.masterKeyVault, encryptedMetadata.Key)
	if err != nil {
		return fmt.Errorf("failed to decrypt encryption key: %w", err)
	}

	// Store decrypted metadata
	e.metadata = &types.EncryptMetadata{
		Algorithm:    encryptedMetadata.Algorithm,
		KeyPlainText: decryptedKey,
		IV:           encryptedMetadata.IV,
	}

	return nil
}
// SetSource installs the encrypted input stream and prepares the key stream.
//
// counterOffset is the absolute byte position (within the original file) at
// which src begins; the CTR counter is aligned to it so that sliced streams
// decrypt correctly, including offsets that are not 16-byte aligned.
// size is the total encrypted size (-1 if unknown); it enables io.SeekEnd.
func (e *AES256CTR) SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error {
	if e.metadata == nil {
		return fmt.Errorf("metadata not loaded, call LoadMetadata first")
	}

	e.src, e.seeker = src, seeker
	e.size = size
	e.counterOffset = counterOffset

	// Fresh stream: rewind the logical position and clear any prior EOF state.
	e.pos = 0
	e.eof = false

	// Align the cipher stream with the first byte of src.
	return e.initCipherStream(counterOffset)
}
// Read implements io.Reader to produce decrypted plaintext.
// It reads ciphertext from the source and XORs it with the key stream in place.
//
// Per the io.Reader contract, any n > 0 bytes read are decrypted before the
// error (if any) is reported, so callers can safely process them first.
func (e *AES256CTR) Read(p []byte) (int, error) {
	if e.src == nil {
		return 0, fmt.Errorf("source not set, call SetSource first")
	}

	if e.eof {
		return 0, io.EOF
	}

	// Read encrypted data from source
	n, err := e.src.Read(p)

	// Decrypt whatever was read BEFORE inspecting the error: io.Reader callers
	// are expected to consume the n returned bytes even when err is non-nil,
	// so returning them un-decrypted would hand raw ciphertext to the caller.
	if n > 0 {
		e.stream.XORKeyStream(p[:n], p[:n])
		e.pos += int64(n) // Update current position
	}

	if err == io.EOF {
		e.eof = true
		if n == 0 {
			return 0, io.EOF
		}
	}

	return n, err
}
// Close implements io.Closer, closing the underlying source stream if one
// has been set. Closing before SetSource is a no-op.
func (e *AES256CTR) Close() error {
	if e.src == nil {
		return nil
	}
	return e.src.Close()
}
// Seek implements io.Seeker interface for seeking within the encrypted stream.
// It properly adjusts the AES-CTR counter based on the seek position.
//
// Parameters:
//   - offset: byte offset relative to whence
//   - whence: io.SeekStart, io.SeekCurrent, or io.SeekEnd
//
// Returns the new absolute position (relative to counterOffset start).
//
// Note: io.SeekEnd only works when a non-negative size was supplied to
// SetSource. Seeking also requires that a non-nil io.Seeker was supplied.
func (e *AES256CTR) Seek(offset int64, whence int) (int64, error) {
	if e.metadata == nil {
		return 0, fmt.Errorf("metadata not loaded, call LoadMetadata first")
	}

	if e.src == nil {
		return 0, fmt.Errorf("source not set, call SetSource first")
	}

	// Check if source supports seeking
	if e.seeker == nil {
		return 0, fmt.Errorf("source does not support seeking")
	}

	// Calculate new absolute position
	var newPos int64
	switch whence {
	case io.SeekStart:
		newPos = offset
	case io.SeekCurrent:
		newPos = e.pos + offset
	case io.SeekEnd:
		if e.size < 0 {
			return 0, fmt.Errorf("size unknown, call SetSize before using SeekEnd")
		}
		newPos = e.size + offset
	default:
		return 0, fmt.Errorf("invalid whence: %d", whence)
	}

	// Validate new position
	if newPos < 0 {
		return 0, fmt.Errorf("negative position: %d", newPos)
	}

	// Seek in the underlying source stream.
	// The absolute position in the source is counterOffset + newPos.
	// NOTE(review): this assumes the seeker addresses the ORIGINAL (unsliced)
	// stream's coordinates — confirm sources passed with a nonzero
	// counterOffset share that coordinate system.
	absPos := e.counterOffset + newPos
	_, err := e.seeker.Seek(absPos, io.SeekStart)
	if err != nil {
		return 0, fmt.Errorf("failed to seek source: %w", err)
	}

	// Reinitialize cipher stream with new counter position
	if err := e.initCipherStream(absPos); err != nil {
		return 0, fmt.Errorf("failed to reinitialize cipher stream: %w", err)
	}

	// Update position and reset EOF flag
	e.pos = newPos
	e.eof = false

	return newPos, nil
}
// initCipherStream (re)builds the AES-CTR key stream so that the next byte it
// produces corresponds to absolutePosition in the original stream.
func (e *AES256CTR) initCipherStream(absolutePosition int64) error {
	block, err := aes.NewCipher(e.metadata.KeyPlainText)
	if err != nil {
		return fmt.Errorf("failed to create AES cipher: %w", err)
	}

	// Start from the stored IV and advance it by the number of whole 16-byte
	// blocks that precede absolutePosition.
	counter := make([]byte, 16)
	copy(counter, e.metadata.IV)
	if blocks := absolutePosition / 16; blocks > 0 {
		incrementCounter(counter, blocks)
	}
	e.stream = cipher.NewCTR(block, counter)

	// CTR has no random access inside a block: discard key-stream bytes to
	// land on a position that is not 16-byte aligned.
	if rem := absolutePosition % 16; rem > 0 {
		scratch := make([]byte, rem)
		e.stream.XORKeyStream(scratch, scratch)
	}

	return nil
}
// incrementCounter adds the given (non-negative) number of 16-byte blocks to
// a 128-bit big-endian counter stored in counter, wrapping on overflow. This
// matches the JavaScript implementation's incrementCounter function.
func incrementCounter(counter []byte, blocks int64) {
	if blocks <= 0 {
		return
	}

	// Single ripple-carry pass from the least-significant byte upward:
	// at each position add one byte of `blocks` plus the carry from below.
	add := uint64(blocks)
	carry := uint64(0)
	for i := 15; i >= 0; i-- {
		sum := uint64(counter[i]) + (add & 0xff) + carry
		counter[i] = byte(sum)
		carry = sum >> 8
		add >>= 8
	}
}

View File

@ -0,0 +1,97 @@
package encrypt
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
type (
	// Cryptor is a streaming cipher bound to one entity's encryption
	// metadata: it can generate metadata for new entities and decrypt an
	// existing entity's stream via Read/Seek/Close.
	Cryptor interface {
		io.ReadCloser
		io.Seeker
		// LoadMetadata loads and decrypts the encryption metadata using the master key
		LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error
		// SetSource sets the encrypted data source and initializes the cipher stream
		SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error
		// GenerateMetadata generates a new encryption metadata
		GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error)
	}

	// CryptorFactory constructs a Cryptor for a given cipher algorithm.
	CryptorFactory func(algorithm types.Cipher) (Cryptor, error)
)

// NewCryptorFactory returns a factory producing Cryptors backed by
// masterKeyVault. Only AES-256-CTR is currently supported; any other
// algorithm yields an error.
func NewCryptorFactory(masterKeyVault MasterEncryptKeyVault) CryptorFactory {
	return func(algorithm types.Cipher) (Cryptor, error) {
		switch algorithm {
		case types.CipherAES256CTR:
			return NewAES256CTR(masterKeyVault), nil
		default:
			return nil, fmt.Errorf("unknown algorithm: %s", algorithm)
		}
	}
}
// EncryptWithMasterKey encrypts data with AES-256-CTR under masterKey, using
// a freshly generated random IV.
// The returned slice is the 16-byte IV followed by the ciphertext.
func EncryptWithMasterKey(masterKey, data []byte) ([]byte, error) {
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return nil, err
	}

	// Allocate IV + ciphertext as one buffer and fill the IV prefix directly.
	out := make([]byte, 16+len(data))
	if _, err := io.ReadFull(rand.Reader, out[:16]); err != nil {
		return nil, err
	}

	// Encrypt into the remainder of the buffer.
	cipher.NewCTR(block, out[:16]).XORKeyStream(out[16:], data)
	return out, nil
}
// DecriptKey decrypts an entity's encrypted file key using the master key
// fetched from keyVault.
//
// NOTE(review): the name is misspelled ("Decript"); it is kept unchanged here
// because it is exported API — consider adding a correctly spelled alias in a
// follow-up and deprecating this one.
func DecriptKey(ctx context.Context, keyVault MasterEncryptKeyVault, encryptedKey []byte) ([]byte, error) {
	masterKey, err := keyVault.GetMasterKey(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	return DecryptWithMasterKey(masterKey, encryptedKey)
}
// DecryptWithMasterKey decrypts data produced by EncryptWithMasterKey using
// AES-256-CTR under masterKey.
// Input format: [16-byte IV] + [encrypted data].
func DecryptWithMasterKey(masterKey, encryptedData []byte) ([]byte, error) {
	// A valid payload carries at least the 16-byte IV. (The previous version
	// returned aes.KeySizeError here, which misreported a short-ciphertext
	// condition as a key-size problem.)
	if len(encryptedData) < 16 {
		return nil, fmt.Errorf("encrypted data too short: %d bytes, need at least 16 for the IV", len(encryptedData))
	}

	// Extract IV and encrypted data
	iv, payload := encryptedData[:16], encryptedData[16:]

	// Create AES cipher with master key
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return nil, err
	}

	// Decrypt data
	decrypted := make([]byte, len(payload))
	cipher.NewCTR(block, iv).XORKeyStream(decrypted, payload)
	return decrypted, nil
}

View File

@ -0,0 +1,105 @@
package encrypt
import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"os"
	"strings"
	"sync"

	"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
const (
	// EnvMasterEncryptKey is the environment variable holding the
	// base64-encoded master encrypt key for the "env" vault type.
	EnvMasterEncryptKey = "CR_ENCRYPT_MASTER_KEY"
)

// MasterEncryptKeyVault is a vault for the master encrypt key.
type MasterEncryptKeyVault interface {
	// GetMasterKey returns the raw (decoded) master key bytes.
	GetMasterKey(ctx context.Context) ([]byte, error)
}

// NewMasterEncryptKeyVault selects a vault implementation from the configured
// vault type: environment variable, key file, or (default) the settings store.
func NewMasterEncryptKeyVault(ctx context.Context, settings setting.Provider) MasterEncryptKeyVault {
	vaultType := settings.MasterEncryptKeyVault(ctx)
	switch vaultType {
	case setting.MasterEncryptKeyVaultTypeEnv:
		return NewEnvMasterEncryptKeyVault()
	case setting.MasterEncryptKeyVaultTypeFile:
		return NewFileMasterEncryptKeyVault(settings.MasterEncryptKeyFile(ctx))
	default:
		return NewSettingMasterEncryptKeyVault(settings)
	}
}
// settingMasterEncryptKeyVault is a vault for the master encrypt key that gets the key from the setting KV.
type settingMasterEncryptKeyVault struct {
	setting setting.Provider
}

// NewSettingMasterEncryptKeyVault creates a vault that reads the master key
// from the settings provider on each call.
func NewSettingMasterEncryptKeyVault(setting setting.Provider) MasterEncryptKeyVault {
	return &settingMasterEncryptKeyVault{setting: setting}
}

// GetMasterKey returns the master key from settings. Both nil and empty keys
// are rejected so a blank setting value cannot silently act as a valid key.
func (v *settingMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
	key := v.setting.MasterEncryptKey(ctx)
	if len(key) == 0 {
		return nil, errors.New("master encrypt key is not set")
	}
	return key, nil
}
// NewEnvMasterEncryptKeyVault creates a vault that reads the base64-encoded
// master key from the CR_ENCRYPT_MASTER_KEY environment variable.
func NewEnvMasterEncryptKeyVault() MasterEncryptKeyVault {
	return &envMasterEncryptKeyVault{}
}

type envMasterEncryptKeyVault struct {
}

// envMasterKeyMu guards envMasterKeyCache: GetMasterKey may be called from
// multiple goroutines, and an unguarded package-level slice is a data race.
var (
	envMasterKeyMu    sync.Mutex
	envMasterKeyCache = []byte{}
)

// GetMasterKey returns the decoded master key, caching it after the first
// successful read. Failures are not cached, so a later call can succeed once
// the environment is fixed.
func (v *envMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
	envMasterKeyMu.Lock()
	defer envMasterKeyMu.Unlock()

	if len(envMasterKeyCache) > 0 {
		return envMasterKeyCache, nil
	}

	key := os.Getenv(EnvMasterEncryptKey)
	if key == "" {
		return nil, errors.New("master encrypt key is not set")
	}

	decodedKey, err := base64.StdEncoding.DecodeString(key)
	if err != nil {
		return nil, fmt.Errorf("failed to decode master encrypt key: %w", err)
	}

	envMasterKeyCache = decodedKey
	return decodedKey, nil
}
// NewFileMasterEncryptKeyVault creates a vault that reads the base64-encoded
// master key from the file at path.
func NewFileMasterEncryptKeyVault(path string) MasterEncryptKeyVault {
	return &fileMasterEncryptKeyVault{path: path}
}

// fileMasterKeyMu guards fileMasterKeyCache against concurrent GetMasterKey calls.
var (
	fileMasterKeyMu    sync.Mutex
	fileMasterKeyCache = []byte{}
)

type fileMasterEncryptKeyVault struct {
	path string
}

// GetMasterKey reads, trims, and base64-decodes the key file, caching the
// result after the first successful read. Key files commonly end with a
// trailing newline, which would otherwise break base64 decoding, so the file
// content is whitespace-trimmed first. Underlying errors are wrapped so the
// root cause (missing file, permissions, bad encoding) stays visible.
func (v *fileMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
	fileMasterKeyMu.Lock()
	defer fileMasterKeyMu.Unlock()

	if len(fileMasterKeyCache) > 0 {
		return fileMasterKeyCache, nil
	}

	raw, err := os.ReadFile(v.path)
	if err != nil {
		return nil, fmt.Errorf("invalid master encrypt key file: %w", err)
	}

	decodedKey, err := base64.StdEncoding.DecodeString(strings.TrimSpace(string(raw)))
	if err != nil {
		return nil, fmt.Errorf("invalid master encrypt key: %w", err)
	}

	fileMasterKeyCache = decodedKey
	return fileMasterKeyCache, nil
}

View File

@ -0,0 +1,193 @@
package eventhub
import "errors"
type (
	// Event describes a single filesystem change to be delivered to
	// subscribers. From/To carry paths for rename-style events.
	Event struct {
		Type   EventType `json:"type"`
		FileID string    `json:"file_id"`
		From   string    `json:"from"`
		To     string    `json:"to"`
	}
	// EventType is the kind of filesystem change carried by an Event.
	EventType string
)

const (
	EventTypeCreate = "create"
	EventTypeModify = "modify"
	EventTypeRename = "rename"
	EventTypeDelete = "delete"
)

var (
	// ErrEventHubClosed is returned when operations are attempted on a closed EventHub.
	ErrEventHubClosed = errors.New("event hub is closed")
)

// eventState tracks the accumulated state for each file
type eventState struct {
	baseType    EventType // The base event type (Create, Delete, or first event type)
	originalSrc string    // Original source path (for Create or first Rename)
	currentDst  string    // Current destination path
}
/*
Debounce merge rules (earlier event + later event → result):

	Modify + Modify            → keep only the last Modify.
	Create + Modify            → fold into a single Create with final metadata/content.
	Create + Rename(a→b)       → Create at b.
	Create + Delete            → drop both (an ephemeral object never needs to reach clients).
	Modify + Delete            → Delete (the intermediate Modify is irrelevant to final state).
	Rename(a→b) + Rename(b→c)  → Rename(a→c).
	Rename(a→b) + Modify       → emit Rename(a→b) then a single Modify at b (or fold the Modify into Create if the chain starts with Create).
	Rename(a→b) + Delete       → emit only Delete(object_id).
	Rename(a→b) + Rename(b→a)  → with no intervening Modify, drop both (rename there-and-back is a no-op).
	Delete + Create            → may be a valid sequence, e.g. the user restores the same file from the trash bin.
*/
// DebounceEvents takes time-ordered events and returns debounced/merged events.
func DebounceEvents(in []*Event) []*Event {
if len(in) == 0 {
return nil
}
states := make(map[string]*eventState) // keyed by FileID
order := make([]string, 0) // to preserve order of first appearance
for _, e := range in {
state, exists := states[e.FileID]
if !exists {
// First event for this file
order = append(order, e.FileID)
states[e.FileID] = &eventState{
baseType: e.Type,
originalSrc: e.From,
currentDst: e.To,
}
continue
}
switch e.Type {
case EventTypeCreate:
// Delete + Create → keep as Create (e.g. restore from trash)
if state.baseType == EventTypeDelete {
state.baseType = EventTypeCreate
state.originalSrc = e.From
state.currentDst = ""
}
case EventTypeModify:
switch state.baseType {
case EventTypeCreate:
// Create + Modify → fold into Create (no change needed, Create already implies content)
case EventTypeModify:
// Modify + Modify → keep only last Modify (state already correct)
case EventTypeRename:
// Rename + Modify → fold into first Rename
case EventTypeDelete:
// Delete + Modify → should not happen, but ignore Modify
}
case EventTypeRename:
switch state.baseType {
case EventTypeCreate:
// Create + Rename(a→b) → Create at b
state.originalSrc = e.To
state.currentDst = ""
case EventTypeModify:
// Modify + Rename → emit Rename only
state.baseType = EventTypeRename
state.currentDst = e.To
state.originalSrc = e.From
case EventTypeRename:
// Rename(a→b) + Rename(b→c) → Rename(a→c)
// Check for no-op: Rename(a→b) + Rename(b→a) → drop both
if state.originalSrc == e.To {
// Rename there-and-back, drop both
delete(states, e.FileID)
// Remove from order
for i, id := range order {
if id == e.FileID {
order = append(order[:i], order[i+1:]...)
break
}
}
} else {
state.currentDst = e.To
}
case EventTypeDelete:
// Delete + Rename → should not happen, ignore
}
case EventTypeDelete:
switch state.baseType {
case EventTypeCreate:
// Create + Delete → drop both (ephemeral object)
delete(states, e.FileID)
// Remove from order
for i, id := range order {
if id == e.FileID {
order = append(order[:i], order[i+1:]...)
break
}
}
case EventTypeModify:
// Modify + Delete → Delete
state.baseType = EventTypeDelete
state.originalSrc = e.From
state.currentDst = ""
case EventTypeRename:
// Rename + Delete → Delete only
state.baseType = EventTypeDelete
state.originalSrc = e.From
state.currentDst = ""
case EventTypeDelete:
// Delete + Delete → keep Delete (should not happen normally)
}
}
}
// Build output events in order
result := make([]*Event, 0, len(order))
for _, fileID := range order {
state, exists := states[fileID]
if !exists {
continue
}
switch state.baseType {
case EventTypeCreate:
result = append(result, &Event{
Type: EventTypeCreate,
FileID: fileID,
From: state.originalSrc,
})
case EventTypeModify:
result = append(result, &Event{
Type: EventTypeModify,
FileID: fileID,
From: state.originalSrc,
})
case EventTypeRename:
// If hasModify and base was originally Modify (converted to Rename),
// we need to emit Modify first at original location
// But in our current logic, Modify+Rename sets hasModify=true
// We emit Rename, then Modify if needed
result = append(result, &Event{
Type: EventTypeRename,
FileID: fileID,
From: state.originalSrc,
To: state.currentDst,
})
case EventTypeDelete:
result = append(result, &Event{
Type: EventTypeDelete,
FileID: fileID,
From: state.originalSrc,
})
}
}
return result
}

View File

@ -0,0 +1,199 @@
package eventhub
import (
"context"
"sync"
"time"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
)
type (
	// EventHub fans filesystem change events out to per-client subscribers.
	EventHub interface {
		// Subscribe to a topic and return a channel to receive events.
		// If a subscriber with the same ID already exists and is offline,
		// it will be reactivated and any buffered events will be flushed.
		Subscribe(ctx context.Context, topic int, id string) (chan *Event, bool, error)
		// Unsubscribe marks the subscriber as offline instead of removing it.
		// Buffered events will be kept for when the subscriber reconnects.
		// Subscribers that remain offline for more than 14 days will be permanently removed.
		Unsubscribe(ctx context.Context, topic int, id string)
		// Get subscribers of a topic.
		GetSubscribers(ctx context.Context, topic int) []Subscriber
		// Close shuts down the event hub and disconnects all subscribers.
		Close()
	}
)

const (
	// bufSize is the capacity of each subscriber's event channel; delivery is
	// non-blocking, so events beyond this are dropped for slow consumers.
	bufSize = 16
	// cleanupPeriod is how often long-offline subscribers are purged.
	cleanupPeriod = 1 * time.Hour
)

// eventHub is the default EventHub implementation. topics maps a topic ID
// (the watched root file ID — see getEligibleSubscriber in dbfs) to the
// subscribers registered under it, keyed by subscriber ID.
type eventHub struct {
	mu     sync.RWMutex
	topics map[int]map[string]*subscriber // guarded by mu

	userClient    inventory.UserClient
	fsEventClient inventory.FsEventClient

	closed  bool          // guarded by mu; set once by Close
	closeCh chan struct{} // closed to stop the cleanup goroutine
	wg      sync.WaitGroup
}
// NewEventHub creates an EventHub, purges fs events persisted by a previous
// run, and starts the background cleanup goroutine (stopped by Close).
func NewEventHub(userClient inventory.UserClient, fsEventClient inventory.FsEventClient) EventHub {
	hub := &eventHub{
		topics:        map[int]map[string]*subscriber{},
		userClient:    userClient,
		fsEventClient: fsEventClient,
		closeCh:       make(chan struct{}),
	}

	// Events persisted for offline subscribers are stale after a restart;
	// remove them all before accepting new subscriptions.
	fsEventClient.DeleteAll(context.Background())

	// Launch the periodic cleanup worker; Close waits for it via wg.
	hub.wg.Add(1)
	go hub.cleanupLoop()

	return hub
}
// cleanupLoop periodically removes subscribers that have been offline for too
// long. It exits when closeCh is closed.
func (e *eventHub) cleanupLoop() {
	defer e.wg.Done()

	t := time.NewTicker(cleanupPeriod)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			e.cleanupExpiredSubscribers()
		case <-e.closeCh:
			return
		}
	}
}
// cleanupExpiredSubscribers removes subscribers that have been offline for
// more than 14 days, dropping empty topics along the way.
func (e *eventHub) cleanupExpiredSubscribers() {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.closed {
		return
	}

	for topic, subscribers := range e.topics {
		for id, s := range subscribers {
			if !s.shouldExpire() {
				continue
			}
			s.close()
			delete(subscribers, id)
		}
		// Deleting map entries during range is safe in Go.
		if len(subscribers) == 0 {
			delete(e.topics, topic)
		}
	}
}
// GetSubscribers returns a snapshot of all subscribers of a topic.
func (e *eventHub) GetSubscribers(ctx context.Context, topic int) []Subscriber {
	e.mu.RLock()
	defer e.mu.RUnlock()

	topicSubs := e.topics[topic]
	result := make([]Subscriber, 0, len(topicSubs))
	for _, s := range topicSubs {
		result = append(result, s)
	}
	return result
}
// Subscribe registers (or reactivates) the subscriber id under topic and
// returns its event channel. The bool result reports whether an existing
// offline subscriber was reactivated.
func (e *eventHub) Subscribe(ctx context.Context, topic int, id string) (chan *Event, bool, error) {
	l := logging.FromContext(ctx)
	l.Info("Subscribing to event hub for topic %d with id %s", topic, id)

	e.mu.Lock()
	defer e.mu.Unlock()
	if e.closed {
		return nil, false, ErrEventHubClosed
	}

	subs := e.topics[topic]
	if subs == nil {
		subs = make(map[string]*subscriber)
		e.topics[topic] = subs
	}

	// Reuse an existing subscriber with the same ID when it is still usable.
	if existing, found := subs[id]; found {
		if !existing.isClosed() {
			// Reactivate the offline subscriber
			l.Info("Reactivating offline subscriber %s for topic %d", id, topic)
			existing.setOnline(ctx)
			return existing.ch, true, nil
		}
		// Subscriber was closed, create a new one
		delete(subs, id)
	}

	sub, err := newSubscriber(ctx, id, e.userClient, e.fsEventClient)
	if err != nil {
		return nil, false, err
	}
	subs[id] = sub
	return sub.ch, false, nil
}
// Unsubscribe marks the subscriber as offline instead of removing it.
//
// setOffline stops the debounce timer and flushes the buffer while the
// subscriber is already offline, which persists the events via the FsEvent
// inventory for replay on reconnect. (Calling sub.Stop() first — as this
// function previously did — flushed the buffer into the event channel while
// the subscriber was still marked online, pushing events at a client that had
// just disconnected instead of persisting them.)
func (e *eventHub) Unsubscribe(ctx context.Context, topic int, id string) {
	l := logging.FromContext(ctx)
	l.Info("Marking subscriber offline for topic %d with id %s", topic, id)
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.closed {
		return
	}
	subs, ok := e.topics[topic]
	if !ok {
		return
	}
	if sub, ok := subs[id]; ok {
		// Mark as offline instead of deleting; buffered events are persisted
		// inside setOffline and kept for when the subscriber reconnects.
		sub.setOffline()
	}
}
// Close shuts down the event hub, disconnects all subscribers, and waits for
// the background cleanup goroutine to exit.
func (e *eventHub) Close() {
	e.mu.Lock()
	if e.closed {
		e.mu.Unlock()
		return
	}
	e.closed = true
	close(e.closeCh)

	// Permanently close every subscriber before dropping the topic table.
	for _, topicSubs := range e.topics {
		for _, s := range topicSubs {
			s.close()
		}
	}
	e.topics = nil
	e.mu.Unlock()

	// Wait for cleanup goroutine to finish (must not hold mu here).
	e.wg.Wait()
}

View File

@ -0,0 +1,317 @@
package eventhub
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/gofrs/uuid"
"github.com/samber/lo"
)
// Subscriber is the hub-side handle for one connected client.
type Subscriber interface {
	ID() string
	Ch() chan *Event
	Publish(evt Event)
	Stop()
	Buffer() []*Event
	// Owner returns the owner of the subscriber.
	Owner() (*ent.User, error)
	// Online returns whether the subscriber is online.
	Online() bool
	// OfflineSince returns when the subscriber went offline.
	// Returns zero time if the subscriber is online.
	OfflineSince() time.Time
}

const (
	// debounceDelay is how long after the last published event the buffer is
	// flushed to the subscriber.
	debounceDelay = 5 * time.Second
	// userCacheTTL bounds how long Owner() reuses the cached owner record.
	userCacheTTL = 1 * time.Hour
	// offlineMaxAge is how long an offline subscriber is retained before it
	// is permanently removed by the hub's cleanup loop.
	offlineMaxAge = 14 * 24 * time.Hour // 14 days
)

// subscriber implements Subscriber. All mutable fields are guarded by mu.
type subscriber struct {
	mu            sync.Mutex
	userClient    inventory.UserClient
	fsEventClient inventory.FsEventClient
	id            string
	uid           int // owner user ID, fixed at creation
	ch            chan *Event

	// Online status
	online       bool
	offlineSince time.Time // zero while online

	// Debounce buffer for pending events
	buffer []*Event
	timer  *time.Timer // debounce timer; nil when disarmed

	// Owner info
	ownerCached *ent.User // may be nil; lazily refreshed by Owner()
	cachedAt    time.Time

	// Close signal
	closed   bool
	closedCh chan struct{}
}
// newSubscriber builds an online subscriber owned by the authenticated user in
// ctx. Anonymous or missing users cannot subscribe.
func newSubscriber(ctx context.Context, id string, userClient inventory.UserClient, fsEventClient inventory.FsEventClient) (*subscriber, error) {
	owner := inventory.UserFromContext(ctx)
	if owner == nil || inventory.IsAnonymousUser(owner) {
		return nil, errors.New("user not found")
	}

	s := &subscriber{
		id:            id,
		ch:            make(chan *Event, bufSize),
		userClient:    userClient,
		fsEventClient: fsEventClient,
		ownerCached:   owner,
		uid:           owner.ID,
		cachedAt:      time.Now(),
		online:        true,
		closedCh:      make(chan struct{}),
	}
	return s, nil
}
// ID returns the subscriber's unique identifier.
func (s *subscriber) ID() string {
	return s.id
}

// Ch returns the channel on which debounced events are delivered.
func (s *subscriber) Ch() chan *Event {
	return s.ch
}

// Online reports whether the subscriber is currently connected.
func (s *subscriber) Online() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.online
}

// OfflineSince returns when the subscriber went offline; zero time if online.
func (s *subscriber) OfflineSince() time.Time {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.offlineSince
}
// Owner returns the subscriber's owning user, refreshing the cached record
// when it is missing or older than userCacheTTL.
func (s *subscriber) Owner() (*ent.User, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	cacheValid := s.ownerCached != nil && time.Since(s.cachedAt) <= userCacheTTL
	if !cacheValid {
		u, err := s.userClient.GetLoginUserByID(context.Background(), s.uid)
		if err != nil {
			return nil, fmt.Errorf("failed to get login user: %w", err)
		}
		s.ownerCached = u
		s.cachedAt = time.Now()
	}
	return s.ownerCached, nil
}
// Publish adds an event to the buffer and starts/resets the debounce timer.
// Events will be flushed to the channel after the debounce delay.
// If the subscriber is offline, events are kept in the buffer only.
func (s *subscriber) Publish(evt Event) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.closed {
		s.publishLocked(evt)
	}
}
// publishLocked appends evt to the debounce buffer and (re)arms the debounce
// timer so the buffer is flushed debounceDelay after the most recent event.
// Caller must hold s.mu.
func (s *subscriber) publishLocked(evt Event) {
	s.buffer = append(s.buffer, &evt)

	if s.timer == nil {
		s.timer = time.AfterFunc(debounceDelay, s.flush)
		return
	}
	// Reuse the existing timer instead of allocating a fresh one per event;
	// Reset on an AfterFunc timer reschedules the callback.
	s.timer.Stop()
	s.timer.Reset(debounceDelay)
}
// flush delivers all buffered events; invoked by the debounce timer.
func (s *subscriber) flush() {
	s.mu.Lock()
	s.flushLocked(context.Background())
	s.mu.Unlock()
}
// flushLocked drains the debounce buffer. For online subscribers the merged
// events are sent (non-blocking) to the event channel; for offline subscribers
// the raw events are persisted via the FsEvent inventory so they can be
// replayed when the subscriber reconnects. Caller must hold s.mu.
func (s *subscriber) flushLocked(ctx context.Context) {
	if len(s.buffer) == 0 || s.closed {
		return
	}
	if !s.online {
		// Persist raw events for replay on reconnect. Use s.uid here:
		// s.ownerCached may be nil (setOnline clears it to force a refresh),
		// so dereferencing ownerCached.ID could panic. Best-effort write.
		_ = s.fsEventClient.Create(ctx, s.uid, uuid.FromStringOrNil(s.id), lo.Map(s.buffer, func(item *Event, index int) string {
			res, _ := json.Marshal(item)
			return string(res)
		})...)
	} else {
		// Merge/fold the buffered events before delivery.
		debouncedEvents := DebounceEvents(s.buffer)
		for _, evt := range debouncedEvents {
			select {
			case s.ch <- evt:
			default:
				// Non-blocking send; drop if subscriber is slow
			}
		}
	}
	// Clear the buffer and disarm the timer bookkeeping.
	s.buffer = nil
	s.timer = nil
}
// Stop cancels any pending debounce timer and flushes remaining events.
// Should be called before closing the subscriber.
func (s *subscriber) Stop() {
	s.mu.Lock()
	defer s.mu.Unlock()

	if t := s.timer; t != nil {
		t.Stop()
		s.timer = nil
	}
	// Deliver whatever is still buffered before stopping.
	s.flushLocked(context.Background())
}
// setOnline marks the subscriber as online, reloads events that were persisted
// while it was offline, and arms the debounce timer so they are flushed soon.
func (s *subscriber) setOnline(ctx context.Context) {
	l := logging.FromContext(ctx)
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return
	}
	s.online = true
	// Drop the cached owner so Owner() refetches fresh user data on next use.
	// NOTE(review): flushLocked's offline path must not dereference
	// ownerCached while it is nil — verify.
	s.ownerCached = nil
	s.offlineSince = time.Time{}
	// Retrieve events persisted for this subscriber while it was offline.
	events, err := s.fsEventClient.TakeBySubscriber(ctx, uuid.FromStringOrNil(s.id), s.uid)
	if err != nil {
		l.Error("Failed to get events from inventory: %s", err)
		return
	}
	// Decode each persisted JSON event and append it to the in-memory buffer;
	// malformed records are skipped rather than failing the whole reload.
	for _, event := range events {
		var eventParsed Event
		err := json.Unmarshal([]byte(event.Event), &eventParsed)
		if err != nil {
			l.Error("Failed to unmarshal event: %s", err)
			continue
		}
		s.buffer = append(s.buffer, &eventParsed)
	}
	// Arm the debounce timer so the recovered events are flushed shortly.
	if len(s.buffer) > 0 {
		if s.timer != nil {
			s.timer.Stop()
		}
		s.timer = time.AfterFunc(debounceDelay, s.flush)
	}
}
// setOffline marks the subscriber as offline, disarms the debounce timer, and
// flushes the buffer (which, while offline, persists the events for replay).
func (s *subscriber) setOffline() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return
	}

	s.online = false
	s.offlineSince = time.Now()

	if t := s.timer; t != nil {
		// Disarm the debounce timer; remaining events are handled below.
		t.Stop()
		s.timer = nil
	}
	// Flushing while offline routes the buffer to persistent storage.
	s.flushLocked(context.Background())
}
// close permanently closes the subscriber and drops its persisted events.
func (s *subscriber) close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return
	}
	s.closed = true

	if t := s.timer; t != nil {
		t.Stop()
		s.timer = nil
	}

	// Remove any events persisted for this subscriber.
	s.fsEventClient.DeleteBySubscriber(context.Background(), uuid.FromStringOrNil(s.id))

	// Signal close and close the channel
	close(s.closedCh)
	close(s.ch)
	s.buffer = nil
}
// isClosed reports whether the subscriber has been permanently closed.
func (s *subscriber) isClosed() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.closed
}

// shouldExpire reports whether the subscriber has been offline longer than
// offlineMaxAge and should be removed by the hub's cleanup loop.
func (s *subscriber) shouldExpire() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.online || s.offlineSince.IsZero() {
		return false
	}
	return time.Since(s.offlineSince) > offlineMaxAge
}
// Buffer returns a copy of the current buffered events.
// Useful for debugging or implementing custom merging logic.
func (s *subscriber) Buffer() []*Event {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.buffer) == 0 {
		return nil
	}
	// Copy so callers cannot race with the internal buffer.
	return append([]*Event(nil), s.buffer...)
}

View File

@ -4,12 +4,8 @@ import (
"context"
"errors"
"fmt"
"math/rand"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
@ -18,6 +14,8 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@ -47,7 +45,7 @@ type (
func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
l logging.Logger, ls lock.LockSystem, settingClient setting.Provider,
storagePolicyClient inventory.StoragePolicyClient, hasher hashid.Encoder, userClient inventory.UserClient,
cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient) fs.FileSystem {
cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient, encryptorFactory encrypt.CryptorFactory, eventHub eventhub.EventHub) fs.FileSystem {
return &DBFS{
user: u,
navigators: make(map[string]Navigator),
@ -62,6 +60,8 @@ func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inv
cache: cache,
stateKv: stateKv,
directLinkClient: directLinkClient,
encryptorFactory: encryptorFactory,
eventHub: eventHub,
}
}
@ -80,6 +80,8 @@ type DBFS struct {
cache cache.Driver
stateKv cache.Driver
mu sync.Mutex
encryptorFactory encrypt.CryptorFactory
eventHub eventhub.EventHub
}
func (f *DBFS) Recycle() {
@ -123,7 +125,7 @@ func (f *DBFS) List(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.Fi
parent, err := f.getFileByPath(ctx, navigator, path)
if err != nil {
return nil, nil, fmt.Errorf("Parent not exist: %w", err)
return nil, nil, fmt.Errorf("parent not exist: %w", err)
}
pageSize := 0
@ -287,6 +289,7 @@ func (f *DBFS) CreateEntity(ctx context.Context, file fs.File, policy *ent.Stora
Source: req.Props.SavePath,
Size: req.Props.Size,
UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
EncryptMetadata: o.encryptMetadata,
})
if err != nil {
_ = inventory.Rollback(tx)
@ -617,6 +620,7 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
ModifiedAt: o.UploadRequest.Props.LastModified,
UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
Importing: o.UploadRequest.ImportFrom != nil,
EncryptMetadata: o.encryptMetadata,
}
}
@ -642,7 +646,23 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
}
file.SetEntities([]*ent.Entity{entity})
return newFile(parent, file), nil
newFile := newFile(parent, file)
f.emitFileCreated(ctx, newFile)
return newFile, nil
}
func (f *DBFS) generateEncryptMetadata(ctx context.Context, uploadRequest *fs.UploadRequest, policy *ent.StoragePolicy) (*types.EncryptMetadata, error) {
relayEnabled := policy.Settings != nil && policy.Settings.Relay
if (len(uploadRequest.Props.EncryptionSupported) > 0 && uploadRequest.Props.EncryptionSupported[0] == types.CipherAES256CTR) || relayEnabled {
encryptor, err := f.encryptorFactory(types.CipherAES256CTR)
if err != nil {
return nil, fmt.Errorf("failed to get encryptor: %w", err)
}
return encryptor.GenerateMetadata(ctx)
}
return nil, nil
}
// getPreferredPolicy tries to get the preferred storage policy for the given file.
@ -768,71 +788,16 @@ func (f *DBFS) navigatorId(path *fs.URI) string {
// generateSavePath generates the physical save path for the upload request.
func generateSavePath(policy *ent.StoragePolicy, req *fs.UploadRequest, user *ent.User) string {
currentTime := time.Now()
originName := req.Props.Uri.Name()
dynamicReplace := func(regPattern string, rule string, pathAvailable bool) string {
re := regexp.MustCompile(regPattern)
return re.ReplaceAllStringFunc(rule, func(match string) string {
switch match {
case "{timestamp}":
return strconv.FormatInt(currentTime.Unix(), 10)
case "{timestamp_nano}":
return strconv.FormatInt(currentTime.UnixNano(), 10)
case "{datetime}":
return currentTime.Format("20060102150405")
case "{date}":
return currentTime.Format("20060102")
case "{year}":
return currentTime.Format("2006")
case "{month}":
return currentTime.Format("01")
case "{day}":
return currentTime.Format("02")
case "{hour}":
return currentTime.Format("15")
case "{minute}":
return currentTime.Format("04")
case "{second}":
return currentTime.Format("05")
case "{uid}":
return strconv.Itoa(user.ID)
case "{randomkey16}":
return util.RandStringRunes(16)
case "{randomkey8}":
return util.RandStringRunes(8)
case "{randomnum8}":
return strconv.Itoa(rand.Intn(8))
case "{randomnum4}":
return strconv.Itoa(rand.Intn(4))
case "{randomnum3}":
return strconv.Itoa(rand.Intn(3))
case "{randomnum2}":
return strconv.Itoa(rand.Intn(2))
case "{uuid}":
return uuid.Must(uuid.NewV4()).String()
case "{path}":
if pathAvailable {
return req.Props.Uri.Dir() + fs.Separator
}
return match
case "{originname}":
return originName
case "{ext}":
return filepath.Ext(originName)
case "{originname_without_ext}":
return strings.TrimSuffix(originName, filepath.Ext(originName))
default:
return match
}
})
dynamicReplace := func(rule string, pathAvailable bool) string {
return util.ReplaceMagicVar(rule, fs.Separator, pathAvailable, false, currentTime, user.ID, req.Props.Uri.Name(), req.Props.Uri.Dir(), "")
}
dirRule := policy.DirNameRule
dirRule = filepath.ToSlash(dirRule)
dirRule = dynamicReplace(`\{[^{}]+\}`, dirRule, true)
dirRule = dynamicReplace(dirRule, true)
nameRule := policy.FileNameRule
nameRule = dynamicReplace(`\{[^{}]+\}`, nameRule, false)
nameRule = dynamicReplace(nameRule, false)
return path.Join(path.Clean(dirRule), nameRule)
}

View File

@ -0,0 +1,150 @@
package dbfs
import (
"context"
"path"
"strings"
"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/samber/lo"
)
// emitFileCreated notifies all eligible subscribers that file was created.
func (f *DBFS) emitFileCreated(ctx context.Context, file *File) {
	fileID := hashid.EncodeFileID(f.hasher, file.Model.ID)
	for _, sub := range f.getEligibleSubscriber(ctx, file, true) {
		sub.Publish(eventhub.Event{
			Type:   eventhub.EventTypeCreate,
			FileID: fileID,
			From:   sub.relativePath(file),
		})
	}
}
// emitFileModified notifies all eligible subscribers that file was modified.
func (f *DBFS) emitFileModified(ctx context.Context, file *File) {
	fileID := hashid.EncodeFileID(f.hasher, file.Model.ID)
	for _, sub := range f.getEligibleSubscriber(ctx, file, true) {
		sub.Publish(eventhub.Event{
			Type:   eventhub.EventTypeModify,
			FileID: fileID,
			From:   sub.relativePath(file),
		})
	}
}
// emitFileRenamed notifies all eligible subscribers that file was renamed to
// newName (same directory; the path's last segment changes).
func (f *DBFS) emitFileRenamed(ctx context.Context, file *File, newName string) {
	subscribers := f.getEligibleSubscriber(ctx, file, true)
	for _, subscriber := range subscribers {
		from := subscriber.relativePath(file)
		// Replace the trailing file name with the new one to form the
		// destination path as seen by this subscriber.
		to := strings.TrimSuffix(from, file.Name()) + newName
		subscriber.Publish(eventhub.Event{
			Type:   eventhub.EventTypeRename,
			FileID: hashid.EncodeFileID(f.hasher, file.Model.ID),
			// Reuse the already-computed path instead of calling
			// relativePath a second time (same value, redundant work).
			From: from,
			To:   to,
		})
	}
}
// emitFileDeleted notifies all eligible subscribers about each deleted file.
func (f *DBFS) emitFileDeleted(ctx context.Context, files ...*File) {
	for _, file := range files {
		fileID := hashid.EncodeFileID(f.hasher, file.Model.ID)
		for _, sub := range f.getEligibleSubscriber(ctx, file, true) {
			sub.Publish(eventhub.Event{
				Type:   eventhub.EventTypeDelete,
				FileID: fileID,
				From:   sub.relativePath(file),
			})
		}
	}
}
// emitFileMoved notifies subscribers about src being moved into directory dst.
// Subscribers that can see both sides get a Rename; subscribers that can only
// see the source get a Delete; subscribers that can only see the destination
// get a Create.
func (f *DBFS) emitFileMoved(ctx context.Context, src, dst *File) {
	// Index eligible subscribers by ID on each side of the move. Taking the
	// address of the callback parameter is safe: each invocation receives its
	// own copy of the value.
	srcSubMap := lo.SliceToMap(f.getEligibleSubscriber(ctx, src, true), func(subscriber foundSubscriber) (string, *foundSubscriber) {
		return subscriber.ID(), &subscriber
	})
	dstSubMap := lo.SliceToMap(f.getEligibleSubscriber(ctx, dst, false), func(subscriber foundSubscriber) (string, *foundSubscriber) {
		return subscriber.ID(), &subscriber
	})
	for _, subscriber := range srcSubMap {
		subId := subscriber.ID()
		if dstSub, ok := dstSubMap[subId]; ok {
			// Src and Dst subscribed by the same subscriber: a plain rename
			// from the old path to the new path under dst.
			subscriber.Publish(eventhub.Event{
				Type:   eventhub.EventTypeRename,
				FileID: hashid.EncodeFileID(f.hasher, src.Model.ID),
				From:   subscriber.relativePath(src),
				To:     path.Join(dstSub.relativePath(dst), src.Name()),
			})
			// Already handled for this subscriber; skip the Create loop below.
			delete(dstSubMap, subId)
		} else {
			// Only Src is subscribed by the subscriber: the file disappears.
			subscriber.Publish(eventhub.Event{
				Type:   eventhub.EventTypeDelete,
				FileID: hashid.EncodeFileID(f.hasher, src.Model.ID),
				From:   subscriber.relativePath(src),
			})
		}
	}
	for _, subscriber := range dstSubMap {
		// Only Dst is subscribed by the subscriber: the file appears.
		subscriber.Publish(eventhub.Event{
			Type:   eventhub.EventTypeCreate,
			FileID: hashid.EncodeFileID(f.hasher, src.Model.ID),
			From:   path.Join(subscriber.relativePath(dst), src.Name()),
		})
	}
}
// getEligibleSubscriber collects hub subscribers watching file's ancestors
// (or, when checkParentPerm is false, the file itself as well), excluding the
// client that originated the current request.
func (f *DBFS) getEligibleSubscriber(ctx context.Context, file *File, checkParentPerm bool) []foundSubscriber {
	roots := file.Ancestors()
	if !checkParentPerm {
		// Include file itself
		roots = file.AncestorsChain()
	}

	requestInfo := requestinfo.RequestInfoFromContext(ctx)
	found := make([]foundSubscriber, 0)
	for _, root := range roots {
		for _, sub := range f.eventHub.GetSubscribers(ctx, root.Model.ID) {
			// Exclude self from subscribers
			if requestInfo != nil && sub.ID() == requestInfo.ClientID {
				continue
			}
			found = append(found, foundSubscriber{
				Subscriber: sub,
				root:       root,
			})
		}
	}
	return found
}
// foundSubscriber pairs a hub subscriber with the watched root it was found
// under, so event paths can be expressed relative to that root.
type foundSubscriber struct {
	eventhub.Subscriber
	root *File
}

// relativePath returns file's path relative to the subscriber's watched root,
// always prefixed with the path separator.
func (s *foundSubscriber) relativePath(file *File) string {
	rel := strings.TrimPrefix(file.Uri(true).Path(), s.root.Uri(true).Path())
	switch {
	case rel == "":
		return fs.Separator
	case rel[0] == fs.Separator[0]:
		return rel
	default:
		return fs.Separator + rel
	}
}

View File

@ -119,6 +119,7 @@ func (f *DBFS) Create(ctx context.Context, path *fs.URI, fileType types.FileType
}
ancestor = newFile(ancestor, newFolder)
f.emitFileCreated(ctx, ancestor)
} else {
// valide file name
policy, err := f.getPreferredPolicy(ctx, ancestor)
@ -225,6 +226,8 @@ func (f *DBFS) Rename(ctx context.Context, path *fs.URI, newName string) (fs.Fil
return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit rename change", err)
}
f.emitFileRenamed(ctx, target, newName)
return target.Replace(updated), nil
}
@ -303,6 +306,8 @@ func (f *DBFS) SoftDelete(ctx context.Context, path ...*fs.URI) error {
return serializer.NewError(serializer.CodeDBError, "Failed to commit soft-delete change", err)
}
f.emitFileDeleted(ctx, targets...)
return ae.Aggregate()
}
@ -312,9 +317,9 @@ func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([
o.apply(opt)
}
var opt *types.EntityRecycleOption
var opt *types.EntityProps
if o.UnlinkOnly {
opt = &types.EntityRecycleOption{
opt = &types.EntityProps{
UnlinkOnly: true,
}
}
@ -385,7 +390,7 @@ func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([
if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit delete change", err)
}
f.emitFileDeleted(ctx, targets...)
return newStaleEntities, ae.Aggregate()
}
@ -603,10 +608,11 @@ func (f *DBFS) MoveOrCopy(ctx context.Context, path []*fs.URI, dst *fs.URI, isCo
}
var (
storageDiff inventory.StorageDiff
copiedNewTargetsMap map[int]*ent.File
storageDiff inventory.StorageDiff
)
if isCopy {
_, storageDiff, err = f.copyFiles(ctx, fileNavGroup, destination, fc)
copiedNewTargetsMap, storageDiff, err = f.copyFiles(ctx, fileNavGroup, destination, fc)
} else {
storageDiff, err = f.moveFiles(ctx, targets, destination, fc, dstNavigator)
}
@ -621,6 +627,14 @@ func (f *DBFS) MoveOrCopy(ctx context.Context, path []*fs.URI, dst *fs.URI, isCo
return serializer.NewError(serializer.CodeDBError, "Failed to commit move change", err)
}
for _, target := range targets {
if isCopy {
f.emitFileCreated(ctx, newFile(destination, copiedNewTargetsMap[target.ID()]))
} else {
f.emitFileMoved(ctx, target, destination)
}
}
// TODO: after move, dbfs cache should be cleared
}
@ -716,6 +730,8 @@ func (f *DBFS) deleteEntity(ctx context.Context, target *File, entityId int) (in
return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove upload session metadata", err)
}
}
f.emitFileModified(ctx, target)
return diff, nil
}
@ -753,10 +769,11 @@ func (f *DBFS) setCurrentVersion(ctx context.Context, target *File, versionId in
return serializer.NewError(serializer.CodeDBError, "Failed to commit set current version", err)
}
f.emitFileModified(ctx, target)
return nil
}
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityRecycleOption) ([]fs.Entity, inventory.StorageDiff, error) {
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityProps) ([]fs.Entity, inventory.StorageDiff, error) {
if f.user.Edges.Group == nil {
return nil, nil, fmt.Errorf("user group not loaded")
}

View File

@ -2,6 +2,7 @@ package dbfs
import (
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
)
@ -26,6 +27,7 @@ type dbfsOption struct {
streamListResponseCallback func(parent fs.File, file []fs.File)
ancestor *File
notRoot bool
encryptMetadata *types.EncryptMetadata
}
func newDbfsOption() *dbfsOption {
@ -50,6 +52,13 @@ func (f optionFunc) Apply(o any) {
}
}
// WithEncryptMetadata sets the encrypt metadata for the upload operation.
func WithEncryptMetadata(encryptMetadata *types.EncryptMetadata) fs.Option {
return optionFunc(func(o *dbfsOption) {
o.encryptMetadata = encryptMetadata
})
}
// WithFilePublicMetadata enables loading file public metadata.
func WithFilePublicMetadata() fs.Option {
return optionFunc(func(o *dbfsOption) {

View File

@ -129,6 +129,20 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
return nil, err
}
// Encryption setting
var (
encryptMetadata *types.EncryptMetadata
)
if !policy.Settings.Encryption || req.ImportFrom != nil || len(req.Props.EncryptionSupported) == 0 {
req.Props.EncryptionSupported = nil
} else {
res, err := f.generateEncryptMetadata(ctx, req, policy)
if err != nil {
return nil, serializer.NewError(serializer.CodeInternalSetting, "Failed to generate encrypt metadata", err)
}
encryptMetadata = res
}
// validate upload request
if err := validateNewFile(req.Props.Uri.Name(), req.Props.Size, policy); err != nil {
return nil, err
@ -146,7 +160,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
if req.Props.SavePath == "" || isThumbnailAndPolicyNotAvailable {
req.Props.SavePath = generateSavePath(policy, req, f.user)
if isThumbnailAndPolicyNotAvailable {
req.Props.SavePath = req.Props.SavePath + f.settingClient.ThumbEntitySuffix(ctx)
req.Props.SavePath = path.Clean(util.ReplaceMagicVar(f.settingClient.ThumbEntitySuffix(ctx), fs.Separator, true, true, time.Now(), f.user.ID, req.Props.Uri.Name(), req.Props.Uri.Path(), req.Props.SavePath))
}
}
@ -170,6 +184,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
entity, err := f.CreateEntity(ctx, ancestor, policy, entityType, req,
WithPreviousVersion(req.Props.PreviousVersion),
fs.WithUploadRequest(req),
WithEncryptMetadata(encryptMetadata),
WithRemoveStaleEntities(),
)
if err != nil {
@ -185,6 +200,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
WithPreferredStoragePolicy(policy),
WithErrorOnConflict(),
WithAncestor(ancestor),
WithEncryptMetadata(encryptMetadata),
)
if err != nil {
_ = inventory.Rollback(dbTx)
@ -215,14 +231,15 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
session := &fs.UploadSession{
Props: &fs.UploadProps{
Uri: req.Props.Uri,
Size: req.Props.Size,
SavePath: req.Props.SavePath,
LastModified: req.Props.LastModified,
UploadSessionID: req.Props.UploadSessionID,
ExpireAt: req.Props.ExpireAt,
EntityType: req.Props.EntityType,
Metadata: req.Props.Metadata,
Uri: req.Props.Uri,
Size: req.Props.Size,
SavePath: req.Props.SavePath,
LastModified: req.Props.LastModified,
UploadSessionID: req.Props.UploadSessionID,
ExpireAt: req.Props.ExpireAt,
EntityType: req.Props.EntityType,
Metadata: req.Props.Metadata,
ClientSideEncrypted: req.Props.ClientSideEncrypted,
},
FileID: fileId,
NewFileCreated: !fileExisted,
@ -234,6 +251,10 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
LockToken: lockToken, // Prevent lock being released.
}
if encryptMetadata != nil {
session.EncryptMetadata = encryptMetadata
}
// TODO: frontend should create new upload session if resumed session does not exist.
return session, nil
}
@ -334,6 +355,8 @@ func (f *DBFS) CompleteUpload(ctx context.Context, session *fs.UploadSession) (f
}
}
f.emitFileModified(ctx, filePrivate)
file, err = f.Get(ctx, session.Props.Uri, WithFileEntities(), WithNotRoot())
if err != nil {
return nil, fmt.Errorf("failed to get updated file: %w", err)

View File

@ -183,6 +183,8 @@ type (
UploadSessionID() *uuid.UUID
CreatedBy() *ent.User
Model() *ent.Entity
Props() *types.EntityProps
Encrypted() bool
}
FileExtendedInfo struct {
@ -238,38 +240,40 @@ type (
// UploadCredential for uploading files in client side.
UploadCredential struct {
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"` // 分块大小0 为部分快
Expires int64 `json:"expires"` // 上传凭证过期时间, Unix 时间戳
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
UploadID string `json:"uploadID,omitempty"`
Callback string `json:"callback,omitempty"` // 回调地址
Uri string `json:"uri,omitempty"` // 存储路径
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // COS用有效期
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *ent.StoragePolicy
CallbackSecret string `json:"callback_secret,omitempty"`
MimeType string `json:"mime_type,omitempty"` // Expected mimetype
UploadPolicy string `json:"upload_policy,omitempty"` // Upyun upload policy
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"` // 分块大小0 为部分快
Expires int64 `json:"expires"` // 上传凭证过期时间, Unix 时间戳
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
UploadID string `json:"uploadID,omitempty"`
Callback string `json:"callback,omitempty"`
Uri string `json:"uri,omitempty"` // 存储路径
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // COS用有效期
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *ent.StoragePolicy
CallbackSecret string `json:"callback_secret,omitempty"`
MimeType string `json:"mime_type,omitempty"` // Expected mimetype
UploadPolicy string `json:"upload_policy,omitempty"` // Upyun upload policy
EncryptMetadata *types.EncryptMetadata `json:"encrypt_metadata,omitempty"`
}
// UploadSession stores the information of an upload session, used in server side.
UploadSession struct {
UID int // 发起者
Policy *ent.StoragePolicy
FileID int // ID of the placeholder file
EntityID int // ID of the new entity
Callback string // 回调 URL 地址
CallbackSecret string // Callback secret
UploadID string // Multi-part upload ID
UploadURL string
Credential string
ChunkSize int64
SentinelTaskID int
NewFileCreated bool // If new file is created for this session
Importing bool // If the upload is importing from another file
UID int // 发起者
Policy *ent.StoragePolicy
FileID int // ID of the placeholder file
EntityID int // ID of the new entity
Callback string // 回调 URL 地址
CallbackSecret string // Callback secret
UploadID string // Multi-part upload ID
UploadURL string
Credential string
ChunkSize int64
SentinelTaskID int
NewFileCreated bool // If new file is created for this session
Importing bool // If the upload is importing from another file
EncryptMetadata *types.EncryptMetadata
LockToken string // Token of the locked placeholder file
Props *UploadProps
@ -288,8 +292,10 @@ type (
PreviousVersion string
// EntityType is the type of the entity to be created. If not set, a new file will be created
// with a default version entity. This will be set in update request for existing files.
EntityType *types.EntityType
ExpireAt time.Time
EntityType *types.EntityType
ExpireAt time.Time
EncryptionSupported []types.Cipher
ClientSideEncrypted bool // Whether the file stream is already encrypted by client side.
}
// FsOption options for underlying file system.
@ -782,6 +788,14 @@ func (e *DbEntity) Model() *ent.Entity {
return e.model
}
// Props returns the raw entity props stored on the underlying ent model.
// May be nil when the entity carries no extended properties.
func (e *DbEntity) Props() *types.EntityProps {
	return e.model.Props
}
// Encrypted reports whether this entity's payload is stored encrypted,
// i.e. encryption metadata is present on its props.
func (e *DbEntity) Encrypted() bool {
	return e.model.Props != nil && e.model.Props.EncryptMetadata != nil
}
func NewEmptyEntity(u *ent.User) Entity {
return &DbEntity{
model: &ent.Entity{

View File

@ -120,7 +120,7 @@ func (m *manager) GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectL
}
source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx))
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
sourceUrl, err := source.Url(ctx,
entitysource.WithSpeedLimit(int64(m.user.Edges.Group.SpeedLimit)),
entitysource.WithDisplayName(file.Name()),
@ -182,7 +182,7 @@ func (m *manager) GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.Dir
}
source := entitysource.NewEntitySource(primaryEntity, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx))
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
downloadUrl, err := source.Url(ctx,
entitysource.WithExpire(o.Expire),
entitysource.WithDownload(o.IsDownload),
@ -282,7 +282,7 @@ func (m *manager) GetEntityUrls(ctx context.Context, args []GetEntityUrlArgs, op
// Cache miss, Generate new url
source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx))
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
downloadUrl, err := source.Url(ctx,
entitysource.WithExpire(o.Expire),
entitysource.WithDownload(o.IsDownload),
@ -349,7 +349,7 @@ func (m *manager) GetEntitySource(ctx context.Context, entityID int, opts ...fs.
}
return entitysource.NewEntitySource(entity, handler, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(), m.l,
m.config, m.dep.MimeDetector(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
}
func (l *manager) SetCurrentVersion(ctx context.Context, path *fs.URI, version int) error {

View File

@ -22,6 +22,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@ -83,6 +84,7 @@ type EntitySourceOptions struct {
OneTimeDownloadKey string
Ctx context.Context
IsThumb bool
DisableCryptor bool
}
type EntityUrl struct {
@ -143,22 +145,31 @@ func WithThumb(isThumb bool) EntitySourceOption {
})
}
// WithDisableCryptor disable cryptor for file source, file stream will be
// presented as is.
func WithDisableCryptor() EntitySourceOption {
return EntitySourceOptionFunc(func(option any) {
option.(*EntitySourceOptions).DisableCryptor = true
})
}
func (f EntitySourceOptionFunc) Apply(option any) {
f(option)
}
type (
entitySource struct {
e fs.Entity
handler driver.Handler
policy *ent.StoragePolicy
generalAuth auth.Auth
settings setting.Provider
hasher hashid.Encoder
c request.Client
l logging.Logger
config conf.ConfigProvider
mime mime.MimeDetector
e fs.Entity
handler driver.Handler
policy *ent.StoragePolicy
generalAuth auth.Auth
settings setting.Provider
hasher hashid.Encoder
c request.Client
l logging.Logger
config conf.ConfigProvider
mime mime.MimeDetector
encryptorFactory encrypt.CryptorFactory
rsc io.ReadCloser
pos int64
@ -197,20 +208,22 @@ func NewEntitySource(
l logging.Logger,
config conf.ConfigProvider,
mime mime.MimeDetector,
encryptorFactory encrypt.CryptorFactory,
opts ...EntitySourceOption,
) EntitySource {
s := &entitySource{
e: e,
handler: handler,
policy: policy,
generalAuth: generalAuth,
settings: settings,
hasher: hasher,
c: c,
config: config,
l: l,
mime: mime,
o: &EntitySourceOptions{},
e: e,
handler: handler,
policy: policy,
generalAuth: generalAuth,
settings: settings,
hasher: hasher,
c: c,
config: config,
l: l,
mime: mime,
encryptorFactory: encryptorFactory,
o: &EntitySourceOptions{},
}
for _, opt := range opts {
opt.Apply(s.o)
@ -237,7 +250,7 @@ func (f *entitySource) CloneToLocalSrc(t types.EntityType, src string) (EntitySo
policy := &ent.StoragePolicy{Type: types.PolicyTypeLocal}
handler := local.New(policy, f.l, f.config)
newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime).(*entitySource)
newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime, f.encryptorFactory).(*entitySource)
newSrc.o = f.o
return newSrc, nil
}
@ -328,6 +341,20 @@ func (f *entitySource) Serve(w http.ResponseWriter, r *http.Request, opts ...Ent
response.Header.Del("ETag")
response.Header.Del("Content-Disposition")
response.Header.Del("Cache-Control")
// If the response is successful, decrypt the body if needed
if response.StatusCode >= 200 && response.StatusCode < 300 {
// Parse offset from Content-Range header if present
offset := parseContentRangeOffset(response.Header.Get("Content-Range"))
body, err := f.getDecryptedRsc(response.Body, offset)
if err != nil {
return fmt.Errorf("failed to get decrypted rsc: %w", err)
}
response.Body = body
}
logging.Request(f.l,
false,
response.StatusCode,
@ -554,7 +581,7 @@ func (f *entitySource) ShouldInternalProxy(opts ...EntitySourceOption) bool {
}
handlerCapability := f.handler.Capabilities()
return f.e.ID() == 0 || handlerCapability.StaticFeatures.Enabled(int(driver.HandlerCapabilityProxyRequired)) ||
f.policy.Settings.InternalProxy && !f.o.NoInternalProxy
(f.policy.Settings.InternalProxy || f.e.Encrypted()) && !f.o.NoInternalProxy
}
func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*EntityUrl, error) {
@ -582,6 +609,7 @@ func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*En
// 1. Internal proxy is required by driver's definition
// 2. Internal proxy is enabled in Policy setting and not disabled by option
// 3. It's an empty entity.
// 4. The entity is encrypted and internal proxy not disabled by option
handlerCapability := f.handler.Capabilities()
if f.ShouldInternalProxy() {
siteUrl := f.settings.SiteURL(ctx)
@ -655,6 +683,7 @@ func (f *entitySource) resetRequest() error {
func (f *entitySource) getRsc(pos int64) (io.ReadCloser, error) {
// For inbound files, we can use the handler to open the file directly
var rsc io.ReadCloser
if f.IsLocal() {
file, err := f.handler.Open(f.o.Ctx, f.e.Source())
if err != nil {
@ -670,46 +699,75 @@ func (f *entitySource) getRsc(pos int64) (io.ReadCloser, error) {
if f.o.SpeedLimit > 0 {
bucket := ratelimit.NewBucketWithRate(float64(f.o.SpeedLimit), f.o.SpeedLimit)
return lrs{file, ratelimit.Reader(file, bucket)}, nil
rsc = lrs{file, ratelimit.Reader(file, bucket)}
} else {
return file, nil
rsc = file
}
}
var urlStr string
now := time.Now()
// Check if we have a valid cached URL and expiry
if f.cachedUrl != "" && now.Before(f.cachedExpiry.Add(-time.Minute)) {
// Use cached URL if it's still valid (with 1 minute buffer before expiry)
urlStr = f.cachedUrl
} else {
// Generate new URL and cache it
expire := now.Add(defaultUrlExpire)
u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
if err != nil {
return nil, fmt.Errorf("failed to generate download url: %w", err)
var urlStr string
now := time.Now()
// Check if we have a valid cached URL and expiry
if f.cachedUrl != "" && now.Before(f.cachedExpiry.Add(-time.Minute)) {
// Use cached URL if it's still valid (with 1 minute buffer before expiry)
urlStr = f.cachedUrl
} else {
// Generate new URL and cache it
expire := now.Add(defaultUrlExpire)
u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
if err != nil {
return nil, fmt.Errorf("failed to generate download url: %w", err)
}
// Cache the URL and expiry
f.cachedUrl = u.Url
f.cachedExpiry = expire
urlStr = u.Url
}
// Cache the URL and expiry
f.cachedUrl = u.Url
f.cachedExpiry = expire
urlStr = u.Url
h := http.Header{}
h.Set("Range", fmt.Sprintf("bytes=%d-", pos))
resp := f.c.Request(http.MethodGet, urlStr, nil,
request.WithContext(f.o.Ctx),
request.WithLogger(f.l),
request.WithHeader(h),
).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
if resp.Err != nil {
return nil, fmt.Errorf("failed to request download url: %w", resp.Err)
}
rsc = resp.Response.Body
}
h := http.Header{}
h.Set("Range", fmt.Sprintf("bytes=%d-", pos))
resp := f.c.Request(http.MethodGet, urlStr, nil,
request.WithContext(f.o.Ctx),
request.WithLogger(f.l),
request.WithHeader(h),
).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
if resp.Err != nil {
return nil, fmt.Errorf("failed to request download url: %w", resp.Err)
var err error
rsc, err = f.getDecryptedRsc(rsc, pos)
if err != nil {
return nil, fmt.Errorf("failed to get decrypted rsc: %w", err)
}
return resp.Response.Body, nil
return rsc, nil
}
// getDecryptedRsc wraps rsc with a decrypting reader when the entity is
// encrypted and decryption has not been disabled via WithDisableCryptor.
// pos is the absolute offset within the stored (encrypted) stream at which
// rsc starts; plain entities are passed through untouched.
func (f *entitySource) getDecryptedRsc(rsc io.ReadCloser, pos int64) (io.ReadCloser, error) {
	props := f.e.Props()
	if props == nil || props.EncryptMetadata == nil || f.o.DisableCryptor {
		// Nothing to decrypt: hand the source stream back as-is.
		return rsc, nil
	}

	cryptor, err := f.encryptorFactory(props.EncryptMetadata.Algorithm)
	if err != nil {
		return nil, fmt.Errorf("failed to create decryptor: %w", err)
	}
	if err := cryptor.LoadMetadata(f.o.Ctx, props.EncryptMetadata); err != nil {
		return nil, fmt.Errorf("failed to load metadata: %w", err)
	}
	if err := cryptor.SetSource(rsc, nil, f.e.Size(), pos); err != nil {
		return nil, fmt.Errorf("failed to set source: %w", err)
	}
	// The cryptor itself acts as the decrypted ReadCloser.
	return cryptor, nil
}
// capExpireTime make sure expire time is not too long or too short (if min or max is set)
@ -1002,6 +1060,33 @@ func sumRangesSize(ranges []httpRange) (size int64) {
return
}
// parseContentRangeOffset extracts the starting byte offset from a
// Content-Range response header, whose format is "bytes start-end/total"
// (e.g. "bytes 100-200/1000"). It returns 0 when the header is empty,
// uses a different range unit, or cannot be parsed.
func parseContentRangeOffset(contentRange string) int64 {
	const unitPrefix = "bytes "
	if !strings.HasPrefix(contentRange, unitPrefix) {
		// Also covers the empty-header case.
		return 0
	}
	spec := strings.TrimPrefix(contentRange, unitPrefix)
	startPart, _, found := strings.Cut(spec, "-")
	if !found || startPart == "" {
		// No dash, or an empty start field (e.g. "bytes */1000").
		return 0
	}
	start, err := strconv.ParseInt(startPart, 10, 64)
	if err != nil {
		return 0
	}
	return start
}
// countingWriter counts how many bytes have been written to it.
type countingWriter int64

View File

@ -147,7 +147,8 @@ func NewFileManager(dep dependency.Dep, u *ent.User) FileManager {
user: u,
settings: dep.SettingProvider(),
fs: dbfs.NewDatabaseFS(u, dep.FileClient(), dep.ShareClient(), dep.Logger(), dep.LockSystem(),
dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV(), dep.DirectLinkClient()),
dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV(),
dep.DirectLinkClient(), dep.EncryptorFactory(context.TODO()), dep.EventHub()),
kv: dep.KV(),
config: config,
auth: dep.GeneralAuth(),

View File

@ -222,7 +222,7 @@ func (m *manager) RecycleEntities(ctx context.Context, force bool, entityIDs ...
toBeDeletedSrc := lo.Map(lo.Filter(chunk, func(item fs.Entity, index int) bool {
// Only delete entities that are not marked as "unlink only"
return item.Model().RecycleOptions == nil || !item.Model().RecycleOptions.UnlinkOnly
return item.Model().Props == nil || !item.Model().Props.UnlinkOnly
}), func(entity fs.Entity, index int) string {
return entity.Source()
})

View File

@ -4,8 +4,8 @@ import (
"context"
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/thumb"
"os"
"path"
"runtime"
"time"
@ -18,6 +18,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/thumb"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/samber/lo"
)
@ -64,7 +65,8 @@ func (m *manager) Thumbnail(ctx context.Context, uri *fs.URI) (entitysource.Enti
capabilities := handler.Capabilities()
// Check if file extension and size is supported by native policy generator.
if capabilities.ThumbSupportAllExts || util.IsInExtensionList(capabilities.ThumbSupportedExts, file.DisplayName()) &&
(capabilities.ThumbMaxSize == 0 || latest.Size() <= capabilities.ThumbMaxSize) {
(capabilities.ThumbMaxSize == 0 || latest.Size() <= capabilities.ThumbMaxSize) &&
!latest.Encrypted() {
thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(latest), fs.WithUseThumb(true))
if err != nil {
return nil, fmt.Errorf("failed to get latest entity source: %w", err)
@ -184,7 +186,7 @@ func (m *manager) generateThumb(ctx context.Context, uri *fs.URI, ext string, es
Props: &fs.UploadProps{
Uri: uri,
Size: fileInfo.Size(),
SavePath: es.Entity().Source() + m.settings.ThumbEntitySuffix(ctx),
SavePath: path.Clean(util.ReplaceMagicVar(m.settings.ThumbEntitySuffix(ctx), fs.Separator, true, true, time.Now(), m.user.ID, uri.Name(), uri.Path(), es.Entity().Source())),
MimeType: m.dep.MimeDetector(ctx).TypeByName("thumb.jpg"),
EntityType: &entityType,
},

View File

@ -29,7 +29,7 @@ type (
// ConfirmUploadSession confirms whether upload session is valid for upload.
ConfirmUploadSession(ctx context.Context, session *fs.UploadSession, chunkIndex int) (fs.File, error)
// Upload uploads file data to storage
Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error
Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy, session *fs.UploadSession) error
// CompleteUpload completes upload session and returns file object
CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error)
// CancelUploadSession cancels upload session
@ -93,7 +93,8 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
uploadSession.ChunkSize = uploadSession.Policy.Settings.ChunkSize
// Create upload credential for underlying storage driver
credential := &fs.UploadCredential{}
if !uploadSession.Policy.Settings.Relay || m.stateless {
unrelayed := !uploadSession.Policy.Settings.Relay || m.stateless
if unrelayed {
credential, err = d.Token(ctx, uploadSession, req)
if err != nil {
m.OnUploadFailed(ctx, uploadSession)
@ -103,12 +104,18 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
// For relayed upload, we don't need to create credential
uploadSession.ChunkSize = 0
credential.ChunkSize = 0
credential.EncryptMetadata = nil
uploadSession.Props.ClientSideEncrypted = false
}
credential.SessionID = uploadSession.Props.UploadSessionID
credential.Expires = req.Props.ExpireAt.Unix()
credential.StoragePolicy = uploadSession.Policy
credential.CallbackSecret = uploadSession.CallbackSecret
credential.Uri = uploadSession.Props.Uri.String()
credential.EncryptMetadata = uploadSession.EncryptMetadata
if !unrelayed {
credential.EncryptMetadata = nil
}
// If upload sentinel check is required, queue a check task
if d.Capabilities().StaticFeatures.Enabled(int(driver.HandlerCapabilityUploadSentinelRequired)) {
@ -178,12 +185,34 @@ func (m *manager) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts
return m.fs.PrepareUpload(ctx, req, opts...)
}
func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error {
func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy, session *fs.UploadSession) error {
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, policy))
if err != nil {
return err
}
if session != nil && session.EncryptMetadata != nil && !req.Props.ClientSideEncrypted {
cryptor, err := m.dep.EncryptorFactory(ctx)(session.EncryptMetadata.Algorithm)
if err != nil {
return fmt.Errorf("failed to create cryptor: %w", err)
}
err = cryptor.LoadMetadata(ctx, session.EncryptMetadata)
if err != nil {
return fmt.Errorf("failed to load encrypt metadata: %w", err)
}
if err := cryptor.SetSource(req.File, req.Seeker, req.Props.Size, 0); err != nil {
return fmt.Errorf("failed to set source: %w", err)
}
req.File = cryptor
if req.Seeker != nil {
req.Seeker = cryptor
}
}
if err := d.Put(ctx, req); err != nil {
return serializer.NewError(serializer.CodeIOFailed, "Failed to upload file", err)
}
@ -301,6 +330,8 @@ func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.
}
req.Props.UploadSessionID = uuid.Must(uuid.NewV4()).String()
// Sever side supported encryption algorithms
req.Props.EncryptionSupported = []types.Cipher{types.CipherAES256CTR}
if m.stateless {
return m.updateStateless(ctx, req, o)
@ -312,7 +343,7 @@ func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.
return nil, fmt.Errorf("faield to prepare uplaod: %w", err)
}
if err := m.Upload(ctx, req, uploadSession.Policy); err != nil {
if err := m.Upload(ctx, req, uploadSession.Policy, uploadSession); err != nil {
m.OnUploadFailed(ctx, uploadSession)
return nil, fmt.Errorf("failed to upload new entity: %w", err)
}
@ -368,7 +399,7 @@ func (m *manager) updateStateless(ctx context.Context, req *fs.UploadRequest, o
}
req.Props = res.Req.Props
if err := m.Upload(ctx, req, res.Session.Policy); err != nil {
if err := m.Upload(ctx, req, res.Session.Policy, res.Session); err != nil {
if err := o.Node.OnUploadFailed(ctx, &fs.StatelessOnUploadFailedService{
UploadSession: res.Session,
UserID: o.StatelessUserID,

View File

@ -18,6 +18,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
@ -217,11 +218,18 @@ func (m *CreateArchiveTask) listEntitiesAndSendToSlave(ctx context.Context, dep
user := inventory.UserFromContext(ctx)
fm := manager.NewFileManager(dep, user)
storagePolicyClient := dep.StoragePolicyClient()
masterKey, _ := dep.MasterEncryptKeyVault(ctx).GetMasterKey(ctx)
failed, err := fm.CreateArchive(ctx, uris, io.Discard,
fs.WithDryRun(func(name string, e fs.Entity) {
entityModel, err := decryptEntityKeyIfNeeded(masterKey, e.Model())
if err != nil {
m.l.Warning("Failed to decrypt entity key for %q: %s", name, err)
return
}
payload.Entities = append(payload.Entities, SlaveCreateArchiveEntity{
Entity: e.Model(),
Entity: entityModel,
Path: name,
})
if _, ok := payload.Policies[e.PolicyID()]; !ok {
@ -680,3 +688,18 @@ func (m *SlaveCreateArchiveTask) Progress(ctx context.Context) queue.Progresses
return m.progress
}
// decryptEntityKeyIfNeeded ensures the entity's per-file encryption key is
// available in plain text before the entity is shipped to a slave node.
// Entities that are unencrypted, or whose key is already decrypted, are
// returned unchanged. Note the entity's props are mutated in place: the
// plaintext key is stored and the wrapped key is cleared.
func decryptEntityKeyIfNeeded(masterKey []byte, entity *ent.Entity) (*ent.Entity, error) {
	props := entity.Props
	if props == nil || props.EncryptMetadata == nil || props.EncryptMetadata.KeyPlainText != nil {
		// Nothing to decrypt, or already decrypted.
		return entity, nil
	}

	plain, err := encrypt.DecryptWithMasterKey(masterKey, props.EncryptMetadata.Key)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt entity key: %w", err)
	}
	props.EncryptMetadata.KeyPlainText = plain
	props.EncryptMetadata.Key = nil
	return entity, nil
}

View File

@ -194,9 +194,15 @@ func (m *ExtractArchiveTask) createSlaveExtractTask(ctx context.Context, dep dep
return task.StatusError, fmt.Errorf("failed to get policy: %w", err)
}
masterKey, _ := dep.MasterEncryptKeyVault(ctx).GetMasterKey(ctx)
entityModel, err := decryptEntityKeyIfNeeded(masterKey, archiveFile.PrimaryEntity().Model())
if err != nil {
return task.StatusError, fmt.Errorf("failed to decrypt entity key for archive file %q: %s", archiveFile.DisplayName(), err)
}
payload := &SlaveExtractArchiveTaskState{
FileName: archiveFile.DisplayName(),
Entity: archiveFile.PrimaryEntity().Model(),
Entity: entityModel,
Policy: policy,
Encoding: m.state.Encoding,
Dst: m.state.Dst,

View File

@ -100,7 +100,7 @@ func (f *ffprobeExtractor) Extract(ctx context.Context, ext string, source entit
}
var input string
if source.IsLocal() {
if source.IsLocal() && !source.Entity().Encrypted() {
input = source.LocalPath(ctx)
} else {
expire := time.Now().Add(UrlExpire)

View File

@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"runtime/debug"
"sync"
"sync/atomic"
"time"
@ -284,6 +285,7 @@ func (q *queue) run(ctx context.Context, t Task) (task.Status, error) {
// handle panic issue
defer func() {
if p := recover(); p != nil {
q.logger.Error("panic in queue %q: %s", q.name, debug.Stack())
panicChan <- p
}
}()

View File

@ -30,6 +30,7 @@ const (
SiteVersionHeader = constants.CrHeaderPrefix + "Version"
SiteIDHeader = constants.CrHeaderPrefix + "Site-Id"
SlaveNodeIDHeader = constants.CrHeaderPrefix + "Node-Id"
ClientIDHeader = constants.CrHeaderPrefix + "Client-Id"
LocalIP = "localhost"
)

View File

@ -2,13 +2,14 @@ package setting
import (
"context"
"os"
"strings"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/samber/lo"
"os"
"strings"
)
const (

View File

@ -2,6 +2,7 @@ package setting
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/url"
@ -10,7 +11,6 @@ import (
"time"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
@ -208,6 +208,14 @@ type (
CustomHTML(ctx context.Context) *CustomHTML
// FFMpegExtraArgs returns the extra arguments of ffmpeg thumb generator.
FFMpegExtraArgs(ctx context.Context) string
// MasterEncryptKey returns the master encrypt key.
MasterEncryptKey(ctx context.Context) []byte
// MasterEncryptKeyVault returns the master encrypt key vault type.
MasterEncryptKeyVault(ctx context.Context) MasterEncryptKeyVaultType
// MasterEncryptKeyFile returns the master encrypt key file.
MasterEncryptKeyFile(ctx context.Context) string
// ShowEncryptionStatus returns true if encryption status is shown.
ShowEncryptionStatus(ctx context.Context) bool
}
UseFirstSiteUrlCtxKey = struct{}
)
@ -235,6 +243,27 @@ type (
}
)
// ShowEncryptionStatus reports whether per-file encryption status should be
// displayed. Backed by the "show_encryption_status" setting, default true.
func (s *settingProvider) ShowEncryptionStatus(ctx context.Context) bool {
	return s.getBoolean(ctx, "show_encryption_status", true)
}
// MasterEncryptKeyFile returns the path of the file holding the master
// encrypt key ("encrypt_master_key_file" setting); empty when unset.
func (s *settingProvider) MasterEncryptKeyFile(ctx context.Context) string {
	return s.getString(ctx, "encrypt_master_key_file", "")
}
// MasterEncryptKeyVault returns where the master encrypt key is stored
// ("encrypt_master_key_vault" setting). Defaults to the "setting" vault.
func (s *settingProvider) MasterEncryptKeyVault(ctx context.Context) MasterEncryptKeyVaultType {
	return MasterEncryptKeyVaultType(s.getString(ctx, "encrypt_master_key_vault", "setting"))
}
// MasterEncryptKey returns the master encrypt key decoded from the
// base64-encoded "encrypt_master_key" setting.
// NOTE(review): a malformed base64 value is silently swallowed here and nil
// is returned, indistinguishable from "no key configured" — consider logging.
func (s *settingProvider) MasterEncryptKey(ctx context.Context) []byte {
	encoded := s.getString(ctx, "encrypt_master_key", "")
	key, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		// Invalid base64: treated as no key available.
		return nil
	}
	return key
}
func (s *settingProvider) CustomHTML(ctx context.Context) *CustomHTML {
return &CustomHTML{
HeadlessFooter: s.getString(ctx, "headless_footer_html", ""),
@ -481,7 +510,7 @@ func (s *settingProvider) ThumbEncode(ctx context.Context) *ThumbEncode {
}
func (s *settingProvider) ThumbEntitySuffix(ctx context.Context) string {
return s.getString(ctx, "thumb_entity_suffix", "._thumb")
return s.getString(ctx, "thumb_entity_suffix", "{blob_path}/{blob_name}._thumb")
}
func (s *settingProvider) ThumbSlaveSidecarSuffix(ctx context.Context) string {

View File

@ -223,3 +223,11 @@ type CustomHTML struct {
HeadlessBody string `json:"headless_bottom,omitempty"`
SidebarBottom string `json:"sidebar_bottom,omitempty"`
}
// MasterEncryptKeyVaultType identifies the source the master encrypt key is
// loaded from.
type MasterEncryptKeyVaultType string

const (
	// MasterEncryptKeyVaultTypeSetting stores the key in the settings store.
	MasterEncryptKeyVaultTypeSetting = MasterEncryptKeyVaultType("setting")
	// MasterEncryptKeyVaultTypeEnv reads the key from the environment.
	// NOTE(review): the env/file loading code is not visible here — confirm
	// exact variable name and file format against the vault implementation.
	MasterEncryptKeyVaultTypeEnv = MasterEncryptKeyVaultType("env")
	// MasterEncryptKeyVaultTypeFile reads the key from a file on disk.
	MasterEncryptKeyVaultTypeFile = MasterEncryptKeyVaultType("file")
)

View File

@ -9,7 +9,6 @@ import (
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
@ -51,10 +50,17 @@ func (f *FfmpegGenerator) Generate(ctx context.Context, es entitysource.EntitySo
input := ""
expire := time.Now().Add(urlTimeout)
if es.IsLocal() {
if es.IsLocal() && !es.Entity().Encrypted() {
input = es.LocalPath(ctx)
} else {
src, err := es.Url(driver.WithForcePublicEndpoint(ctx, false), entitysource.WithNoInternalProxy(), entitysource.WithContext(ctx), entitysource.WithExpire(&expire))
opts := []entitysource.EntitySourceOption{
entitysource.WithContext(ctx),
entitysource.WithExpire(&expire),
}
if !es.Entity().Encrypted() {
opts = append(opts, entitysource.WithNoInternalProxy())
}
src, err := es.Url(ctx, opts...)
if err != nil {
return &Result{Path: tempOutputPath}, fmt.Errorf("failed to get entity url: %w", err)
}

View File

@ -42,7 +42,7 @@ func (l *LibreOfficeGenerator) Generate(ctx context.Context, es entitysource.Ent
)
tempInputPath := ""
if es.IsLocal() {
if es.IsLocal() && !es.Entity().Encrypted() {
tempInputPath = es.LocalPath(ctx)
} else {
// If not local policy files, download to temp folder

View File

@ -46,7 +46,7 @@ func (v *VipsGenerator) Generate(ctx context.Context, es entitysource.EntitySour
usePipe := true
if runtime.GOOS == "windows" {
// Pipe IO is not working on Windows for VIPS
if es.IsLocal() {
if es.IsLocal() && !es.Entity().Encrypted() {
// escape [ and ] in file name
input = fmt.Sprintf("[filename=\"%s\"]", es.LocalPath(ctx))
usePipe = false

View File

@ -3,12 +3,17 @@ package util
import (
"context"
"fmt"
"github.com/gin-gonic/gin"
"math/rand"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
)
func init() {
@ -95,6 +100,80 @@ func Replace(table map[string]string, s string) string {
return s
}
// magicVarPattern matches a single magic-variable placeholder, e.g. "{uid}".
// Compiled once at package scope instead of on every call (the original
// recompiled it per invocation, which is wasted work on hot upload paths).
var magicVarPattern = regexp.MustCompile(`\{[^{}]+\}`)

// ReplaceMagicVar dynamically replaces magic variable placeholders in
// rawString. Unknown placeholders, and path/blob placeholders when the
// corresponding availability flag is false, are left untouched.
//
// fsSeparator is appended after {path}/{blob_path} expansions; timeConst is
// the reference time for all date/time variables; userId fills {uid};
// originName/originPath describe the uploaded file; completeBlobPath is the
// full storage path used by the {blob_*} variables.
func ReplaceMagicVar(rawString string, fsSeparator string, pathAvailable bool, blobAvailable bool,
	timeConst time.Time, userId int, originName string, originPath string, completeBlobPath string) string {
	return magicVarPattern.ReplaceAllStringFunc(rawString, func(match string) string {
		switch match {
		case "{randomkey16}":
			return RandStringRunes(16)
		case "{randomkey8}":
			return RandStringRunes(8)
		case "{timestamp}":
			return strconv.FormatInt(timeConst.Unix(), 10)
		case "{timestamp_nano}":
			return strconv.FormatInt(timeConst.UnixNano(), 10)
		case "{randomnum2}":
			return strconv.Itoa(rand.Intn(2))
		case "{randomnum3}":
			return strconv.Itoa(rand.Intn(3))
		case "{randomnum4}":
			return strconv.Itoa(rand.Intn(4))
		case "{randomnum8}":
			return strconv.Itoa(rand.Intn(8))
		case "{uid}":
			return strconv.Itoa(userId)
		case "{datetime}":
			return timeConst.Format("20060102150405")
		case "{date}":
			return timeConst.Format("20060102")
		case "{year}":
			return timeConst.Format("2006")
		case "{month}":
			return timeConst.Format("01")
		case "{day}":
			return timeConst.Format("02")
		case "{hour}":
			return timeConst.Format("15")
		case "{minute}":
			return timeConst.Format("04")
		case "{second}":
			return timeConst.Format("05")
		case "{uuid}":
			return uuid.Must(uuid.NewV4()).String()
		case "{ext}":
			return filepath.Ext(originName)
		case "{originname}":
			return originName
		case "{originname_without_ext}":
			return strings.TrimSuffix(originName, filepath.Ext(originName))
		case "{path}":
			if pathAvailable {
				return originPath + fsSeparator
			}
			return match
		case "{blob_name}":
			if blobAvailable {
				return filepath.Base(completeBlobPath)
			}
			return match
		case "{blob_name_without_ext}":
			if blobAvailable {
				return strings.TrimSuffix(filepath.Base(completeBlobPath), filepath.Ext(completeBlobPath))
			}
			return match
		case "{blob_path}":
			if blobAvailable {
				return path.Dir(completeBlobPath) + fsSeparator
			}
			return match
		default:
			// Unknown placeholder: keep it verbatim.
			return match
		}
	})
}
// BuildRegexp 构建用于SQL查询用的多条件正则
func BuildRegexp(search []string, prefix, suffix, condition string) string {
var res string

View File

@ -212,7 +212,13 @@ func handleMkcol(c *gin.Context, user *ent.User, fm manager.FileManager) (status
_, err = fm.Create(ctx, uri, types.FileTypeFolder, dbfs.WithNoChainedCreation(), dbfs.WithErrorOnConflict())
if err != nil {
return purposeStatusCodeFromError(err), err
code := purposeStatusCodeFromError(err)
if code == http.StatusNotFound {
// When the MKCOL operation creates a new collection resource, all ancestors MUST already exist,
// or the method MUST fail with a 409 (Conflict) status code.
return http.StatusConflict, err
}
return code, err
}
return http.StatusCreated, nil

View File

@ -426,3 +426,13 @@ func ListArchiveFiles(c *gin.Context) {
Data: resp,
})
}
// HandleExplorerEventsPush is the HTTP entry point for the explorer SSE event
// stream. It resolves the bound ExplorerEventService from the request context
// and delegates to it; on failure it writes a serialized error response and
// aborts the handler chain. On success the service has already streamed the
// response, so nothing more is written here.
func HandleExplorerEventsPush(c *gin.Context) {
	svc := ParametersFromContext[*explorer.ExplorerEventService](c, explorer.ExplorerEventParamCtx{})
	if err := svc.HandleExplorerEventsPush(c); err != nil {
		c.JSON(200, serializer.Err(c, err))
		c.Abort()
		return
	}
}

View File

@ -3,7 +3,6 @@ package routers
import (
"net/http"
"github.com/abslant/gzip"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
@ -24,6 +23,7 @@ import (
sharesvc "github.com/cloudreve/Cloudreve/v4/service/share"
usersvc "github.com/cloudreve/Cloudreve/v4/service/user"
"github.com/gin-contrib/cors"
"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin"
)
@ -206,9 +206,9 @@ func initMasterRouter(dep dependency.Dep) *gin.Engine {
/*
静态资源
*/
r.Use(gzip.GzipHandler()) // Done
r.Use(middleware.FrontendFileHandler(dep)) // Done
r.GET("manifest.json", controllers.Manifest) // Done
r.Use(gzip.Gzip(gzip.DefaultCompression, gzip.WithExcludedPaths([]string{"/api/"})))
r.Use(middleware.FrontendFileHandler(dep))
r.GET("manifest.json", controllers.Manifest)
noAuth := r.Group(constants.APIPrefix)
wopi := noAuth.Group("file/wopi", middleware.HashID(hashid.FileID), middleware.ViewerSessionValidation())
@ -727,6 +727,13 @@ func initMasterRouter(dep dependency.Dep) *gin.Engine {
controllers.FromJSON[explorer.PatchViewService](explorer.PatchViewParameterCtx{}),
controllers.PatchView,
)
// Server event push
file.GET("events",
middleware.LoginRequired(),
controllers.FromQuery[explorer.ExplorerEventService](explorer.ExplorerEventParamCtx{}),
controllers.HandleExplorerEventsPush,
)
}
// 分享相关

View File

@ -347,7 +347,7 @@ func (s *SingleFileService) Url(c *gin.Context) (string, error) {
}
es := entitysource.NewEntitySource(fs.NewEntity(primaryEntity), driver, policy, dep.GeneralAuth(),
dep.SettingProvider(), dep.HashIDEncoder(), dep.RequestClient(), dep.Logger(), dep.ConfigProvider(), dep.MimeDetector(ctx))
dep.SettingProvider(), dep.HashIDEncoder(), dep.RequestClient(), dep.Logger(), dep.ConfigProvider(), dep.MimeDetector(ctx), dep.EncryptorFactory(ctx))
expire := time.Now().Add(time.Hour * 1)
url, err := es.Url(ctx, entitysource.WithExpire(&expire), entitysource.WithDisplayName(file.Name))
@ -547,7 +547,7 @@ func (s *SingleEntityService) Url(c *gin.Context) (string, error) {
}
es := entitysource.NewEntitySource(fs.NewEntity(entity), driver, policy, dep.GeneralAuth(),
dep.SettingProvider(), dep.HashIDEncoder(), dep.RequestClient(), dep.Logger(), dep.ConfigProvider(), dep.MimeDetector(c))
dep.SettingProvider(), dep.HashIDEncoder(), dep.RequestClient(), dep.Logger(), dep.ConfigProvider(), dep.MimeDetector(c), dep.EncryptorFactory(c))
expire := time.Now().Add(time.Hour * 1)
url, err := es.Url(c, entitysource.WithDownload(true), entitysource.WithExpire(&expire), entitysource.WithDisplayName(path.Base(entity.Source)))

View File

@ -193,7 +193,8 @@ type (
func (s *GetSettingService) GetSetting(c *gin.Context) (map[string]string, error) {
dep := dependency.FromContext(c)
res, err := dep.SettingClient().Gets(c, lo.Filter(s.Keys, func(item string, index int) bool {
return item != "secret_key"
_, ok := inventory.RedactedSettings[strings.ToLower(item)]
return !ok
}))
if err != nil {
return nil, serializer.NewError(serializer.CodeDBError, "Failed to get settings", err)

View File

@ -43,16 +43,17 @@ type SiteConfig struct {
PrivacyPolicyUrl string `json:"privacy_policy_url,omitempty"`
// Explorer section
Icons string `json:"icons,omitempty"`
EmojiPreset string `json:"emoji_preset,omitempty"`
MapProvider setting.MapProvider `json:"map_provider,omitempty"`
GoogleMapTileType setting.MapGoogleTileType `json:"google_map_tile_type,omitempty"`
MapboxAK string `json:"mapbox_ak,omitempty"`
FileViewers []types.ViewerGroup `json:"file_viewers,omitempty"`
MaxBatchSize int `json:"max_batch_size,omitempty"`
ThumbnailWidth int `json:"thumbnail_width,omitempty"`
ThumbnailHeight int `json:"thumbnail_height,omitempty"`
CustomProps []types.CustomProps `json:"custom_props,omitempty"`
Icons string `json:"icons,omitempty"`
EmojiPreset string `json:"emoji_preset,omitempty"`
MapProvider setting.MapProvider `json:"map_provider,omitempty"`
GoogleMapTileType setting.MapGoogleTileType `json:"google_map_tile_type,omitempty"`
MapboxAK string `json:"mapbox_ak,omitempty"`
FileViewers []types.ViewerGroup `json:"file_viewers,omitempty"`
MaxBatchSize int `json:"max_batch_size,omitempty"`
ThumbnailWidth int `json:"thumbnail_width,omitempty"`
ThumbnailHeight int `json:"thumbnail_height,omitempty"`
CustomProps []types.CustomProps `json:"custom_props,omitempty"`
ShowEncryptionStatus bool `json:"show_encryption_status,omitempty"`
// Thumbnail section
ThumbExts []string `json:"thumb_exts,omitempty"`
@ -100,6 +101,7 @@ func (s *GetSettingService) GetSiteConfig(c *gin.Context) (*SiteConfig, error) {
fileViewers := settings.FileViewers(c)
customProps := settings.CustomProps(c)
maxBatchSize := settings.MaxBatchedFile(c)
showEncryptionStatus := settings.ShowEncryptionStatus(c)
w, h := settings.ThumbSize(c)
for i := range fileViewers {
for j := range fileViewers[i].Viewers {
@ -107,15 +109,16 @@ func (s *GetSettingService) GetSiteConfig(c *gin.Context) (*SiteConfig, error) {
}
}
return &SiteConfig{
MaxBatchSize: maxBatchSize,
FileViewers: fileViewers,
Icons: explorerSettings.Icons,
MapProvider: mapSettings.Provider,
GoogleMapTileType: mapSettings.GoogleTileType,
MapboxAK: mapSettings.MapboxAK,
ThumbnailWidth: w,
ThumbnailHeight: h,
CustomProps: customProps,
MaxBatchSize: maxBatchSize,
FileViewers: fileViewers,
Icons: explorerSettings.Icons,
MapProvider: mapSettings.Provider,
GoogleMapTileType: mapSettings.GoogleTileType,
MapboxAK: mapSettings.MapboxAK,
ThumbnailWidth: w,
ThumbnailHeight: h,
CustomProps: customProps,
ShowEncryptionStatus: showEncryptionStatus,
}, nil
case "emojis":
emojis := settings.EmojiPresets(c)

105
service/explorer/events.go Normal file
View File

@ -0,0 +1,105 @@
package explorer
import (
"time"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
)
type (
// ExplorerEventService carries the query parameters for the explorer
// SSE event-push endpoint. Uri identifies the folder whose change
// events the client wants to receive; it is required.
ExplorerEventService struct {
Uri string `form:"uri" binding:"required"`
}
// ExplorerEventParamCtx is the context key under which a bound
// *ExplorerEventService is stored in / retrieved from the gin context.
ExplorerEventParamCtx struct{}
)
// HandleExplorerEventsPush turns the request into a Server-Sent Events (SSE)
// stream and forwards file-system change events for the folder identified by
// s.Uri until the client disconnects, the request context is cancelled, or the
// event hub shuts down.
//
// It returns a serializer error only before any SSE bytes have been written
// (invalid URI, folder not listable by this user, missing/invalid client ID,
// subscription failure); after the stream starts it always returns nil.
func (s *ExplorerEventService) HandleExplorerEventsPush(c *gin.Context) error {
dep := dependency.FromContext(c)
user := inventory.UserFromContext(c)
m := manager.NewFileManager(dep, user)
l := logging.FromContext(c)
// Release the file manager when the stream ends, whichever exit path is taken.
defer m.Recycle()
uri, err := fs.NewUriFromString(s.Uri)
if err != nil {
return serializer.NewError(serializer.CodeParamErr, "Unknown uri", err)
}
// Make sure target is a valid folder that the user can listen to.
// The PageSize=1 listing is used purely as an existence/permission probe;
// only the parent handle is kept, as its ID keys the subscription below.
parent, _, err := m.List(c, uri, &manager.ListArgs{
Page: 0,
PageSize: 1,
})
if err != nil {
return serializer.NewError(serializer.CodeParamErr, "Requested uri not available", err)
}
// A client ID is required so the hub can tell subscribers apart and, per the
// `resumed` flag below, pick an existing subscription back up for the same
// client after a reconnect.
requestInfo := requestinfo.RequestInfoFromContext(c)
if requestInfo.ClientID == "" {
return serializer.NewError(serializer.CodeParamErr, "Client ID is required", nil)
}
// Client ID must be a valid UUID
if _, err := uuid.FromString(requestInfo.ClientID); err != nil {
return serializer.NewError(serializer.CodeParamErr, "Invalid client ID", err)
}
// Subscribe to change events for this folder. `resumed` reports whether an
// existing subscription for (folder, client) was resumed rather than created.
eventHub := dep.EventHub()
rx, resumed, err := eventHub.Subscribe(c, parent.ID(), requestInfo.ClientID)
if err != nil {
return serializer.NewError(serializer.CodeInternalSetting, "Failed to subscribe to events", err)
}
// SSE Headers. X-Accel-Buffering: no asks reverse proxies (e.g. nginx) not
// to buffer the response so events are delivered immediately.
c.Writer.Header().Set("Content-Type", "text/event-stream")
c.Writer.Header().Set("Cache-Control", "no-cache")
c.Writer.Header().Set("Connection", "keep-alive")
c.Writer.Header().Set("X-Accel-Buffering", "no")
// Periodic keep-alive events stop idle proxies/clients from dropping the
// long-lived connection.
keepAliveTicker := time.NewTicker(30 * time.Second)
defer keepAliveTicker.Stop()
// Tell the client whether this is a fresh subscription or a resumed one.
if resumed {
c.SSEvent("resumed", nil)
c.Writer.Flush()
} else {
c.SSEvent("subscribed", nil)
c.Writer.Flush()
}
// Event loop: forward hub events, emit keep-alives, and unsubscribe on
// disconnect. Each write is flushed immediately so the client sees it now.
for {
select {
// TODO: close connection after access token expired
case <-c.Request.Context().Done():
// Server shutdown or request cancelled
eventHub.Unsubscribe(c, parent.ID(), requestInfo.ClientID)
l.Debug("Request context done, unsubscribed from event hub")
return nil
case <-c.Writer.CloseNotify():
// Client closed the underlying connection.
eventHub.Unsubscribe(c, parent.ID(), requestInfo.ClientID)
l.Debug("Unsubscribed from event hub")
return nil
case evt, ok := <-rx:
if !ok {
// Channel closed, EventHub is shutting down; no explicit
// unsubscribe is performed here (the hub is going away).
l.Debug("Event hub closed, disconnecting client")
return nil
}
c.SSEvent("event", evt)
l.Debug("Event sent: %+v", evt)
c.Writer.Flush()
case <-keepAliveTicker.C:
c.SSEvent("keep-alive", nil)
c.Writer.Flush()
}
}
}

View File

@ -126,37 +126,49 @@ func BuildTaskResponse(task queue.Task, node *ent.Node, hasher hashid.Encoder) *
}
type UploadSessionResponse struct {
SessionID string `json:"session_id"`
UploadID string `json:"upload_id"`
ChunkSize int64 `json:"chunk_size"` // 分块大小0 为部分快
Expires int64 `json:"expires"` // 上传凭证过期时间, Unix 时间戳
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // COS用有效期
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *StoragePolicy `json:"storage_policy,omitempty"`
Uri string `json:"uri"`
CallbackSecret string `json:"callback_secret"`
MimeType string `json:"mime_type,omitempty"`
UploadPolicy string `json:"upload_policy,omitempty"`
SessionID string `json:"session_id"`
UploadID string `json:"upload_id"`
ChunkSize int64 `json:"chunk_size"` // 分块大小0 为部分快
Expires int64 `json:"expires"` // 上传凭证过期时间, Unix 时间戳
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // COS用有效期
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *StoragePolicy `json:"storage_policy,omitempty"`
Uri string `json:"uri"`
CallbackSecret string `json:"callback_secret"`
MimeType string `json:"mime_type,omitempty"`
UploadPolicy string `json:"upload_policy,omitempty"`
EncryptMetadata *types.EncryptMetadata `json:"encrypt_metadata,omitempty"`
}
func BuildUploadSessionResponse(session *fs.UploadCredential, hasher hashid.Encoder) *UploadSessionResponse {
return &UploadSessionResponse{
SessionID: session.SessionID,
ChunkSize: session.ChunkSize,
Expires: session.Expires,
UploadURLs: session.UploadURLs,
Credential: session.Credential,
CompleteURL: session.CompleteURL,
Uri: session.Uri,
UploadID: session.UploadID,
StoragePolicy: BuildStoragePolicy(session.StoragePolicy, hasher),
CallbackSecret: session.CallbackSecret,
MimeType: session.MimeType,
UploadPolicy: session.UploadPolicy,
res := &UploadSessionResponse{
SessionID: session.SessionID,
ChunkSize: session.ChunkSize,
Expires: session.Expires,
UploadURLs: session.UploadURLs,
Credential: session.Credential,
CompleteURL: session.CompleteURL,
Uri: session.Uri,
UploadID: session.UploadID,
StoragePolicy: BuildStoragePolicy(session.StoragePolicy, hasher),
CallbackSecret: session.CallbackSecret,
MimeType: session.MimeType,
UploadPolicy: session.UploadPolicy,
EncryptMetadata: session.EncryptMetadata,
}
if session.EncryptMetadata != nil {
res.EncryptMetadata = &types.EncryptMetadata{
Algorithm: session.EncryptMetadata.Algorithm,
KeyPlainText: session.EncryptMetadata.KeyPlainText,
IV: session.EncryptMetadata.IV,
}
}
return res
}
// WopiFileInfo Response for `CheckFileInfo`
@ -270,6 +282,7 @@ type StoragePolicy struct {
MaxSize int64 `json:"max_size"`
Relay bool `json:"relay,omitempty"`
ChunkConcurrency int `json:"chunk_concurrency,omitempty"`
Encryption bool `json:"encryption,omitempty"`
}
type Entity struct {
@ -279,6 +292,7 @@ type Entity struct {
CreatedAt time.Time `json:"created_at"`
StoragePolicy *StoragePolicy `json:"storage_policy,omitempty"`
CreatedBy *user.User `json:"created_by,omitempty"`
EncryptedWith types.Cipher `json:"encrypted_with,omitempty"`
}
type Share struct {
@ -439,6 +453,12 @@ func BuildEntity(extendedInfo *fs.FileExtendedInfo, e fs.Entity, hasher hashid.E
userRedacted := user.BuildUserRedacted(e.CreatedBy(), user.RedactLevelAnonymous, hasher)
u = &userRedacted
}
encryptedWith := types.Cipher("")
if e.Encrypted() {
encryptedWith = e.Props().EncryptMetadata.Algorithm
}
return Entity{
ID: hashid.EncodeEntityID(hasher, e.ID()),
Type: e.Type(),
@ -446,6 +466,7 @@ func BuildEntity(extendedInfo *fs.FileExtendedInfo, e fs.Entity, hasher hashid.E
StoragePolicy: BuildStoragePolicy(extendedInfo.EntityStoragePolicies[e.PolicyID()], hasher),
Size: e.Size(),
CreatedBy: u,
EncryptedWith: encryptedWith,
}
}
@ -469,6 +490,7 @@ func BuildStoragePolicy(sp *ent.StoragePolicy, hasher hashid.Encoder) *StoragePo
MaxSize: sp.MaxSize,
Relay: sp.Settings.Relay,
ChunkConcurrency: sp.Settings.ChunkConcurrency,
Encryption: sp.Settings.Encryption,
}
if sp.Settings.IsFileTypeDenyList {

View File

@ -3,6 +3,9 @@ package explorer
import (
"context"
"fmt"
"strconv"
"time"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
@ -13,21 +16,21 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin"
"strconv"
"time"
)
// CreateUploadSessionService 获取上传凭证服务
type (
CreateUploadSessionParameterCtx struct{}
CreateUploadSessionService struct {
Uri string `json:"uri" binding:"required"`
Size int64 `json:"size" binding:"min=0"`
LastModified int64 `json:"last_modified"`
MimeType string `json:"mime_type"`
PolicyID string `json:"policy_id"`
Metadata map[string]string `json:"metadata" binding:"max=256"`
EntityType string `json:"entity_type" binding:"eq=|eq=live_photo|eq=version"`
Uri string `json:"uri" binding:"required"`
Size int64 `json:"size" binding:"min=0"`
LastModified int64 `json:"last_modified"`
MimeType string `json:"mime_type"`
PolicyID string `json:"policy_id"`
Metadata map[string]string `json:"metadata" binding:"max=256"`
EntityType string `json:"entity_type" binding:"eq=|eq=live_photo|eq=version"`
EncryptionSupported []types.Cipher `json:"encryption_supported"`
Previous string `form:"previous"`
}
)
@ -54,9 +57,12 @@ func (service *CreateUploadSessionService) Create(c context.Context) (*UploadSes
}
hasher := dep.HashIDEncoder()
policyId, err := hasher.Decode(service.PolicyID, hashid.PolicyID)
if err != nil {
return nil, serializer.NewError(serializer.CodeParamErr, "unknown policy id", err)
policyId := 0
if service.PolicyID != "" {
policyId, err = hasher.Decode(service.PolicyID, hashid.PolicyID)
if err != nil {
return nil, serializer.NewError(serializer.CodeParamErr, "unknown policy id", err)
}
}
uploadRequest := &fs.UploadRequest{
@ -64,10 +70,13 @@ func (service *CreateUploadSessionService) Create(c context.Context) (*UploadSes
Uri: uri,
Size: service.Size,
PreviousVersion: service.Previous,
MimeType: service.MimeType,
Metadata: service.Metadata,
EntityType: entityType,
PreferredStoragePolicy: policyId,
EncryptionSupported: service.EncryptionSupported,
ClientSideEncrypted: len(service.EncryptionSupported) > 0,
},
}
@ -133,6 +142,7 @@ func (service *UploadService) SlaveUpload(c *gin.Context) error {
}
uploadSession := uploadSessionRaw.(fs.UploadSession)
uploadSession.Props.ClientSideEncrypted = true
// Parse chunk index from query
service.Index, _ = strconv.Atoi(c.Query("chunk"))
@ -175,7 +185,7 @@ func processChunkUpload(c *gin.Context, m manager.FileManager, session *fs.Uploa
// 执行上传
ctx := context.WithValue(c, cluster.SlaveNodeIDCtx{}, strconv.Itoa(session.Policy.NodeID))
err = m.Upload(ctx, req, session.Policy)
err = m.Upload(ctx, req, session.Policy, session)
if err != nil {
return err
}