Compare commits
No commits in common. "master" and "4.6.0" have entirely different histories.
@@ -38,12 +38,12 @@
 ## :sparkles: Features
 
-- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu Kodo, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Kingsoft Cloud KS3, Upyun.
+- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu, Aliyun OSS, Tencent COS, Upyun.
 - :outbox_tray: Upload/Download in directly transmission from client to storage providers.
 - 💾 Integrate with Aria2/qBittorrent to download files in background, use multiple download nodes to share the load.
-- 📚 Compress/Extract/Preview archived files, download files in batch.
+- 📚 Compress/Extract files, download files in batch.
 - 💻 WebDAV support covering all storage providers.
-- :zap: Drag&Drop to upload files or folders, with parallel resumable upload support.
+- :zap: Drag&Drop to upload files or folders, with resumable upload support.
 - :card_file_box: Extract media metadata from files, search files by metadata or tags.
 - :family_woman_girl_boy: Multi-users with multi-groups.
 - :link: Create share links for files and folders with expiration date.
@@ -39,12 +39,12 @@
 ## :sparkles: Features
 
-- :cloud: Supports Local, Remote node, Qiniu Kodo, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Kingsoft Cloud KS3, Upyun, OneDrive (including the 21Vianet edition), and S3-compatible APIs as storage backends
+- :cloud: Supports Local, Remote node, Qiniu, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Upyun, OneDrive (including the 21Vianet edition), and S3-compatible APIs as storage backends
 - :outbox_tray: Direct client-to-storage transfer for uploads/downloads, with download rate limiting
-- 💾 Offline downloads via Aria2/qBittorrent, with multiple slave nodes to share the load
-- 📚 Online compression/extraction and archive preview, batch download of multiple files
+- 💾 Offline downloads via Aria2, with multiple slave nodes to share the load
+- 📚 Online compression/extraction, batch download of multiple files
 - 💻 WebDAV support covering all storage policies
-- :zap: Drag-and-drop upload, folder upload, parallel chunked upload
+- :zap: Drag-and-drop upload, folder upload, chunked upload
 - :card_file_box: Extract media metadata, search files by metadata or tags
 - :family_woman_girl_boy: Multi-user, user groups, multiple storage policies
 - :link: Create share links for files and folders, with optional automatic expiration
@@ -178,8 +178,6 @@ func (s *server) Close() {
 		defer cancel()
 	}
 
-	s.dep.EventHub().Close()
-
 	// Shutdown http server
 	if s.server != nil {
 		err := s.server.Shutdown(ctx)
@@ -3,7 +3,7 @@ package constants
 // These values will be injected at build time, DO NOT EDIT.
 
 // BackendVersion is the current backend version number.
-var BackendVersion = "4.7.0"
+var BackendVersion = "4.1.0"
 
 // IsPro indicates whether this is the Pro edition.
 var IsPro = "false"
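The `DO NOT EDIT` note above refers to Go's link-time string injection. A sketch of how such variables are typically stamped with `-ldflags -X` (the full import path of the `constants` package is an assumption; the hunk only shows `package constants`):

```
# Hypothetical invocation; adjust the import path to wherever the
# constants package actually lives in the module.
go build -ldflags "\
    -X 'github.com/cloudreve/Cloudreve/v4/application/constants.BackendVersion=4.6.0' \
    -X 'github.com/cloudreve/Cloudreve/v4/application/constants.IsPro=false'" \
    -o cloudreve
```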
@@ -17,8 +17,6 @@ import (
 	"github.com/cloudreve/Cloudreve/v4/pkg/conf"
 	"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
 	"github.com/cloudreve/Cloudreve/v4/pkg/email"
-	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
-	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
 	"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@@ -131,63 +129,55 @@ type Dep interface {
 	WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error)
 	// UAParser Get a singleton uaparser.Parser instance for user agent parsing.
 	UAParser() *uaparser.Parser
-	// MasterEncryptKeyVault Get a singleton encrypt.MasterEncryptKeyVault instance for master encrypt key vault.
-	MasterEncryptKeyVault(ctx context.Context) encrypt.MasterEncryptKeyVault
-	// EncryptorFactory Get a new encrypt.CryptorFactory instance.
-	EncryptorFactory(ctx context.Context) encrypt.CryptorFactory
-	// EventHub Get a singleton eventhub.EventHub instance for event publishing.
-	EventHub() eventhub.EventHub
 }
 
 type dependency struct {
-	configProvider conf.ConfigProvider
-	logger logging.Logger
-	statics iofs.FS
-	serverStaticFS static.ServeFileSystem
-	dbClient *ent.Client
-	rawEntClient *ent.Client
-	kv cache.Driver
-	navigatorStateKv cache.Driver
-	settingClient inventory.SettingClient
-	fileClient inventory.FileClient
-	shareClient inventory.ShareClient
-	settingProvider setting.Provider
-	userClient inventory.UserClient
-	groupClient inventory.GroupClient
-	storagePolicyClient inventory.StoragePolicyClient
-	taskClient inventory.TaskClient
-	nodeClient inventory.NodeClient
-	davAccountClient inventory.DavAccountClient
-	directLinkClient inventory.DirectLinkClient
-	fsEventClient inventory.FsEventClient
-	emailClient email.Driver
-	generalAuth auth.Auth
-	hashidEncoder hashid.Encoder
-	tokenAuth auth.TokenAuth
-	lockSystem lock.LockSystem
-	requestClient request.Client
-	ioIntenseQueue queue.Queue
-	thumbQueue queue.Queue
-	mediaMetaQueue queue.Queue
-	entityRecycleQueue queue.Queue
-	slaveQueue queue.Queue
-	remoteDownloadQueue queue.Queue
-	ioIntenseQueueTask queue.Task
-	mediaMeta mediameta.Extractor
-	thumbPipeline thumb.Generator
-	mimeDetector mime.MimeDetector
-	credManager credmanager.CredManager
-	nodePool cluster.NodePool
-	taskRegistry queue.TaskRegistry
-	webauthn *webauthn.WebAuthn
-	parser *uaparser.Parser
-	cron *cron.Cron
-	masterEncryptKeyVault encrypt.MasterEncryptKeyVault
-	eventHub eventhub.EventHub
+	configProvider conf.ConfigProvider
+	logger logging.Logger
+	statics iofs.FS
+	serverStaticFS static.ServeFileSystem
+	dbClient *ent.Client
+	rawEntClient *ent.Client
+	kv cache.Driver
+	navigatorStateKv cache.Driver
+	settingClient inventory.SettingClient
+	fileClient inventory.FileClient
+	shareClient inventory.ShareClient
+	settingProvider setting.Provider
+	userClient inventory.UserClient
+	groupClient inventory.GroupClient
+	storagePolicyClient inventory.StoragePolicyClient
+	taskClient inventory.TaskClient
+	nodeClient inventory.NodeClient
+	davAccountClient inventory.DavAccountClient
+	directLinkClient inventory.DirectLinkClient
+	emailClient email.Driver
+	generalAuth auth.Auth
+	hashidEncoder hashid.Encoder
+	tokenAuth auth.TokenAuth
+	lockSystem lock.LockSystem
+	requestClient request.Client
+	ioIntenseQueue queue.Queue
+	thumbQueue queue.Queue
+	mediaMetaQueue queue.Queue
+	entityRecycleQueue queue.Queue
+	slaveQueue queue.Queue
+	remoteDownloadQueue queue.Queue
+	ioIntenseQueueTask queue.Task
+	mediaMeta mediameta.Extractor
+	thumbPipeline thumb.Generator
+	mimeDetector mime.MimeDetector
+	credManager credmanager.CredManager
+	nodePool cluster.NodePool
+	taskRegistry queue.TaskRegistry
+	webauthn *webauthn.WebAuthn
+	parser *uaparser.Parser
+	cron *cron.Cron
 
 	configPath string
 	isPro bool
 	requiredDbVersion string
 	licenseKey string
 
 	// Protects inner deps that can be reloaded at runtime.
 	mu sync.Mutex
@@ -216,19 +206,6 @@ func (d *dependency) RequestClient(opts ...request.Option) request.Client {
 	return request.NewClient(d.ConfigProvider(), opts...)
 }
 
-func (d *dependency) MasterEncryptKeyVault(ctx context.Context) encrypt.MasterEncryptKeyVault {
-	if d.masterEncryptKeyVault != nil {
-		return d.masterEncryptKeyVault
-	}
-
-	d.masterEncryptKeyVault = encrypt.NewMasterEncryptKeyVault(ctx, d.SettingProvider())
-	return d.masterEncryptKeyVault
-}
-
-func (d *dependency) EncryptorFactory(ctx context.Context) encrypt.CryptorFactory {
-	return encrypt.NewCryptorFactory(d.MasterEncryptKeyVault(ctx))
-}
-
 func (d *dependency) WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error) {
 	if d.webauthn != nil {
 		return d.webauthn, nil
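Both accessors removed in this hunk follow the lazy-singleton idiom used throughout the dependency container: return the memoized field if present, otherwise construct once and cache. A minimal self-contained sketch of the idiom (illustrative names only, not Cloudreve APIs; note that, like the originals, it is not safe for concurrent first use without external locking):

```go
package main

import "fmt"

type keyVault struct{ created int }

type container struct {
	vault  *keyVault
	builds int
}

// Vault lazily constructs and memoizes a singleton, mirroring the shape
// of MasterEncryptKeyVault above: nil-check, build, cache, return.
func (c *container) Vault() *keyVault {
	if c.vault != nil {
		return c.vault
	}
	c.builds++
	c.vault = &keyVault{created: c.builds}
	return c.vault
}

func main() {
	c := &container{}
	a, b := c.Vault(), c.Vault()
	fmt.Println(a == b, c.builds) // true 1 — constructed exactly once, then reused
}
```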
@@ -369,21 +346,6 @@ func (d *dependency) NavigatorStateKV() cache.Driver {
 	return d.navigatorStateKv
 }
 
-func (d *dependency) EventHub() eventhub.EventHub {
-	if d.eventHub != nil {
-		return d.eventHub
-	}
-	d.eventHub = eventhub.NewEventHub(d.UserClient(), d.FsEventClient())
-	return d.eventHub
-}
-
-func (d *dependency) FsEventClient() inventory.FsEventClient {
-	if d.fsEventClient != nil {
-		return d.fsEventClient
-	}
-	return inventory.NewFsEventClient(d.DBClient(), d.ConfigProvider().Database().Type)
-}
-
 func (d *dependency) SettingClient() inventory.SettingClient {
 	if d.settingClient != nil {
 		return d.settingClient
@@ -505,7 +467,7 @@ func (d *dependency) MediaMetaExtractor(ctx context.Context) mediameta.Extractor
 		return d.mediaMeta
 	}
 
-	d.mediaMeta = mediameta.NewExtractorManager(ctx, d.SettingProvider(), d.Logger(), d.RequestClient())
+	d.mediaMeta = mediameta.NewExtractorManager(ctx, d.SettingProvider(), d.Logger())
 	return d.mediaMeta
 }
 
@@ -881,14 +843,6 @@ func (d *dependency) Shutdown(ctx context.Context) error {
 		}()
 	}
 
-	if d.eventHub != nil {
-		wg.Add(1)
-		go func() {
-			d.eventHub.Close()
-			defer wg.Done()
-		}()
-	}
-
 	d.mu.Unlock()
 	wg.Wait()
 
@@ -1,8 +1,6 @@
 package dependency
 
 import (
-	"io/fs"
-
 	"github.com/cloudreve/Cloudreve/v4/ent"
 	"github.com/cloudreve/Cloudreve/v4/inventory"
 	"github.com/cloudreve/Cloudreve/v4/pkg/auth"
@@ -13,6 +11,7 @@ import (
 	"github.com/cloudreve/Cloudreve/v4/pkg/logging"
 	"github.com/cloudreve/Cloudreve/v4/pkg/setting"
 	"github.com/gin-contrib/static"
+	"io/fs"
 )
 
 // Option Extra settings for sending requests
@@ -68,6 +67,12 @@ func WithProFlag(c bool) Option {
 	})
 }
 
+func WithLicenseKey(c string) Option {
+	return optionFunc(func(o *dependency) {
+		o.licenseKey = c
+	})
+}
+
 // WithRawEntClient Set the default raw ent client.
 func WithRawEntClient(c *ent.Client) Option {
 	return optionFunc(func(o *dependency) {
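`WithLicenseKey` follows the functional-options idiom used by every option in this file. The `optionFunc` adapter itself is outside this hunk; a self-contained sketch of the plumbing it implies (an assumption consistent with the calls shown, not the verbatim Cloudreve definition):

```go
package main

import "fmt"

type dependency struct{ licenseKey string }

// Option mutates the dependency container during construction.
type Option interface{ apply(*dependency) }

// optionFunc adapts an ordinary closure to the Option interface, which is
// what `return optionFunc(func(o *dependency) {...})` above relies on.
type optionFunc func(*dependency)

func (f optionFunc) apply(d *dependency) { f(d) }

func WithLicenseKey(c string) Option {
	return optionFunc(func(o *dependency) { o.licenseKey = c })
}

func main() {
	d := &dependency{}
	for _, opt := range []Option{WithLicenseKey("demo-key")} {
		opt.apply(d)
	}
	fmt.Println(d.licenseKey) // demo-key
}
```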
@@ -103,6 +103,10 @@ func (m *Migrator) migratePolicy() (map[int]bool, error) {
 			settings.ProxyServer = policy.OptionsSerialized.OdProxy
 		}
 
+		if policy.DirNameRule == "" {
+			policy.DirNameRule = "uploads/{uid}/{path}"
+		}
+
 		if policy.Type == types.PolicyTypeCos {
 			settings.ChunkSize = 1024 * 1024 * 25
 		}
@@ -118,16 +122,8 @@ func (m *Migrator) migratePolicy() (map[int]bool, error) {
 			hasRandomElement = true
 			break
 		}
-
-		if strings.Contains(policy.DirNameRule, c) {
-			hasRandomElement = true
-			break
-		}
 	}
 	if !hasRandomElement {
-		if policy.DirNameRule == "" {
-			policy.DirNameRule = "uploads/{uid}/{path}"
-		}
 		policy.FileNameRule = "{uid}_{randomkey8}_{originname}"
 		m.l.Warning("Storage policy %q has no random element in file name rule, using default file name rule.", policy.Name)
 	}
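To make the two rules concrete: with the defaults above, a file report.pdf uploaded by user 42 into /docs would land in a directory expanded from `uploads/{uid}/{path}` (e.g. `uploads/42/docs/`) under a name expanded from `{uid}_{randomkey8}_{originname}` (e.g. `42_3f9c01ab_report.pdf`, assuming `{randomkey8}` expands to eight random characters — the expansion details are an assumption; only the placeholder strings come from this diff). The warning fires precisely because a name rule without any random element makes stored object keys predictable and collision-prone.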
assets
@@ -1 +1 @@
-Subproject commit 0b388cc50a6c8e67f645d1b7d569bd9e58ae2c30
+Subproject commit 63c7abf214d94995ed02491d412971ae2bf2996b
cmd/masterkey.go
@@ -1,230 +0,0 @@
-package cmd
-
-import (
-	"context"
-	"crypto/rand"
-	"encoding/base64"
-	"fmt"
-	"io"
-	"os"
-
-	"github.com/cloudreve/Cloudreve/v4/application/dependency"
-	"github.com/cloudreve/Cloudreve/v4/ent/entity"
-	"github.com/cloudreve/Cloudreve/v4/inventory/types"
-	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
-	"github.com/cloudreve/Cloudreve/v4/pkg/setting"
-	"github.com/spf13/cobra"
-)
-
-var (
-	outputToFile     string
-	newMasterKeyFile string
-)
-
-func init() {
-	rootCmd.AddCommand(masterKeyCmd)
-	masterKeyCmd.AddCommand(masterKeyGenerateCmd)
-	masterKeyCmd.AddCommand(masterKeyGetCmd)
-	masterKeyCmd.AddCommand(masterKeyRotateCmd)
-
-	masterKeyGenerateCmd.Flags().StringVarP(&outputToFile, "output", "o", "", "Output master key to file instead of stdout")
-	masterKeyRotateCmd.Flags().StringVarP(&newMasterKeyFile, "new-key", "n", "", "Path to file containing the new master key (base64 encoded).")
-}
-
-var masterKeyCmd = &cobra.Command{
-	Use:   "master-key",
-	Short: "Master encryption key management",
-	Long:  "Manage master encryption keys for file encryption. Use subcommands to generate, get, or rotate keys.",
-	Run: func(cmd *cobra.Command, args []string) {
-		_ = cmd.Help()
-	},
-}
-
-var masterKeyGenerateCmd = &cobra.Command{
-	Use:   "generate",
-	Short: "Generate a new master encryption key",
-	Long:  "Generate a new random 32-byte (256-bit) master encryption key and output it in base64 format.",
-	Run: func(cmd *cobra.Command, args []string) {
-		// Generate 32-byte random key
-		key := make([]byte, 32)
-		if _, err := io.ReadFull(rand.Reader, key); err != nil {
-			fmt.Fprintf(os.Stderr, "Error: Failed to generate random key: %v\n", err)
-			os.Exit(1)
-		}
-
-		// Encode to base64
-		encodedKey := base64.StdEncoding.EncodeToString(key)
-
-		if outputToFile != "" {
-			// Write to file
-			if err := os.WriteFile(outputToFile, []byte(encodedKey), 0600); err != nil {
-				fmt.Fprintf(os.Stderr, "Error: Failed to write key to file: %v\n", err)
-				os.Exit(1)
-			}
-			fmt.Printf("Master key generated and saved to: %s\n", outputToFile)
-		} else {
-			// Output to stdout
-			fmt.Println(encodedKey)
-		}
-	},
-}
-
-var masterKeyGetCmd = &cobra.Command{
-	Use:   "get",
-	Short: "Get the current master encryption key",
-	Long:  "Retrieve and display the current master encryption key from the configured vault (setting, env, or file).",
-	Run: func(cmd *cobra.Command, args []string) {
-		ctx := context.Background()
-		dep := dependency.NewDependency(
-			dependency.WithConfigPath(confPath),
-		)
-		logger := dep.Logger()
-
-		// Get the master key vault
-		vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
-
-		// Retrieve the master key
-		key, err := vault.GetMasterKey(ctx)
-		if err != nil {
-			logger.Error("Failed to get master key: %s", err)
-			os.Exit(1)
-		}
-
-		// Encode to base64 and display
-		encodedKey := base64.StdEncoding.EncodeToString(key)
-		fmt.Println("")
-		fmt.Println(encodedKey)
-	},
-}
-
-var masterKeyRotateCmd = &cobra.Command{
-	Use:   "rotate",
-	Short: "Rotate the master encryption key",
-	Long: `Rotate the master encryption key by re-encrypting all encrypted file keys with a new master key.
-This operation:
-1. Retrieves the current master key
-2. Loads a new master key from file
-3. Re-encrypts all file encryption keys with the new master key
-4. Updates the master key in the settings database
-
-Warning: This is a critical operation. Make sure to backup your database before proceeding.`,
-	Run: func(cmd *cobra.Command, args []string) {
-		ctx := context.Background()
-		dep := dependency.NewDependency(
-			dependency.WithConfigPath(confPath),
-		)
-		logger := dep.Logger()
-
-		logger.Info("Starting master key rotation...")
-
-		// Get the old master key
-		vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
-		oldMasterKey, err := vault.GetMasterKey(ctx)
-		if err != nil {
-			logger.Error("Failed to get current master key: %s", err)
-			os.Exit(1)
-		}
-		logger.Info("Retrieved current master key")
-
-		// Get or generate the new master key
-		var newMasterKey []byte
-		// Load from file
-		keyData, err := os.ReadFile(newMasterKeyFile)
-		if err != nil {
-			logger.Error("Failed to read new master key file: %s", err)
-			os.Exit(1)
-		}
-		newMasterKey, err = base64.StdEncoding.DecodeString(string(keyData))
-		if err != nil {
-			logger.Error("Failed to decode new master key: %s", err)
-			os.Exit(1)
-		}
-		if len(newMasterKey) != 32 {
-			logger.Error("Invalid new master key: must be 32 bytes (256 bits), got %d bytes", len(newMasterKey))
-			os.Exit(1)
-		}
-		logger.Info("Loaded new master key from file: %s", newMasterKeyFile)
-
-		// Query all entities with encryption metadata
-		db := dep.DBClient()
-		entities, err := db.Entity.Query().
-			Where(entity.Not(entity.PropsIsNil())).
-			All(ctx)
-		if err != nil {
-			logger.Error("Failed to query entities: %s", err)
-			os.Exit(1)
-		}
-
-		logger.Info("Found %d entities to check for encryption", len(entities))
-
-		// Re-encrypt each entity's encryption key
-		encryptedCount := 0
-		for _, ent := range entities {
-			if ent.Props == nil || ent.Props.EncryptMetadata == nil {
-				continue
-			}
-
-			encMeta := ent.Props.EncryptMetadata
-
-			// Decrypt the file key with old master key
-			decryptedFileKey, err := encrypt.DecryptWithMasterKey(oldMasterKey, encMeta.Key)
-			if err != nil {
-				logger.Error("Failed to decrypt key for entity %d: %s", ent.ID, err)
-				os.Exit(1)
-			}
-
-			// Re-encrypt the file key with new master key
-			newEncryptedKey, err := encrypt.EncryptWithMasterKey(newMasterKey, decryptedFileKey)
-			if err != nil {
-				logger.Error("Failed to re-encrypt key for entity %d: %s", ent.ID, err)
-				os.Exit(1)
-			}
-
-			// Update the entity
-			newProps := *ent.Props
-			newProps.EncryptMetadata = &types.EncryptMetadata{
-				Algorithm:    encMeta.Algorithm,
-				Key:          newEncryptedKey,
-				KeyPlainText: nil, // Don't store plaintext
-				IV:           encMeta.IV,
-			}
-
-			err = db.Entity.UpdateOne(ent).
-				SetProps(&newProps).
-				Exec(ctx)
-			if err != nil {
-				logger.Error("Failed to update entity %d: %s", ent.ID, err)
-				os.Exit(1)
-			}
-
-			encryptedCount++
-		}
-
-		logger.Info("Re-encrypted %d file keys", encryptedCount)
-
-		// Update the master key in settings
-		keyStore := dep.SettingProvider().MasterEncryptKeyVault(ctx)
-		if keyStore == setting.MasterEncryptKeyVaultTypeSetting {
-			encodedNewKey := base64.StdEncoding.EncodeToString(newMasterKey)
-			err = dep.SettingClient().Set(ctx, map[string]string{
-				"encrypt_master_key": encodedNewKey,
-			})
-			if err != nil {
-				logger.Error("Failed to update master key in settings: %s", err)
-				logger.Error("WARNING: File keys have been re-encrypted but master key update failed!")
-				logger.Error("Please manually update the encrypt_master_key setting.")
-				os.Exit(1)
-			}
-		} else {
-			logger.Info("Current master key is stored in %q", keyStore)
-			if keyStore == setting.MasterEncryptKeyVaultTypeEnv {
-				logger.Info("Please update the new master encryption key in your \"CR_ENCRYPT_MASTER_KEY\" environment variable.")
-			} else if keyStore == setting.MasterEncryptKeyVaultTypeFile {
-				logger.Info("Please update the new master encryption key in your key file: %q", dep.SettingProvider().MasterEncryptKeyFile(ctx))
-			}
-			logger.Info("Last step: Please manually update the new master encryption key in your ENV or key file.")
-		}
-
-		logger.Info("Master key rotation completed successfully")
-	},
-}
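Read together, the subcommands above imply the following rotation workflow. A hedged sketch of the invocations (the `cloudreve` binary name is assumed; the subcommands and the `-o`/`-n` flags are exactly those registered in `init` above):

```
# 1. Generate a fresh 32-byte key; -o writes it base64-encoded with 0600 perms.
./cloudreve master-key generate -o new-master.key

# 2. Optionally inspect the currently active key first.
./cloudreve master-key get

# 3. Back up the database, then re-encrypt every stored file key under the new key.
./cloudreve master-key rotate -n new-master.key
```

Note the failure mode the code guards against: if re-encryption succeeds but the settings write fails, file keys are already under the new master key, which is why the command logs explicit manual-recovery instructions rather than attempting a rollback.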
@@ -2,16 +2,14 @@ package cmd
 
 import (
 	"fmt"
-	"os"
-
 	"github.com/cloudreve/Cloudreve/v4/pkg/util"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
+	"os"
 )
 
 var (
-	confPath   string
-	licenseKey string
+	confPath string
 )
 
 func init() {
@@ -12,6 +12,10 @@ import (
 	"github.com/spf13/cobra"
 )
 
+var (
+	licenseKey string
+)
+
 func init() {
 	rootCmd.AddCommand(serverCmd)
 	serverCmd.PersistentFlags().StringVarP(&licenseKey, "license-key", "l", "", "License key of your Cloudreve Pro")
@@ -25,6 +29,7 @@ var serverCmd = &cobra.Command{
 			dependency.WithConfigPath(confPath),
 			dependency.WithProFlag(constants.IsProBool),
 			dependency.WithRequiredDbVersion(constants.BackendVersion),
+			dependency.WithLicenseKey(licenseKey),
 		)
 		server := application.NewServer(dep)
 		logger := dep.Logger()
@@ -5,11 +5,9 @@ services:
     depends_on:
       - postgresql
      - redis
-    restart: unless-stopped
+    restart: always
     ports:
       - 5212:5212
-      - 6888:6888
-      - 6888:6888/udp
     environment:
       - CR_CONF_Database.Type=postgres
       - CR_CONF_Database.Host=postgresql
@@ -26,7 +24,6 @@ services:
     # backup & consult https://www.postgresql.org/docs/current/pgupgrade.html
     image: postgres:17
-    container_name: postgresql
     restart: unless-stopped
     environment:
       - POSTGRES_USER=cloudreve
       - POSTGRES_DB=cloudreve
@@ -37,7 +34,6 @@ services:
   redis:
     image: redis:latest
-    container_name: redis
     restart: unless-stopped
     volumes:
       - redis_data:/data
 
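The `CR_CONF_` variables in the compose file override entries of Cloudreve's configuration file: the suffix is read as a `Section.Key` path, so `CR_CONF_Database.Type=postgres` should correspond to an ini-style stanza like the sketch below (the exact file layout is an assumption based on Cloudreve's conf.ini convention; only the two keys come from this diff):

```
[Database]
Type = postgres
Host = postgresql
```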
ent/client.go
@@ -19,7 +19,6 @@ import (
 	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
 	"github.com/cloudreve/Cloudreve/v4/ent/entity"
 	"github.com/cloudreve/Cloudreve/v4/ent/file"
-	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
 	"github.com/cloudreve/Cloudreve/v4/ent/group"
 	"github.com/cloudreve/Cloudreve/v4/ent/metadata"
 	"github.com/cloudreve/Cloudreve/v4/ent/node"
@@ -46,8 +45,6 @@ type Client struct {
 	Entity *EntityClient
 	// File is the client for interacting with the File builders.
 	File *FileClient
-	// FsEvent is the client for interacting with the FsEvent builders.
-	FsEvent *FsEventClient
 	// Group is the client for interacting with the Group builders.
 	Group *GroupClient
 	// Metadata is the client for interacting with the Metadata builders.
@@ -81,7 +78,6 @@ func (c *Client) init() {
 	c.DirectLink = NewDirectLinkClient(c.config)
 	c.Entity = NewEntityClient(c.config)
 	c.File = NewFileClient(c.config)
-	c.FsEvent = NewFsEventClient(c.config)
 	c.Group = NewGroupClient(c.config)
 	c.Metadata = NewMetadataClient(c.config)
 	c.Node = NewNodeClient(c.config)
@@ -187,7 +183,6 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
 		DirectLink: NewDirectLinkClient(cfg),
 		Entity: NewEntityClient(cfg),
 		File: NewFileClient(cfg),
-		FsEvent: NewFsEventClient(cfg),
 		Group: NewGroupClient(cfg),
 		Metadata: NewMetadataClient(cfg),
 		Node: NewNodeClient(cfg),
@@ -220,7 +215,6 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
 		DirectLink: NewDirectLinkClient(cfg),
 		Entity: NewEntityClient(cfg),
 		File: NewFileClient(cfg),
-		FsEvent: NewFsEventClient(cfg),
 		Group: NewGroupClient(cfg),
 		Metadata: NewMetadataClient(cfg),
 		Node: NewNodeClient(cfg),
@@ -259,8 +253,8 @@ func (c *Client) Close() error {
 // In order to add hooks to a specific client, call: `client.Node.Use(...)`.
 func (c *Client) Use(hooks ...Hook) {
 	for _, n := range []interface{ Use(...Hook) }{
-		c.DavAccount, c.DirectLink, c.Entity, c.File, c.FsEvent, c.Group, c.Metadata,
-		c.Node, c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
+		c.DavAccount, c.DirectLink, c.Entity, c.File, c.Group, c.Metadata, c.Node,
+		c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
 	} {
 		n.Use(hooks...)
 	}
@@ -270,8 +264,8 @@ func (c *Client) Use(hooks ...Hook) {
 // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
 func (c *Client) Intercept(interceptors ...Interceptor) {
 	for _, n := range []interface{ Intercept(...Interceptor) }{
-		c.DavAccount, c.DirectLink, c.Entity, c.File, c.FsEvent, c.Group, c.Metadata,
-		c.Node, c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
+		c.DavAccount, c.DirectLink, c.Entity, c.File, c.Group, c.Metadata, c.Node,
+		c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
 	} {
 		n.Intercept(interceptors...)
 	}
@@ -288,8 +282,6 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
 		return c.Entity.mutate(ctx, m)
 	case *FileMutation:
 		return c.File.mutate(ctx, m)
-	case *FsEventMutation:
-		return c.FsEvent.mutate(ctx, m)
 	case *GroupMutation:
 		return c.Group.mutate(ctx, m)
 	case *MetadataMutation:
@@ -1060,157 +1052,6 @@ func (c *FileClient) mutate(ctx context.Context, m *FileMutation) (Value, error)
 	}
 }
 
-// FsEventClient is a client for the FsEvent schema.
-type FsEventClient struct {
-	config
-}
-
-// NewFsEventClient returns a client for the FsEvent from the given config.
-func NewFsEventClient(c config) *FsEventClient {
-	return &FsEventClient{config: c}
-}
-
-// Use adds a list of mutation hooks to the hooks stack.
-// A call to `Use(f, g, h)` equals to `fsevent.Hooks(f(g(h())))`.
-func (c *FsEventClient) Use(hooks ...Hook) {
-	c.hooks.FsEvent = append(c.hooks.FsEvent, hooks...)
-}
-
-// Intercept adds a list of query interceptors to the interceptors stack.
-// A call to `Intercept(f, g, h)` equals to `fsevent.Intercept(f(g(h())))`.
-func (c *FsEventClient) Intercept(interceptors ...Interceptor) {
-	c.inters.FsEvent = append(c.inters.FsEvent, interceptors...)
-}
-
-// Create returns a builder for creating a FsEvent entity.
-func (c *FsEventClient) Create() *FsEventCreate {
-	mutation := newFsEventMutation(c.config, OpCreate)
-	return &FsEventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// CreateBulk returns a builder for creating a bulk of FsEvent entities.
-func (c *FsEventClient) CreateBulk(builders ...*FsEventCreate) *FsEventCreateBulk {
-	return &FsEventCreateBulk{config: c.config, builders: builders}
-}
-
-// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
-// a builder and applies setFunc on it.
-func (c *FsEventClient) MapCreateBulk(slice any, setFunc func(*FsEventCreate, int)) *FsEventCreateBulk {
-	rv := reflect.ValueOf(slice)
-	if rv.Kind() != reflect.Slice {
-		return &FsEventCreateBulk{err: fmt.Errorf("calling to FsEventClient.MapCreateBulk with wrong type %T, need slice", slice)}
-	}
-	builders := make([]*FsEventCreate, rv.Len())
-	for i := 0; i < rv.Len(); i++ {
-		builders[i] = c.Create()
-		setFunc(builders[i], i)
-	}
-	return &FsEventCreateBulk{config: c.config, builders: builders}
-}
-
-// Update returns an update builder for FsEvent.
-func (c *FsEventClient) Update() *FsEventUpdate {
-	mutation := newFsEventMutation(c.config, OpUpdate)
-	return &FsEventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOne returns an update builder for the given entity.
-func (c *FsEventClient) UpdateOne(fe *FsEvent) *FsEventUpdateOne {
-	mutation := newFsEventMutation(c.config, OpUpdateOne, withFsEvent(fe))
-	return &FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOneID returns an update builder for the given id.
-func (c *FsEventClient) UpdateOneID(id int) *FsEventUpdateOne {
-	mutation := newFsEventMutation(c.config, OpUpdateOne, withFsEventID(id))
-	return &FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// Delete returns a delete builder for FsEvent.
-func (c *FsEventClient) Delete() *FsEventDelete {
-	mutation := newFsEventMutation(c.config, OpDelete)
-	return &FsEventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// DeleteOne returns a builder for deleting the given entity.
-func (c *FsEventClient) DeleteOne(fe *FsEvent) *FsEventDeleteOne {
-	return c.DeleteOneID(fe.ID)
-}
-
-// DeleteOneID returns a builder for deleting the given entity by its id.
-func (c *FsEventClient) DeleteOneID(id int) *FsEventDeleteOne {
-	builder := c.Delete().Where(fsevent.ID(id))
-	builder.mutation.id = &id
-	builder.mutation.op = OpDeleteOne
-	return &FsEventDeleteOne{builder}
-}
-
-// Query returns a query builder for FsEvent.
-func (c *FsEventClient) Query() *FsEventQuery {
-	return &FsEventQuery{
-		config: c.config,
-		ctx:    &QueryContext{Type: TypeFsEvent},
-		inters: c.Interceptors(),
-	}
-}
-
-// Get returns a FsEvent entity by its id.
-func (c *FsEventClient) Get(ctx context.Context, id int) (*FsEvent, error) {
-	return c.Query().Where(fsevent.ID(id)).Only(ctx)
-}
-
-// GetX is like Get, but panics if an error occurs.
-func (c *FsEventClient) GetX(ctx context.Context, id int) *FsEvent {
-	obj, err := c.Get(ctx, id)
-	if err != nil {
-		panic(err)
-	}
-	return obj
-}
-
-// QueryUser queries the user edge of a FsEvent.
-func (c *FsEventClient) QueryUser(fe *FsEvent) *UserQuery {
-	query := (&UserClient{config: c.config}).Query()
-	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
-		id := fe.ID
-		step := sqlgraph.NewStep(
-			sqlgraph.From(fsevent.Table, fsevent.FieldID, id),
-			sqlgraph.To(user.Table, user.FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, fsevent.UserTable, fsevent.UserColumn),
-		)
-		fromV = sqlgraph.Neighbors(fe.driver.Dialect(), step)
-		return fromV, nil
-	}
-	return query
-}
-
-// Hooks returns the client hooks.
-func (c *FsEventClient) Hooks() []Hook {
-	hooks := c.hooks.FsEvent
-	return append(hooks[:len(hooks):len(hooks)], fsevent.Hooks[:]...)
-}
-
-// Interceptors returns the client interceptors.
-func (c *FsEventClient) Interceptors() []Interceptor {
-	inters := c.inters.FsEvent
-	return append(inters[:len(inters):len(inters)], fsevent.Interceptors[:]...)
-}
-
-func (c *FsEventClient) mutate(ctx context.Context, m *FsEventMutation) (Value, error) {
-	switch m.Op() {
-	case OpCreate:
-		return (&FsEventCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
-	case OpUpdate:
-		return (&FsEventUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
-	case OpUpdateOne:
-		return (&FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
-	case OpDelete, OpDeleteOne:
-		return (&FsEventDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
-	default:
-		return nil, fmt.Errorf("ent: unknown FsEvent mutation op: %q", m.Op())
-	}
-}
-
 // GroupClient is a client for the Group schema.
 type GroupClient struct {
 	config
@@ -2687,22 +2528,6 @@ func (c *UserClient) QueryTasks(u *User) *TaskQuery {
 	return query
 }
 
-// QueryFsevents queries the fsevents edge of a User.
-func (c *UserClient) QueryFsevents(u *User) *FsEventQuery {
-	query := (&FsEventClient{config: c.config}).Query()
-	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
-		id := u.ID
-		step := sqlgraph.NewStep(
-			sqlgraph.From(user.Table, user.FieldID, id),
-			sqlgraph.To(fsevent.Table, fsevent.FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, user.FseventsTable, user.FseventsColumn),
-		)
-		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
-		return fromV, nil
-	}
-	return query
-}
-
 // QueryEntities queries the entities edge of a User.
 func (c *UserClient) QueryEntities(u *User) *EntityQuery {
 	query := (&EntityClient{config: c.config}).Query()
@@ -2749,12 +2574,12 @@ func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error)
 // hooks and interceptors per client, for fast access.
 type (
 	hooks struct {
-		DavAccount, DirectLink, Entity, File, FsEvent, Group, Metadata, Node, Passkey,
-		Setting, Share, StoragePolicy, Task, User []ent.Hook
+		DavAccount, DirectLink, Entity, File, Group, Metadata, Node, Passkey, Setting,
+		Share, StoragePolicy, Task, User []ent.Hook
 	}
 	inters struct {
-		DavAccount, DirectLink, Entity, File, FsEvent, Group, Metadata, Node, Passkey,
-		Setting, Share, StoragePolicy, Task, User []ent.Interceptor
+		DavAccount, DirectLink, Entity, File, Group, Metadata, Node, Passkey, Setting,
+		Share, StoragePolicy, Task, User []ent.Interceptor
 	}
 )
@@ -16,7 +16,6 @@ import (
 	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
 	"github.com/cloudreve/Cloudreve/v4/ent/entity"
 	"github.com/cloudreve/Cloudreve/v4/ent/file"
-	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
 	"github.com/cloudreve/Cloudreve/v4/ent/group"
 	"github.com/cloudreve/Cloudreve/v4/ent/metadata"
 	"github.com/cloudreve/Cloudreve/v4/ent/node"
@@ -90,7 +89,6 @@ func checkColumn(table, column string) error {
 		directlink.Table: directlink.ValidColumn,
 		entity.Table: entity.ValidColumn,
 		file.Table: file.ValidColumn,
-		fsevent.Table: fsevent.ValidColumn,
 		group.Table: group.ValidColumn,
 		metadata.Table: metadata.ValidColumn,
 		node.Table: node.ValidColumn,
@@ -42,8 +42,8 @@ type Entity struct {
 	CreatedBy int `json:"created_by,omitempty"`
 	// UploadSessionID holds the value of the "upload_session_id" field.
 	UploadSessionID *uuid.UUID `json:"upload_session_id,omitempty"`
-	// Props holds the value of the "props" field.
-	Props *types.EntityProps `json:"props,omitempty"`
+	// RecycleOptions holds the value of the "recycle_options" field.
+	RecycleOptions *types.EntityRecycleOption `json:"recycle_options,omitempty"`
 	// Edges holds the relations/edges for other nodes in the graph.
 	// The values are being populated by the EntityQuery when eager-loading is set.
 	Edges EntityEdges `json:"edges"`
@@ -105,7 +105,7 @@ func (*Entity) scanValues(columns []string) ([]any, error) {
 		switch columns[i] {
 		case entity.FieldUploadSessionID:
 			values[i] = &sql.NullScanner{S: new(uuid.UUID)}
-		case entity.FieldProps:
+		case entity.FieldRecycleOptions:
 			values[i] = new([]byte)
 		case entity.FieldID, entity.FieldType, entity.FieldSize, entity.FieldReferenceCount, entity.FieldStoragePolicyEntities, entity.FieldCreatedBy:
 			values[i] = new(sql.NullInt64)
@@ -196,12 +196,12 @@ func (e *Entity) assignValues(columns []string, values []any) error {
 				e.UploadSessionID = new(uuid.UUID)
 				*e.UploadSessionID = *value.S.(*uuid.UUID)
 			}
-		case entity.FieldProps:
+		case entity.FieldRecycleOptions:
 			if value, ok := values[i].(*[]byte); !ok {
-				return fmt.Errorf("unexpected type %T for field props", values[i])
+				return fmt.Errorf("unexpected type %T for field recycle_options", values[i])
 			} else if value != nil && len(*value) > 0 {
-				if err := json.Unmarshal(*value, &e.Props); err != nil {
-					return fmt.Errorf("unmarshal field props: %w", err)
+				if err := json.Unmarshal(*value, &e.RecycleOptions); err != nil {
+					return fmt.Errorf("unmarshal field recycle_options: %w", err)
 				}
 			}
 		default:
@@ -289,8 +289,8 @@ func (e *Entity) String() string {
 		builder.WriteString(fmt.Sprintf("%v", *v))
 	}
 	builder.WriteString(", ")
-	builder.WriteString("props=")
-	builder.WriteString(fmt.Sprintf("%v", e.Props))
+	builder.WriteString("recycle_options=")
+	builder.WriteString(fmt.Sprintf("%v", e.RecycleOptions))
 	builder.WriteByte(')')
 	return builder.String()
 }
@@ -35,8 +35,8 @@ const (
 	FieldCreatedBy = "created_by"
 	// FieldUploadSessionID holds the string denoting the upload_session_id field in the database.
 	FieldUploadSessionID = "upload_session_id"
-	// FieldProps holds the string denoting the props field in the database.
-	FieldProps = "recycle_options"
+	// FieldRecycleOptions holds the string denoting the recycle_options field in the database.
+	FieldRecycleOptions = "recycle_options"
 	// EdgeFile holds the string denoting the file edge name in mutations.
 	EdgeFile = "file"
 	// EdgeUser holds the string denoting the user edge name in mutations.
@@ -79,7 +79,7 @@ var Columns = []string{
 	FieldStoragePolicyEntities,
 	FieldCreatedBy,
 	FieldUploadSessionID,
-	FieldProps,
+	FieldRecycleOptions,
 }
 
 var (
@@ -521,14 +521,14 @@ func UploadSessionIDNotNil() predicate.Entity {
 	return predicate.Entity(sql.FieldNotNull(FieldUploadSessionID))
 }
 
-// PropsIsNil applies the IsNil predicate on the "props" field.
-func PropsIsNil() predicate.Entity {
-	return predicate.Entity(sql.FieldIsNull(FieldProps))
+// RecycleOptionsIsNil applies the IsNil predicate on the "recycle_options" field.
+func RecycleOptionsIsNil() predicate.Entity {
+	return predicate.Entity(sql.FieldIsNull(FieldRecycleOptions))
 }
 
-// PropsNotNil applies the NotNil predicate on the "props" field.
-func PropsNotNil() predicate.Entity {
-	return predicate.Entity(sql.FieldNotNull(FieldProps))
+// RecycleOptionsNotNil applies the NotNil predicate on the "recycle_options" field.
+func RecycleOptionsNotNil() predicate.Entity {
+	return predicate.Entity(sql.FieldNotNull(FieldRecycleOptions))
 }
 
 // HasFile applies the HasEdge predicate on the "file" edge.
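These generated predicates are exactly what the removed cmd/masterkey.go composes earlier in this diff; its entity scan selects every row whose JSON column is non-NULL:

```go
// Verbatim from the removed cmd/masterkey.go above (db is an *ent.Client,
// ctx a context.Context). entity.Not(entity.PropsIsNil()) is equivalent
// to entity.PropsNotNil() on the master side of this diff.
entities, err := db.Entity.Query().
	Where(entity.Not(entity.PropsIsNil())).
	All(ctx)
```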
@@ -135,9 +135,9 @@ func (ec *EntityCreate) SetNillableUploadSessionID(u *uuid.UUID) *EntityCreate {
 	return ec
 }
 
-// SetProps sets the "props" field.
-func (ec *EntityCreate) SetProps(tp *types.EntityProps) *EntityCreate {
-	ec.mutation.SetProps(tp)
+// SetRecycleOptions sets the "recycle_options" field.
+func (ec *EntityCreate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityCreate {
+	ec.mutation.SetRecycleOptions(tro)
 	return ec
 }
 
@@ -336,9 +336,9 @@ func (ec *EntityCreate) createSpec() (*Entity, *sqlgraph.CreateSpec) {
 		_spec.SetField(entity.FieldUploadSessionID, field.TypeUUID, value)
 		_node.UploadSessionID = &value
 	}
-	if value, ok := ec.mutation.Props(); ok {
-		_spec.SetField(entity.FieldProps, field.TypeJSON, value)
-		_node.Props = value
+	if value, ok := ec.mutation.RecycleOptions(); ok {
+		_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
+		_node.RecycleOptions = value
 	}
 	if nodes := ec.mutation.FileIDs(); len(nodes) > 0 {
 		edge := &sqlgraph.EdgeSpec{
@@ -586,21 +586,21 @@ func (u *EntityUpsert) ClearUploadSessionID() *EntityUpsert {
 	return u
 }
 
-// SetProps sets the "props" field.
-func (u *EntityUpsert) SetProps(v *types.EntityProps) *EntityUpsert {
-	u.Set(entity.FieldProps, v)
+// SetRecycleOptions sets the "recycle_options" field.
+func (u *EntityUpsert) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsert {
+	u.Set(entity.FieldRecycleOptions, v)
 	return u
 }
 
-// UpdateProps sets the "props" field to the value that was provided on create.
-func (u *EntityUpsert) UpdateProps() *EntityUpsert {
-	u.SetExcluded(entity.FieldProps)
+// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
+func (u *EntityUpsert) UpdateRecycleOptions() *EntityUpsert {
+	u.SetExcluded(entity.FieldRecycleOptions)
 	return u
 }
 
-// ClearProps clears the value of the "props" field.
-func (u *EntityUpsert) ClearProps() *EntityUpsert {
-	u.SetNull(entity.FieldProps)
+// ClearRecycleOptions clears the value of the "recycle_options" field.
+func (u *EntityUpsert) ClearRecycleOptions() *EntityUpsert {
+	u.SetNull(entity.FieldRecycleOptions)
 	return u
 }
 
@@ -817,24 +817,24 @@ func (u *EntityUpsertOne) ClearUploadSessionID() *EntityUpsertOne {
 	})
 }
 
-// SetProps sets the "props" field.
-func (u *EntityUpsertOne) SetProps(v *types.EntityProps) *EntityUpsertOne {
+// SetRecycleOptions sets the "recycle_options" field.
+func (u *EntityUpsertOne) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertOne {
 	return u.Update(func(s *EntityUpsert) {
-		s.SetProps(v)
+		s.SetRecycleOptions(v)
 	})
 }
 
-// UpdateProps sets the "props" field to the value that was provided on create.
-func (u *EntityUpsertOne) UpdateProps() *EntityUpsertOne {
+// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
+func (u *EntityUpsertOne) UpdateRecycleOptions() *EntityUpsertOne {
 	return u.Update(func(s *EntityUpsert) {
-		s.UpdateProps()
+		s.UpdateRecycleOptions()
 	})
 }
 
-// ClearProps clears the value of the "props" field.
-func (u *EntityUpsertOne) ClearProps() *EntityUpsertOne {
+// ClearRecycleOptions clears the value of the "recycle_options" field.
+func (u *EntityUpsertOne) ClearRecycleOptions() *EntityUpsertOne {
 	return u.Update(func(s *EntityUpsert) {
-		s.ClearProps()
+		s.ClearRecycleOptions()
 	})
 }
 
@@ -1222,24 +1222,24 @@ func (u *EntityUpsertBulk) ClearUploadSessionID() *EntityUpsertBulk {
 	})
 }
 
-// SetProps sets the "props" field.
-func (u *EntityUpsertBulk) SetProps(v *types.EntityProps) *EntityUpsertBulk {
+// SetRecycleOptions sets the "recycle_options" field.
+func (u *EntityUpsertBulk) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertBulk {
 	return u.Update(func(s *EntityUpsert) {
-		s.SetProps(v)
+		s.SetRecycleOptions(v)
 	})
 }
 
-// UpdateProps sets the "props" field to the value that was provided on create.
-func (u *EntityUpsertBulk) UpdateProps() *EntityUpsertBulk {
+// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
+func (u *EntityUpsertBulk) UpdateRecycleOptions() *EntityUpsertBulk {
 	return u.Update(func(s *EntityUpsert) {
-		s.UpdateProps()
+		s.UpdateRecycleOptions()
 	})
 }
 
-// ClearProps clears the value of the "props" field.
-func (u *EntityUpsertBulk) ClearProps() *EntityUpsertBulk {
+// ClearRecycleOptions clears the value of the "recycle_options" field.
+func (u *EntityUpsertBulk) ClearRecycleOptions() *EntityUpsertBulk {
 	return u.Update(func(s *EntityUpsert) {
-		s.ClearProps()
+		s.ClearRecycleOptions()
 	})
 }
|
|
@ -190,15 +190,15 @@ func (eu *EntityUpdate) ClearUploadSessionID() *EntityUpdate {
|
|||
return eu
|
||||
}
|
||||
|
||||
// SetProps sets the "props" field.
|
||||
func (eu *EntityUpdate) SetProps(tp *types.EntityProps) *EntityUpdate {
|
||||
eu.mutation.SetProps(tp)
|
||||
// SetRecycleOptions sets the "recycle_options" field.
|
||||
func (eu *EntityUpdate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdate {
|
||||
eu.mutation.SetRecycleOptions(tro)
|
||||
return eu
|
||||
}
|
||||
|
||||
// ClearProps clears the value of the "props" field.
|
||||
func (eu *EntityUpdate) ClearProps() *EntityUpdate {
|
||||
eu.mutation.ClearProps()
|
||||
// ClearRecycleOptions clears the value of the "recycle_options" field.
|
||||
func (eu *EntityUpdate) ClearRecycleOptions() *EntityUpdate {
|
||||
eu.mutation.ClearRecycleOptions()
|
||||
return eu
|
||||
}
|
||||
|
||||
|
|
@ -383,11 +383,11 @@ func (eu *EntityUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
|||
if eu.mutation.UploadSessionIDCleared() {
|
||||
_spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
|
||||
}
|
||||
if value, ok := eu.mutation.Props(); ok {
|
||||
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
|
||||
if value, ok := eu.mutation.RecycleOptions(); ok {
|
||||
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
|
||||
}
|
||||
if eu.mutation.PropsCleared() {
|
||||
_spec.ClearField(entity.FieldProps, field.TypeJSON)
|
||||
if eu.mutation.RecycleOptionsCleared() {
|
||||
_spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
|
||||
}
|
||||
if eu.mutation.FileCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
|
|
@ -669,15 +669,15 @@ func (euo *EntityUpdateOne) ClearUploadSessionID() *EntityUpdateOne {
|
|||
return euo
|
||||
}
|
||||
|
||||
// SetProps sets the "props" field.
|
||||
func (euo *EntityUpdateOne) SetProps(tp *types.EntityProps) *EntityUpdateOne {
|
||||
euo.mutation.SetProps(tp)
|
||||
// SetRecycleOptions sets the "recycle_options" field.
|
||||
func (euo *EntityUpdateOne) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdateOne {
|
||||
euo.mutation.SetRecycleOptions(tro)
|
||||
return euo
|
||||
}
|
||||
|
||||
// ClearProps clears the value of the "props" field.
|
||||
func (euo *EntityUpdateOne) ClearProps() *EntityUpdateOne {
|
||||
euo.mutation.ClearProps()
|
||||
// ClearRecycleOptions clears the value of the "recycle_options" field.
|
||||
func (euo *EntityUpdateOne) ClearRecycleOptions() *EntityUpdateOne {
|
||||
euo.mutation.ClearRecycleOptions()
|
||||
return euo
|
||||
}
|
||||
|
||||
|
|
@ -892,11 +892,11 @@ func (euo *EntityUpdateOne) sqlSave(ctx context.Context) (_node *Entity, err err
|
|||
if euo.mutation.UploadSessionIDCleared() {
|
||||
_spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
|
||||
}
|
||||
if value, ok := euo.mutation.Props(); ok {
|
||||
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
|
||||
if value, ok := euo.mutation.RecycleOptions(); ok {
|
||||
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
|
||||
}
|
||||
if euo.mutation.PropsCleared() {
|
||||
_spec.ClearField(entity.FieldProps, field.TypeJSON)
|
||||
if euo.mutation.RecycleOptionsCleared() {
|
||||
_spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
|
||||
}
|
||||
if euo.mutation.FileCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
|
|
|
|||
ent/fsevent.go
@@ -1,204 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"entgo.io/ent"
-	"entgo.io/ent/dialect/sql"
-	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
-	"github.com/cloudreve/Cloudreve/v4/ent/user"
-	"github.com/gofrs/uuid"
-)
-
-// FsEvent is the model entity for the FsEvent schema.
-type FsEvent struct {
-	config `json:"-"`
-	// ID of the ent.
-	ID int `json:"id,omitempty"`
-	// CreatedAt holds the value of the "created_at" field.
-	CreatedAt time.Time `json:"created_at,omitempty"`
-	// UpdatedAt holds the value of the "updated_at" field.
-	UpdatedAt time.Time `json:"updated_at,omitempty"`
-	// DeletedAt holds the value of the "deleted_at" field.
-	DeletedAt *time.Time `json:"deleted_at,omitempty"`
-	// Event holds the value of the "event" field.
-	Event string `json:"event,omitempty"`
-	// Subscriber holds the value of the "subscriber" field.
-	Subscriber uuid.UUID `json:"subscriber,omitempty"`
-	// UserFsevent holds the value of the "user_fsevent" field.
-	UserFsevent int `json:"user_fsevent,omitempty"`
-	// Edges holds the relations/edges for other nodes in the graph.
-	// The values are being populated by the FsEventQuery when eager-loading is set.
-	Edges FsEventEdges `json:"edges"`
-	selectValues sql.SelectValues
-}
-
-// FsEventEdges holds the relations/edges for other nodes in the graph.
-type FsEventEdges struct {
-	// User holds the value of the user edge.
-	User *User `json:"user,omitempty"`
-	// loadedTypes holds the information for reporting if a
-	// type was loaded (or requested) in eager-loading or not.
-	loadedTypes [1]bool
-}
-
-// UserOrErr returns the User value or an error if the edge
-// was not loaded in eager-loading, or loaded but was not found.
-func (e FsEventEdges) UserOrErr() (*User, error) {
-	if e.loadedTypes[0] {
-		if e.User == nil {
-			// Edge was loaded but was not found.
-			return nil, &NotFoundError{label: user.Label}
-		}
-		return e.User, nil
-	}
-	return nil, &NotLoadedError{edge: "user"}
-}
-
-// scanValues returns the types for scanning values from sql.Rows.
-func (*FsEvent) scanValues(columns []string) ([]any, error) {
-	values := make([]any, len(columns))
-	for i := range columns {
-		switch columns[i] {
-		case fsevent.FieldID, fsevent.FieldUserFsevent:
-			values[i] = new(sql.NullInt64)
-		case fsevent.FieldEvent:
-			values[i] = new(sql.NullString)
-		case fsevent.FieldCreatedAt, fsevent.FieldUpdatedAt, fsevent.FieldDeletedAt:
-			values[i] = new(sql.NullTime)
-		case fsevent.FieldSubscriber:
-			values[i] = new(uuid.UUID)
-		default:
-			values[i] = new(sql.UnknownType)
-		}
-	}
-	return values, nil
-}
-
-// assignValues assigns the values that were returned from sql.Rows (after scanning)
-// to the FsEvent fields.
-func (fe *FsEvent) assignValues(columns []string, values []any) error {
-	if m, n := len(values), len(columns); m < n {
-		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
-	}
-	for i := range columns {
-		switch columns[i] {
-		case fsevent.FieldID:
-			value, ok := values[i].(*sql.NullInt64)
-			if !ok {
-				return fmt.Errorf("unexpected type %T for field id", value)
-			}
-			fe.ID = int(value.Int64)
-		case fsevent.FieldCreatedAt:
-			if value, ok := values[i].(*sql.NullTime); !ok {
-				return fmt.Errorf("unexpected type %T for field created_at", values[i])
-			} else if value.Valid {
-				fe.CreatedAt = value.Time
-			}
-		case fsevent.FieldUpdatedAt:
-			if value, ok := values[i].(*sql.NullTime); !ok {
-				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
-			} else if value.Valid {
-				fe.UpdatedAt = value.Time
-			}
-		case fsevent.FieldDeletedAt:
-			if value, ok := values[i].(*sql.NullTime); !ok {
-				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
-			} else if value.Valid {
-				fe.DeletedAt = new(time.Time)
-				*fe.DeletedAt = value.Time
-			}
-		case fsevent.FieldEvent:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field event", values[i])
-			} else if value.Valid {
-				fe.Event = value.String
-			}
-		case fsevent.FieldSubscriber:
-			if value, ok := values[i].(*uuid.UUID); !ok {
-				return fmt.Errorf("unexpected type %T for field subscriber", values[i])
-			} else if value != nil {
-				fe.Subscriber = *value
-			}
-		case fsevent.FieldUserFsevent:
-			if value, ok := values[i].(*sql.NullInt64); !ok {
-				return fmt.Errorf("unexpected type %T for field user_fsevent", values[i])
-			} else if value.Valid {
-				fe.UserFsevent = int(value.Int64)
-			}
-		default:
-			fe.selectValues.Set(columns[i], values[i])
-		}
-	}
-	return nil
-}
-
-// Value returns the ent.Value that was dynamically selected and assigned to the FsEvent.
-// This includes values selected through modifiers, order, etc.
-func (fe *FsEvent) Value(name string) (ent.Value, error) {
-	return fe.selectValues.Get(name)
-}
-
-// QueryUser queries the "user" edge of the FsEvent entity.
-func (fe *FsEvent) QueryUser() *UserQuery {
-	return NewFsEventClient(fe.config).QueryUser(fe)
-}
-
-// Update returns a builder for updating this FsEvent.
-// Note that you need to call FsEvent.Unwrap() before calling this method if this FsEvent
-// was returned from a transaction, and the transaction was committed or rolled back.
-func (fe *FsEvent) Update() *FsEventUpdateOne {
-	return NewFsEventClient(fe.config).UpdateOne(fe)
-}
-
-// Unwrap unwraps the FsEvent entity that was returned from a transaction after it was closed,
-// so that all future queries will be executed through the driver which created the transaction.
-func (fe *FsEvent) Unwrap() *FsEvent {
-	_tx, ok := fe.config.driver.(*txDriver)
-	if !ok {
-		panic("ent: FsEvent is not a transactional entity")
-	}
-	fe.config.driver = _tx.drv
-	return fe
-}
-
-// String implements the fmt.Stringer.
-func (fe *FsEvent) String() string {
-	var builder strings.Builder
-	builder.WriteString("FsEvent(")
-	builder.WriteString(fmt.Sprintf("id=%v, ", fe.ID))
-	builder.WriteString("created_at=")
-	builder.WriteString(fe.CreatedAt.Format(time.ANSIC))
-	builder.WriteString(", ")
-	builder.WriteString("updated_at=")
-	builder.WriteString(fe.UpdatedAt.Format(time.ANSIC))
-	builder.WriteString(", ")
-	if v := fe.DeletedAt; v != nil {
-		builder.WriteString("deleted_at=")
-		builder.WriteString(v.Format(time.ANSIC))
-	}
-	builder.WriteString(", ")
-	builder.WriteString("event=")
-	builder.WriteString(fe.Event)
-	builder.WriteString(", ")
-	builder.WriteString("subscriber=")
-	builder.WriteString(fmt.Sprintf("%v", fe.Subscriber))
-	builder.WriteString(", ")
-	builder.WriteString("user_fsevent=")
-	builder.WriteString(fmt.Sprintf("%v", fe.UserFsevent))
-	builder.WriteByte(')')
-	return builder.String()
-}
-
-// SetUser manually set the edge as loaded state.
-func (e *FsEvent) SetUser(v *User) {
-	e.Edges.User = v
-	e.Edges.loadedTypes[0] = true
-}
-
-// FsEvents is a parsable slice of FsEvent.
-type FsEvents []*FsEvent
@ -1,130 +0,0 @@

// Code generated by ent, DO NOT EDIT.

package fsevent

import (
	"time"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
)

const (
	// Label holds the string label denoting the fsevent type in the database.
	Label = "fs_event"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldEvent holds the string denoting the event field in the database.
	FieldEvent = "event"
	// FieldSubscriber holds the string denoting the subscriber field in the database.
	FieldSubscriber = "subscriber"
	// FieldUserFsevent holds the string denoting the user_fsevent field in the database.
	FieldUserFsevent = "user_fsevent"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the fsevent in the database.
	Table = "fs_events"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "fs_events"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_fsevent"
)

// Columns holds all SQL columns for fsevent fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldEvent,
	FieldSubscriber,
	FieldUserFsevent,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}

// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)

// OrderOption defines the ordering options for the FsEvent queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByEvent orders the results by the event field.
func ByEvent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEvent, opts...).ToFunc()
}

// BySubscriber orders the results by the subscriber field.
func BySubscriber(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSubscriber, opts...).ToFunc()
}

// ByUserFsevent orders the results by the user_fsevent field.
func ByUserFsevent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserFsevent, opts...).ToFunc()
}

// ByUserField orders the results by user field.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}

func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}
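
A short sketch of how these OrderOption helpers are consumed. Not part of the diff; it assumes the generated ent.Client, and sql.OrderDesc comes from entgo.io/ent/dialect/sql:

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
)

// latestEvents returns the n most recently created FsEvents, combining the
// generated ByCreatedAt option with a descending order term.
func latestEvents(ctx context.Context, client *ent.Client, n int) ([]*ent.FsEvent, error) {
	return client.FsEvent.Query().
		Order(fsevent.ByCreatedAt(sql.OrderDesc())).
		Limit(n).
		All(ctx)
}
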
@ -1,390 +0,0 @@

// Code generated by ent, DO NOT EDIT.

package fsevent

import (
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"github.com/cloudreve/Cloudreve/v4/ent/predicate"
	"github.com/gofrs/uuid"
)

// ID filters vertices based on their ID field.
func ID(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldID, id))
}

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUpdatedAt, v))
}

// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldDeletedAt, v))
}

// Event applies equality check predicate on the "event" field. It's identical to EventEQ.
func Event(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldEvent, v))
}

// Subscriber applies equality check predicate on the "subscriber" field. It's identical to SubscriberEQ.
func Subscriber(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldSubscriber, v))
}

// UserFsevent applies equality check predicate on the "user_fsevent" field. It's identical to UserFseventEQ.
func UserFsevent(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUserFsevent, v))
}

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldUpdatedAt, v))
}

// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldDeletedAt, v))
}

// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldDeletedAt, v))
}

// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldDeletedAt, vs...))
}

// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldDeletedAt, vs...))
}

// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldDeletedAt, v))
}

// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldDeletedAt, v))
}

// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldDeletedAt, v))
}

// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldDeletedAt, v))
}

// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIsNull(FieldDeletedAt))
}

// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotNull(FieldDeletedAt))
}

// EventEQ applies the EQ predicate on the "event" field.
func EventEQ(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldEvent, v))
}

// EventNEQ applies the NEQ predicate on the "event" field.
func EventNEQ(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldEvent, v))
}

// EventIn applies the In predicate on the "event" field.
func EventIn(vs ...string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldEvent, vs...))
}

// EventNotIn applies the NotIn predicate on the "event" field.
func EventNotIn(vs ...string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldEvent, vs...))
}

// EventGT applies the GT predicate on the "event" field.
func EventGT(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldEvent, v))
}

// EventGTE applies the GTE predicate on the "event" field.
func EventGTE(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldEvent, v))
}

// EventLT applies the LT predicate on the "event" field.
func EventLT(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldEvent, v))
}

// EventLTE applies the LTE predicate on the "event" field.
func EventLTE(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldEvent, v))
}

// EventContains applies the Contains predicate on the "event" field.
func EventContains(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldContains(FieldEvent, v))
}

// EventHasPrefix applies the HasPrefix predicate on the "event" field.
func EventHasPrefix(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldHasPrefix(FieldEvent, v))
}

// EventHasSuffix applies the HasSuffix predicate on the "event" field.
func EventHasSuffix(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldHasSuffix(FieldEvent, v))
}

// EventEqualFold applies the EqualFold predicate on the "event" field.
func EventEqualFold(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEqualFold(FieldEvent, v))
}

// EventContainsFold applies the ContainsFold predicate on the "event" field.
func EventContainsFold(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldContainsFold(FieldEvent, v))
}

// SubscriberEQ applies the EQ predicate on the "subscriber" field.
func SubscriberEQ(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldSubscriber, v))
}

// SubscriberNEQ applies the NEQ predicate on the "subscriber" field.
func SubscriberNEQ(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldSubscriber, v))
}

// SubscriberIn applies the In predicate on the "subscriber" field.
func SubscriberIn(vs ...uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldSubscriber, vs...))
}

// SubscriberNotIn applies the NotIn predicate on the "subscriber" field.
func SubscriberNotIn(vs ...uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldSubscriber, vs...))
}

// SubscriberGT applies the GT predicate on the "subscriber" field.
func SubscriberGT(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldSubscriber, v))
}

// SubscriberGTE applies the GTE predicate on the "subscriber" field.
func SubscriberGTE(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldSubscriber, v))
}

// SubscriberLT applies the LT predicate on the "subscriber" field.
func SubscriberLT(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldSubscriber, v))
}

// SubscriberLTE applies the LTE predicate on the "subscriber" field.
func SubscriberLTE(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldSubscriber, v))
}

// UserFseventEQ applies the EQ predicate on the "user_fsevent" field.
func UserFseventEQ(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUserFsevent, v))
}

// UserFseventNEQ applies the NEQ predicate on the "user_fsevent" field.
func UserFseventNEQ(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldUserFsevent, v))
}

// UserFseventIn applies the In predicate on the "user_fsevent" field.
func UserFseventIn(vs ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldUserFsevent, vs...))
}

// UserFseventNotIn applies the NotIn predicate on the "user_fsevent" field.
func UserFseventNotIn(vs ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldUserFsevent, vs...))
}

// UserFseventIsNil applies the IsNil predicate on the "user_fsevent" field.
func UserFseventIsNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIsNull(FieldUserFsevent))
}

// UserFseventNotNil applies the NotNil predicate on the "user_fsevent" field.
func UserFseventNotNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotNull(FieldUserFsevent))
}

// HasUser applies the HasEdge predicate on the "user" edge.
func HasUser() predicate.FsEvent {
	return predicate.FsEvent(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasUserWith applies the HasEdge predicate on the "user" edge with the given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.FsEvent {
	return predicate.FsEvent(func(s *sql.Selector) {
		step := newUserStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.NotPredicates(p))
}
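
A sketch of composing these predicates in a query. Not part of the diff; it assumes the generated ent.Client and the sibling user package's ID predicate, and the "file/" prefix is only an illustrative value:

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
	"github.com/cloudreve/Cloudreve/v4/ent/user"
)

// eventsForUser filters FsEvents by an event-name prefix and the owning user,
// combining field, soft-delete, and edge predicates with And.
func eventsForUser(ctx context.Context, client *ent.Client, uid int) ([]*ent.FsEvent, error) {
	return client.FsEvent.Query().
		Where(fsevent.And(
			fsevent.EventHasPrefix("file/"),
			fsevent.DeletedAtIsNil(),
			fsevent.HasUserWith(user.ID(uid)),
		)).
		All(ctx)
}
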
@ -1,827 +0,0 @@

// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"errors"
	"fmt"
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
	"github.com/cloudreve/Cloudreve/v4/ent/user"
	"github.com/gofrs/uuid"
)

// FsEventCreate is the builder for creating a FsEvent entity.
type FsEventCreate struct {
	config
	mutation *FsEventMutation
	hooks    []Hook
	conflict []sql.ConflictOption
}

// SetCreatedAt sets the "created_at" field.
func (fec *FsEventCreate) SetCreatedAt(t time.Time) *FsEventCreate {
	fec.mutation.SetCreatedAt(t)
	return fec
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableCreatedAt(t *time.Time) *FsEventCreate {
	if t != nil {
		fec.SetCreatedAt(*t)
	}
	return fec
}

// SetUpdatedAt sets the "updated_at" field.
func (fec *FsEventCreate) SetUpdatedAt(t time.Time) *FsEventCreate {
	fec.mutation.SetUpdatedAt(t)
	return fec
}

// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableUpdatedAt(t *time.Time) *FsEventCreate {
	if t != nil {
		fec.SetUpdatedAt(*t)
	}
	return fec
}

// SetDeletedAt sets the "deleted_at" field.
func (fec *FsEventCreate) SetDeletedAt(t time.Time) *FsEventCreate {
	fec.mutation.SetDeletedAt(t)
	return fec
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableDeletedAt(t *time.Time) *FsEventCreate {
	if t != nil {
		fec.SetDeletedAt(*t)
	}
	return fec
}

// SetEvent sets the "event" field.
func (fec *FsEventCreate) SetEvent(s string) *FsEventCreate {
	fec.mutation.SetEvent(s)
	return fec
}

// SetSubscriber sets the "subscriber" field.
func (fec *FsEventCreate) SetSubscriber(u uuid.UUID) *FsEventCreate {
	fec.mutation.SetSubscriber(u)
	return fec
}

// SetUserFsevent sets the "user_fsevent" field.
func (fec *FsEventCreate) SetUserFsevent(i int) *FsEventCreate {
	fec.mutation.SetUserFsevent(i)
	return fec
}

// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableUserFsevent(i *int) *FsEventCreate {
	if i != nil {
		fec.SetUserFsevent(*i)
	}
	return fec
}

// SetUserID sets the "user" edge to the User entity by ID.
func (fec *FsEventCreate) SetUserID(id int) *FsEventCreate {
	fec.mutation.SetUserID(id)
	return fec
}

// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (fec *FsEventCreate) SetNillableUserID(id *int) *FsEventCreate {
	if id != nil {
		fec = fec.SetUserID(*id)
	}
	return fec
}

// SetUser sets the "user" edge to the User entity.
func (fec *FsEventCreate) SetUser(u *User) *FsEventCreate {
	return fec.SetUserID(u.ID)
}

// Mutation returns the FsEventMutation object of the builder.
func (fec *FsEventCreate) Mutation() *FsEventMutation {
	return fec.mutation
}

// Save creates the FsEvent in the database.
func (fec *FsEventCreate) Save(ctx context.Context) (*FsEvent, error) {
	if err := fec.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, fec.sqlSave, fec.mutation, fec.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (fec *FsEventCreate) SaveX(ctx context.Context) *FsEvent {
	v, err := fec.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (fec *FsEventCreate) Exec(ctx context.Context) error {
	_, err := fec.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (fec *FsEventCreate) ExecX(ctx context.Context) {
	if err := fec.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (fec *FsEventCreate) defaults() error {
	if _, ok := fec.mutation.CreatedAt(); !ok {
		if fsevent.DefaultCreatedAt == nil {
			return fmt.Errorf("ent: uninitialized fsevent.DefaultCreatedAt (forgotten import ent/runtime?)")
		}
		v := fsevent.DefaultCreatedAt()
		fec.mutation.SetCreatedAt(v)
	}
	if _, ok := fec.mutation.UpdatedAt(); !ok {
		if fsevent.DefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized fsevent.DefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := fsevent.DefaultUpdatedAt()
		fec.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
func (fec *FsEventCreate) check() error {
	if _, ok := fec.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "FsEvent.created_at"`)}
	}
	if _, ok := fec.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "FsEvent.updated_at"`)}
	}
	if _, ok := fec.mutation.Event(); !ok {
		return &ValidationError{Name: "event", err: errors.New(`ent: missing required field "FsEvent.event"`)}
	}
	if _, ok := fec.mutation.Subscriber(); !ok {
		return &ValidationError{Name: "subscriber", err: errors.New(`ent: missing required field "FsEvent.subscriber"`)}
	}
	return nil
}

func (fec *FsEventCreate) sqlSave(ctx context.Context) (*FsEvent, error) {
	if err := fec.check(); err != nil {
		return nil, err
	}
	_node, _spec := fec.createSpec()
	if err := sqlgraph.CreateNode(ctx, fec.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	id := _spec.ID.Value.(int64)
	_node.ID = int(id)
	fec.mutation.id = &_node.ID
	fec.mutation.done = true
	return _node, nil
}

func (fec *FsEventCreate) createSpec() (*FsEvent, *sqlgraph.CreateSpec) {
	var (
		_node = &FsEvent{config: fec.config}
		_spec = sqlgraph.NewCreateSpec(fsevent.Table, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
	)

	if id, ok := fec.mutation.ID(); ok {
		_node.ID = id
		id64 := int64(id)
		_spec.ID.Value = id64
	}

	_spec.OnConflict = fec.conflict
	if value, ok := fec.mutation.CreatedAt(); ok {
		_spec.SetField(fsevent.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := fec.mutation.UpdatedAt(); ok {
		_spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := fec.mutation.DeletedAt(); ok {
		_spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
		_node.DeletedAt = &value
	}
	if value, ok := fec.mutation.Event(); ok {
		_spec.SetField(fsevent.FieldEvent, field.TypeString, value)
		_node.Event = value
	}
	if value, ok := fec.mutation.Subscriber(); ok {
		_spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
		_node.Subscriber = value
	}
	if nodes := fec.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   fsevent.UserTable,
			Columns: []string{fsevent.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_node.UserFsevent = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.FsEvent.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.FsEventUpsert) {
//			u.SetCreatedAt(v + v)
//		}).
//		Exec(ctx)
func (fec *FsEventCreate) OnConflict(opts ...sql.ConflictOption) *FsEventUpsertOne {
	fec.conflict = opts
	return &FsEventUpsertOne{
		create: fec,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (fec *FsEventCreate) OnConflictColumns(columns ...string) *FsEventUpsertOne {
	fec.conflict = append(fec.conflict, sql.ConflictColumns(columns...))
	return &FsEventUpsertOne{
		create: fec,
	}
}

type (
	// FsEventUpsertOne is the builder for "upsert"-ing
	// one FsEvent node.
	FsEventUpsertOne struct {
		create *FsEventCreate
	}

	// FsEventUpsert is the "OnConflict" setter.
	FsEventUpsert struct {
		*sql.UpdateSet
	}
)

// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsert) SetUpdatedAt(v time.Time) *FsEventUpsert {
	u.Set(fsevent.FieldUpdatedAt, v)
	return u
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateUpdatedAt() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldUpdatedAt)
	return u
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsert) SetDeletedAt(v time.Time) *FsEventUpsert {
	u.Set(fsevent.FieldDeletedAt, v)
	return u
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateDeletedAt() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldDeletedAt)
	return u
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsert) ClearDeletedAt() *FsEventUpsert {
	u.SetNull(fsevent.FieldDeletedAt)
	return u
}

// SetEvent sets the "event" field.
func (u *FsEventUpsert) SetEvent(v string) *FsEventUpsert {
	u.Set(fsevent.FieldEvent, v)
	return u
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateEvent() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldEvent)
	return u
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsert) SetSubscriber(v uuid.UUID) *FsEventUpsert {
	u.Set(fsevent.FieldSubscriber, v)
	return u
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateSubscriber() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldSubscriber)
	return u
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsert) SetUserFsevent(v int) *FsEventUpsert {
	u.Set(fsevent.FieldUserFsevent, v)
	return u
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateUserFsevent() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldUserFsevent)
	return u
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsert) ClearUserFsevent() *FsEventUpsert {
	u.SetNull(fsevent.FieldUserFsevent)
	return u
}

// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *FsEventUpsertOne) UpdateNewValues() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(fsevent.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *FsEventUpsertOne) Ignore() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *FsEventUpsertOne) DoNothing() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the FsEventCreate.OnConflict
// documentation for more info.
func (u *FsEventUpsertOne) Update(set func(*FsEventUpsert)) *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&FsEventUpsert{UpdateSet: update})
	}))
	return u
}

// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsertOne) SetUpdatedAt(v time.Time) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateUpdatedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsertOne) SetDeletedAt(v time.Time) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateDeletedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsertOne) ClearDeletedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearDeletedAt()
	})
}

// SetEvent sets the "event" field.
func (u *FsEventUpsertOne) SetEvent(v string) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetEvent(v)
	})
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateEvent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateEvent()
	})
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsertOne) SetSubscriber(v uuid.UUID) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetSubscriber(v)
	})
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateSubscriber() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateSubscriber()
	})
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsertOne) SetUserFsevent(v int) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUserFsevent(v)
	})
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateUserFsevent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUserFsevent()
	})
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsertOne) ClearUserFsevent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearUserFsevent()
	})
}

// Exec executes the query.
func (u *FsEventUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for FsEventCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *FsEventUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// Exec executes the UPSERT query and returns the inserted/updated ID.
func (u *FsEventUpsertOne) ID(ctx context.Context) (id int, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *FsEventUpsertOne) IDX(ctx context.Context) int {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// SetRawID explicitly sets the raw ID used for the entity on creation.
func (m *FsEventCreate) SetRawID(t int) *FsEventCreate {
	m.mutation.SetRawID(t)
	return m
}

// FsEventCreateBulk is the builder for creating many FsEvent entities in bulk.
type FsEventCreateBulk struct {
	config
	err      error
	builders []*FsEventCreate
	conflict []sql.ConflictOption
}

// Save creates the FsEvent entities in the database.
func (fecb *FsEventCreateBulk) Save(ctx context.Context) ([]*FsEvent, error) {
	if fecb.err != nil {
		return nil, fecb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(fecb.builders))
	nodes := make([]*FsEvent, len(fecb.builders))
	mutators := make([]Mutator, len(fecb.builders))
	for i := range fecb.builders {
		func(i int, root context.Context) {
			builder := fecb.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*FsEventMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					_, err = mutators[i+1].Mutate(root, fecb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = fecb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, fecb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		if _, err := mutators[0].Mutate(ctx, fecb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// SaveX is like Save, but panics if an error occurs.
func (fecb *FsEventCreateBulk) SaveX(ctx context.Context) []*FsEvent {
	v, err := fecb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (fecb *FsEventCreateBulk) Exec(ctx context.Context) error {
	_, err := fecb.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (fecb *FsEventCreateBulk) ExecX(ctx context.Context) {
	if err := fecb.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.FsEvent.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.FsEventUpsert) {
//			u.SetCreatedAt(v + v)
//		}).
//		Exec(ctx)
func (fecb *FsEventCreateBulk) OnConflict(opts ...sql.ConflictOption) *FsEventUpsertBulk {
	fecb.conflict = opts
	return &FsEventUpsertBulk{
		create: fecb,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (fecb *FsEventCreateBulk) OnConflictColumns(columns ...string) *FsEventUpsertBulk {
	fecb.conflict = append(fecb.conflict, sql.ConflictColumns(columns...))
	return &FsEventUpsertBulk{
		create: fecb,
	}
}

// FsEventUpsertBulk is the builder for "upsert"-ing
// a bulk of FsEvent nodes.
type FsEventUpsertBulk struct {
	create *FsEventCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *FsEventUpsertBulk) UpdateNewValues() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(fsevent.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *FsEventUpsertBulk) Ignore() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *FsEventUpsertBulk) DoNothing() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the FsEventCreateBulk.OnConflict
// documentation for more info.
func (u *FsEventUpsertBulk) Update(set func(*FsEventUpsert)) *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&FsEventUpsert{UpdateSet: update})
	}))
	return u
}

// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsertBulk) SetUpdatedAt(v time.Time) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateUpdatedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsertBulk) SetDeletedAt(v time.Time) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateDeletedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsertBulk) ClearDeletedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearDeletedAt()
	})
}

// SetEvent sets the "event" field.
func (u *FsEventUpsertBulk) SetEvent(v string) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetEvent(v)
	})
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateEvent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateEvent()
	})
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsertBulk) SetSubscriber(v uuid.UUID) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetSubscriber(v)
	})
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateSubscriber() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateSubscriber()
	})
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsertBulk) SetUserFsevent(v int) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUserFsevent(v)
	})
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateUserFsevent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUserFsevent()
	})
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsertBulk) ClearUserFsevent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearUserFsevent()
	})
}

// Exec executes the query.
func (u *FsEventUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the FsEventCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for FsEventCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *FsEventUpsertBulk) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}
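
A sketch of the upsert flow described in the OnConflict docs above. Not part of the diff; it assumes the generated ent.Client and, importantly, a unique index over (subscriber, event), which is the schema's concern and not confirmed by this diff:

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
	"github.com/gofrs/uuid"
)

// upsertSubscription inserts an FsEvent subscription, or refreshes the
// existing row's mutable fields when the conflict target already matches.
func upsertSubscription(ctx context.Context, client *ent.Client, sub uuid.UUID, event string) (int, error) {
	return client.FsEvent.Create().
		SetEvent(event).
		SetSubscriber(sub).
		OnConflictColumns(fsevent.FieldSubscriber, fsevent.FieldEvent).
		UpdateNewValues().
		ID(ctx)
}
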
@ -1,88 +0,0 @@

// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
	"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)

// FsEventDelete is the builder for deleting a FsEvent entity.
type FsEventDelete struct {
	config
	hooks    []Hook
	mutation *FsEventMutation
}

// Where appends a list of predicates to the FsEventDelete builder.
func (fed *FsEventDelete) Where(ps ...predicate.FsEvent) *FsEventDelete {
	fed.mutation.Where(ps...)
	return fed
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (fed *FsEventDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, fed.sqlExec, fed.mutation, fed.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (fed *FsEventDelete) ExecX(ctx context.Context) int {
	n, err := fed.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

func (fed *FsEventDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(fsevent.Table, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
	if ps := fed.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, fed.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	fed.mutation.done = true
	return affected, err
}

// FsEventDeleteOne is the builder for deleting a single FsEvent entity.
type FsEventDeleteOne struct {
	fed *FsEventDelete
}

// Where appends a list of predicates to the FsEventDelete builder.
func (fedo *FsEventDeleteOne) Where(ps ...predicate.FsEvent) *FsEventDeleteOne {
	fedo.fed.mutation.Where(ps...)
	return fedo
}

// Exec executes the deletion query.
func (fedo *FsEventDeleteOne) Exec(ctx context.Context) error {
	n, err := fedo.fed.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{fsevent.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (fedo *FsEventDeleteOne) ExecX(ctx context.Context) {
	if err := fedo.Exec(ctx); err != nil {
		panic(err)
	}
}
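
A sketch of the bulk-delete builder above, combining it with the generated predicates. Not part of the diff; it assumes the generated ent.Client:

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
	"github.com/gofrs/uuid"
)

// dropSubscriber removes every FsEvent row held by one subscriber and
// reports how many rows were deleted.
func dropSubscriber(ctx context.Context, client *ent.Client, sub uuid.UUID) (int, error) {
	return client.FsEvent.Delete().
		Where(fsevent.Subscriber(sub)).
		Exec(ctx)
}
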
@ -1,605 +0,0 @@

// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"fmt"
	"math"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
	"github.com/cloudreve/Cloudreve/v4/ent/predicate"
	"github.com/cloudreve/Cloudreve/v4/ent/user"
)

// FsEventQuery is the builder for querying FsEvent entities.
type FsEventQuery struct {
	config
	ctx        *QueryContext
	order      []fsevent.OrderOption
	inters     []Interceptor
	predicates []predicate.FsEvent
	withUser   *UserQuery
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the FsEventQuery builder.
func (feq *FsEventQuery) Where(ps ...predicate.FsEvent) *FsEventQuery {
	feq.predicates = append(feq.predicates, ps...)
	return feq
}

// Limit the number of records to be returned by this query.
func (feq *FsEventQuery) Limit(limit int) *FsEventQuery {
	feq.ctx.Limit = &limit
	return feq
}

// Offset to start from.
func (feq *FsEventQuery) Offset(offset int) *FsEventQuery {
	feq.ctx.Offset = &offset
	return feq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (feq *FsEventQuery) Unique(unique bool) *FsEventQuery {
	feq.ctx.Unique = &unique
	return feq
}

// Order specifies how the records should be ordered.
func (feq *FsEventQuery) Order(o ...fsevent.OrderOption) *FsEventQuery {
	feq.order = append(feq.order, o...)
	return feq
}

// QueryUser chains the current query on the "user" edge.
func (feq *FsEventQuery) QueryUser() *UserQuery {
	query := (&UserClient{config: feq.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := feq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := feq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(fsevent.Table, fsevent.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, fsevent.UserTable, fsevent.UserColumn),
		)
		fromU = sqlgraph.SetNeighbors(feq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// First returns the first FsEvent entity from the query.
// Returns a *NotFoundError when no FsEvent was found.
func (feq *FsEventQuery) First(ctx context.Context) (*FsEvent, error) {
	nodes, err := feq.Limit(1).All(setContextOp(ctx, feq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{fsevent.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
func (feq *FsEventQuery) FirstX(ctx context.Context) *FsEvent {
	node, err := feq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first FsEvent ID from the query.
// Returns a *NotFoundError when no FsEvent ID was found.
func (feq *FsEventQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = feq.Limit(1).IDs(setContextOp(ctx, feq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{fsevent.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
func (feq *FsEventQuery) FirstIDX(ctx context.Context) int {
	id, err := feq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single FsEvent entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one FsEvent entity is found.
// Returns a *NotFoundError when no FsEvent entities are found.
func (feq *FsEventQuery) Only(ctx context.Context) (*FsEvent, error) {
	nodes, err := feq.Limit(2).All(setContextOp(ctx, feq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{fsevent.Label}
	default:
		return nil, &NotSingularError{fsevent.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (feq *FsEventQuery) OnlyX(ctx context.Context) *FsEvent {
	node, err := feq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only FsEvent ID in the query.
// Returns a *NotSingularError when more than one FsEvent ID is found.
// Returns a *NotFoundError when no entities are found.
func (feq *FsEventQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = feq.Limit(2).IDs(setContextOp(ctx, feq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{fsevent.Label}
	default:
		err = &NotSingularError{fsevent.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (feq *FsEventQuery) OnlyIDX(ctx context.Context) int {
	id, err := feq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// All executes the query and returns a list of FsEvents.
func (feq *FsEventQuery) All(ctx context.Context) ([]*FsEvent, error) {
	ctx = setContextOp(ctx, feq.ctx, "All")
	if err := feq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*FsEvent, *FsEventQuery]()
	return withInterceptors[[]*FsEvent](ctx, feq, qr, feq.inters)
}

// AllX is like All, but panics if an error occurs.
func (feq *FsEventQuery) AllX(ctx context.Context) []*FsEvent {
	nodes, err := feq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of FsEvent IDs.
func (feq *FsEventQuery) IDs(ctx context.Context) (ids []int, err error) {
	if feq.ctx.Unique == nil && feq.path != nil {
		feq.Unique(true)
	}
	ctx = setContextOp(ctx, feq.ctx, "IDs")
	if err = feq.Select(fsevent.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (feq *FsEventQuery) IDsX(ctx context.Context) []int {
	ids, err := feq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}

// Count returns the count of the given query.
func (feq *FsEventQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, feq.ctx, "Count")
	if err := feq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, feq, querierCount[*FsEventQuery](), feq.inters)
}

// CountX is like Count, but panics if an error occurs.
func (feq *FsEventQuery) CountX(ctx context.Context) int {
	count, err := feq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (feq *FsEventQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, feq.ctx, "Exist")
|
||||
switch _, err := feq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (feq *FsEventQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := feq.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the FsEventQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (feq *FsEventQuery) Clone() *FsEventQuery {
|
||||
if feq == nil {
|
||||
return nil
|
||||
}
|
||||
return &FsEventQuery{
|
||||
config: feq.config,
|
||||
ctx: feq.ctx.Clone(),
|
||||
order: append([]fsevent.OrderOption{}, feq.order...),
|
||||
inters: append([]Interceptor{}, feq.inters...),
|
||||
predicates: append([]predicate.FsEvent{}, feq.predicates...),
|
||||
withUser: feq.withUser.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: feq.sql.Clone(),
|
||||
path: feq.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithUser tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (feq *FsEventQuery) WithUser(opts ...func(*UserQuery)) *FsEventQuery {
|
||||
query := (&UserClient{config: feq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
feq.withUser = query
|
||||
return feq
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.FsEvent.Query().
|
||||
// GroupBy(fsevent.FieldCreatedAt).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (feq *FsEventQuery) GroupBy(field string, fields ...string) *FsEventGroupBy {
|
||||
feq.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &FsEventGroupBy{build: feq}
|
||||
grbuild.flds = &feq.ctx.Fields
|
||||
grbuild.label = fsevent.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.FsEvent.Query().
|
||||
// Select(fsevent.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
func (feq *FsEventQuery) Select(fields ...string) *FsEventSelect {
|
||||
feq.ctx.Fields = append(feq.ctx.Fields, fields...)
|
||||
sbuild := &FsEventSelect{FsEventQuery: feq}
|
||||
sbuild.label = fsevent.Label
|
||||
sbuild.flds, sbuild.scan = &feq.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a FsEventSelect configured with the given aggregations.
|
||||
func (feq *FsEventQuery) Aggregate(fns ...AggregateFunc) *FsEventSelect {
|
||||
return feq.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (feq *FsEventQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range feq.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, feq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range feq.ctx.Fields {
|
||||
if !fsevent.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if feq.path != nil {
|
||||
prev, err := feq.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
feq.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (feq *FsEventQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*FsEvent, error) {
|
||||
var (
|
||||
nodes = []*FsEvent{}
|
||||
_spec = feq.querySpec()
|
||||
loadedTypes = [1]bool{
|
||||
feq.withUser != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*FsEvent).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &FsEvent{config: feq.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, feq.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := feq.withUser; query != nil {
|
||||
if err := feq.loadUser(ctx, query, nodes, nil,
|
||||
func(n *FsEvent, e *User) { n.Edges.User = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (feq *FsEventQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*FsEvent, init func(*FsEvent), assign func(*FsEvent, *User)) error {
|
||||
ids := make([]int, 0, len(nodes))
|
||||
nodeids := make(map[int][]*FsEvent)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].UserFsevent
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(user.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "user_fsevent" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (feq *FsEventQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := feq.querySpec()
|
||||
_spec.Node.Columns = feq.ctx.Fields
|
||||
if len(feq.ctx.Fields) > 0 {
|
||||
_spec.Unique = feq.ctx.Unique != nil && *feq.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, feq.driver, _spec)
|
||||
}
|
||||
|
||||
func (feq *FsEventQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
|
||||
_spec.From = feq.sql
|
||||
if unique := feq.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if feq.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := feq.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fsevent.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != fsevent.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if feq.withUser != nil {
|
||||
_spec.Node.AddColumnOnce(fsevent.FieldUserFsevent)
|
||||
}
|
||||
}
|
||||
if ps := feq.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := feq.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := feq.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := feq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (feq *FsEventQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(feq.driver.Dialect())
|
||||
t1 := builder.Table(fsevent.Table)
|
||||
columns := feq.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = fsevent.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if feq.sql != nil {
|
||||
selector = feq.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if feq.ctx.Unique != nil && *feq.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, p := range feq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range feq.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := feq.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := feq.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// FsEventGroupBy is the group-by builder for FsEvent entities.
|
||||
type FsEventGroupBy struct {
|
||||
selector
|
||||
build *FsEventQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (fegb *FsEventGroupBy) Aggregate(fns ...AggregateFunc) *FsEventGroupBy {
|
||||
fegb.fns = append(fegb.fns, fns...)
|
||||
return fegb
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (fegb *FsEventGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, fegb.build.ctx, "GroupBy")
|
||||
if err := fegb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*FsEventQuery, *FsEventGroupBy](ctx, fegb.build, fegb, fegb.build.inters, v)
|
||||
}
|
||||
|
||||
func (fegb *FsEventGroupBy) sqlScan(ctx context.Context, root *FsEventQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(fegb.fns))
|
||||
for _, fn := range fegb.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*fegb.flds)+len(fegb.fns))
|
||||
for _, f := range *fegb.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*fegb.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := fegb.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// FsEventSelect is the builder for selecting fields of FsEvent entities.
|
||||
type FsEventSelect struct {
|
||||
*FsEventQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (fes *FsEventSelect) Aggregate(fns ...AggregateFunc) *FsEventSelect {
|
||||
fes.fns = append(fes.fns, fns...)
|
||||
return fes
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (fes *FsEventSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, fes.ctx, "Select")
|
||||
if err := fes.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*FsEventQuery, *FsEventSelect](ctx, fes.FsEventQuery, fes, fes.inters, v)
|
||||
}
|
||||
|
||||
func (fes *FsEventSelect) sqlScan(ctx context.Context, root *FsEventQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(fes.fns))
|
||||
for _, fn := range fes.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*fes.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := fes.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
|
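For orientation, a minimal sketch of how a generated query builder like the one above is typically driven. The client value, context, subscriber ID, and error handling are illustrative assumptions, not code from this diff; the field predicate and order helper are the ones ent generates alongside this builder.

    // List the ten newest fs_events rows for one subscriber (hypothetical usage).
    events, err := client.FsEvent.Query().
        Where(fsevent.Subscriber(subscriberID)). // generated field predicate (assumed)
        Order(fsevent.ByID(sql.OrderDesc())).    // generated order option (assumed)
        Limit(10).
        All(ctx)
    if err != nil {
        return err
    }
    _ = events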
@@ -1,494 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "context"
    "errors"
    "fmt"
    "time"

    "entgo.io/ent/dialect/sql"
    "entgo.io/ent/dialect/sql/sqlgraph"
    "entgo.io/ent/schema/field"
    "github.com/cloudreve/Cloudreve/v4/ent/fsevent"
    "github.com/cloudreve/Cloudreve/v4/ent/predicate"
    "github.com/cloudreve/Cloudreve/v4/ent/user"
    "github.com/gofrs/uuid"
)

// FsEventUpdate is the builder for updating FsEvent entities.
type FsEventUpdate struct {
    config
    hooks    []Hook
    mutation *FsEventMutation
}

// Where appends a list predicates to the FsEventUpdate builder.
func (feu *FsEventUpdate) Where(ps ...predicate.FsEvent) *FsEventUpdate {
    feu.mutation.Where(ps...)
    return feu
}

// SetUpdatedAt sets the "updated_at" field.
func (feu *FsEventUpdate) SetUpdatedAt(t time.Time) *FsEventUpdate {
    feu.mutation.SetUpdatedAt(t)
    return feu
}

// SetDeletedAt sets the "deleted_at" field.
func (feu *FsEventUpdate) SetDeletedAt(t time.Time) *FsEventUpdate {
    feu.mutation.SetDeletedAt(t)
    return feu
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableDeletedAt(t *time.Time) *FsEventUpdate {
    if t != nil {
        feu.SetDeletedAt(*t)
    }
    return feu
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (feu *FsEventUpdate) ClearDeletedAt() *FsEventUpdate {
    feu.mutation.ClearDeletedAt()
    return feu
}

// SetEvent sets the "event" field.
func (feu *FsEventUpdate) SetEvent(s string) *FsEventUpdate {
    feu.mutation.SetEvent(s)
    return feu
}

// SetNillableEvent sets the "event" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableEvent(s *string) *FsEventUpdate {
    if s != nil {
        feu.SetEvent(*s)
    }
    return feu
}

// SetSubscriber sets the "subscriber" field.
func (feu *FsEventUpdate) SetSubscriber(u uuid.UUID) *FsEventUpdate {
    feu.mutation.SetSubscriber(u)
    return feu
}

// SetNillableSubscriber sets the "subscriber" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableSubscriber(u *uuid.UUID) *FsEventUpdate {
    if u != nil {
        feu.SetSubscriber(*u)
    }
    return feu
}

// SetUserFsevent sets the "user_fsevent" field.
func (feu *FsEventUpdate) SetUserFsevent(i int) *FsEventUpdate {
    feu.mutation.SetUserFsevent(i)
    return feu
}

// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableUserFsevent(i *int) *FsEventUpdate {
    if i != nil {
        feu.SetUserFsevent(*i)
    }
    return feu
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (feu *FsEventUpdate) ClearUserFsevent() *FsEventUpdate {
    feu.mutation.ClearUserFsevent()
    return feu
}

// SetUserID sets the "user" edge to the User entity by ID.
func (feu *FsEventUpdate) SetUserID(id int) *FsEventUpdate {
    feu.mutation.SetUserID(id)
    return feu
}

// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (feu *FsEventUpdate) SetNillableUserID(id *int) *FsEventUpdate {
    if id != nil {
        feu = feu.SetUserID(*id)
    }
    return feu
}

// SetUser sets the "user" edge to the User entity.
func (feu *FsEventUpdate) SetUser(u *User) *FsEventUpdate {
    return feu.SetUserID(u.ID)
}

// Mutation returns the FsEventMutation object of the builder.
func (feu *FsEventUpdate) Mutation() *FsEventMutation {
    return feu.mutation
}

// ClearUser clears the "user" edge to the User entity.
func (feu *FsEventUpdate) ClearUser() *FsEventUpdate {
    feu.mutation.ClearUser()
    return feu
}

// Save executes the query and returns the number of nodes affected by the update operation.
func (feu *FsEventUpdate) Save(ctx context.Context) (int, error) {
    if err := feu.defaults(); err != nil {
        return 0, err
    }
    return withHooks(ctx, feu.sqlSave, feu.mutation, feu.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (feu *FsEventUpdate) SaveX(ctx context.Context) int {
    affected, err := feu.Save(ctx)
    if err != nil {
        panic(err)
    }
    return affected
}

// Exec executes the query.
func (feu *FsEventUpdate) Exec(ctx context.Context) error {
    _, err := feu.Save(ctx)
    return err
}

// ExecX is like Exec, but panics if an error occurs.
func (feu *FsEventUpdate) ExecX(ctx context.Context) {
    if err := feu.Exec(ctx); err != nil {
        panic(err)
    }
}

// defaults sets the default values of the builder before save.
func (feu *FsEventUpdate) defaults() error {
    if _, ok := feu.mutation.UpdatedAt(); !ok {
        if fsevent.UpdateDefaultUpdatedAt == nil {
            return fmt.Errorf("ent: uninitialized fsevent.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
        }
        v := fsevent.UpdateDefaultUpdatedAt()
        feu.mutation.SetUpdatedAt(v)
    }
    return nil
}

func (feu *FsEventUpdate) sqlSave(ctx context.Context) (n int, err error) {
    _spec := sqlgraph.NewUpdateSpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
    if ps := feu.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {
                ps[i](selector)
            }
        }
    }
    if value, ok := feu.mutation.UpdatedAt(); ok {
        _spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
    }
    if value, ok := feu.mutation.DeletedAt(); ok {
        _spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
    }
    if feu.mutation.DeletedAtCleared() {
        _spec.ClearField(fsevent.FieldDeletedAt, field.TypeTime)
    }
    if value, ok := feu.mutation.Event(); ok {
        _spec.SetField(fsevent.FieldEvent, field.TypeString, value)
    }
    if value, ok := feu.mutation.Subscriber(); ok {
        _spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
    }
    if feu.mutation.UserCleared() {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   fsevent.UserTable,
            Columns: []string{fsevent.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
            },
        }
        _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
    }
    if nodes := feu.mutation.UserIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   fsevent.UserTable,
            Columns: []string{fsevent.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges.Add = append(_spec.Edges.Add, edge)
    }
    if n, err = sqlgraph.UpdateNodes(ctx, feu.driver, _spec); err != nil {
        if _, ok := err.(*sqlgraph.NotFoundError); ok {
            err = &NotFoundError{fsevent.Label}
        } else if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{msg: err.Error(), wrap: err}
        }
        return 0, err
    }
    feu.mutation.done = true
    return n, nil
}

// FsEventUpdateOne is the builder for updating a single FsEvent entity.
type FsEventUpdateOne struct {
    config
    fields   []string
    hooks    []Hook
    mutation *FsEventMutation
}

// SetUpdatedAt sets the "updated_at" field.
func (feuo *FsEventUpdateOne) SetUpdatedAt(t time.Time) *FsEventUpdateOne {
    feuo.mutation.SetUpdatedAt(t)
    return feuo
}

// SetDeletedAt sets the "deleted_at" field.
func (feuo *FsEventUpdateOne) SetDeletedAt(t time.Time) *FsEventUpdateOne {
    feuo.mutation.SetDeletedAt(t)
    return feuo
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableDeletedAt(t *time.Time) *FsEventUpdateOne {
    if t != nil {
        feuo.SetDeletedAt(*t)
    }
    return feuo
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (feuo *FsEventUpdateOne) ClearDeletedAt() *FsEventUpdateOne {
    feuo.mutation.ClearDeletedAt()
    return feuo
}

// SetEvent sets the "event" field.
func (feuo *FsEventUpdateOne) SetEvent(s string) *FsEventUpdateOne {
    feuo.mutation.SetEvent(s)
    return feuo
}

// SetNillableEvent sets the "event" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableEvent(s *string) *FsEventUpdateOne {
    if s != nil {
        feuo.SetEvent(*s)
    }
    return feuo
}

// SetSubscriber sets the "subscriber" field.
func (feuo *FsEventUpdateOne) SetSubscriber(u uuid.UUID) *FsEventUpdateOne {
    feuo.mutation.SetSubscriber(u)
    return feuo
}

// SetNillableSubscriber sets the "subscriber" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableSubscriber(u *uuid.UUID) *FsEventUpdateOne {
    if u != nil {
        feuo.SetSubscriber(*u)
    }
    return feuo
}

// SetUserFsevent sets the "user_fsevent" field.
func (feuo *FsEventUpdateOne) SetUserFsevent(i int) *FsEventUpdateOne {
    feuo.mutation.SetUserFsevent(i)
    return feuo
}

// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableUserFsevent(i *int) *FsEventUpdateOne {
    if i != nil {
        feuo.SetUserFsevent(*i)
    }
    return feuo
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (feuo *FsEventUpdateOne) ClearUserFsevent() *FsEventUpdateOne {
    feuo.mutation.ClearUserFsevent()
    return feuo
}

// SetUserID sets the "user" edge to the User entity by ID.
func (feuo *FsEventUpdateOne) SetUserID(id int) *FsEventUpdateOne {
    feuo.mutation.SetUserID(id)
    return feuo
}

// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableUserID(id *int) *FsEventUpdateOne {
    if id != nil {
        feuo = feuo.SetUserID(*id)
    }
    return feuo
}

// SetUser sets the "user" edge to the User entity.
func (feuo *FsEventUpdateOne) SetUser(u *User) *FsEventUpdateOne {
    return feuo.SetUserID(u.ID)
}

// Mutation returns the FsEventMutation object of the builder.
func (feuo *FsEventUpdateOne) Mutation() *FsEventMutation {
    return feuo.mutation
}

// ClearUser clears the "user" edge to the User entity.
func (feuo *FsEventUpdateOne) ClearUser() *FsEventUpdateOne {
    feuo.mutation.ClearUser()
    return feuo
}

// Where appends a list predicates to the FsEventUpdate builder.
func (feuo *FsEventUpdateOne) Where(ps ...predicate.FsEvent) *FsEventUpdateOne {
    feuo.mutation.Where(ps...)
    return feuo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (feuo *FsEventUpdateOne) Select(field string, fields ...string) *FsEventUpdateOne {
    feuo.fields = append([]string{field}, fields...)
    return feuo
}

// Save executes the query and returns the updated FsEvent entity.
func (feuo *FsEventUpdateOne) Save(ctx context.Context) (*FsEvent, error) {
    if err := feuo.defaults(); err != nil {
        return nil, err
    }
    return withHooks(ctx, feuo.sqlSave, feuo.mutation, feuo.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (feuo *FsEventUpdateOne) SaveX(ctx context.Context) *FsEvent {
    node, err := feuo.Save(ctx)
    if err != nil {
        panic(err)
    }
    return node
}

// Exec executes the query on the entity.
func (feuo *FsEventUpdateOne) Exec(ctx context.Context) error {
    _, err := feuo.Save(ctx)
    return err
}

// ExecX is like Exec, but panics if an error occurs.
func (feuo *FsEventUpdateOne) ExecX(ctx context.Context) {
    if err := feuo.Exec(ctx); err != nil {
        panic(err)
    }
}

// defaults sets the default values of the builder before save.
func (feuo *FsEventUpdateOne) defaults() error {
    if _, ok := feuo.mutation.UpdatedAt(); !ok {
        if fsevent.UpdateDefaultUpdatedAt == nil {
            return fmt.Errorf("ent: uninitialized fsevent.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
        }
        v := fsevent.UpdateDefaultUpdatedAt()
        feuo.mutation.SetUpdatedAt(v)
    }
    return nil
}

func (feuo *FsEventUpdateOne) sqlSave(ctx context.Context) (_node *FsEvent, err error) {
    _spec := sqlgraph.NewUpdateSpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
    id, ok := feuo.mutation.ID()
    if !ok {
        return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "FsEvent.id" for update`)}
    }
    _spec.Node.ID.Value = id
    if fields := feuo.fields; len(fields) > 0 {
        _spec.Node.Columns = make([]string, 0, len(fields))
        _spec.Node.Columns = append(_spec.Node.Columns, fsevent.FieldID)
        for _, f := range fields {
            if !fsevent.ValidColumn(f) {
                return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
            }
            if f != fsevent.FieldID {
                _spec.Node.Columns = append(_spec.Node.Columns, f)
            }
        }
    }
    if ps := feuo.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {
                ps[i](selector)
            }
        }
    }
    if value, ok := feuo.mutation.UpdatedAt(); ok {
        _spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
    }
    if value, ok := feuo.mutation.DeletedAt(); ok {
        _spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
    }
    if feuo.mutation.DeletedAtCleared() {
        _spec.ClearField(fsevent.FieldDeletedAt, field.TypeTime)
    }
    if value, ok := feuo.mutation.Event(); ok {
        _spec.SetField(fsevent.FieldEvent, field.TypeString, value)
    }
    if value, ok := feuo.mutation.Subscriber(); ok {
        _spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
    }
    if feuo.mutation.UserCleared() {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   fsevent.UserTable,
            Columns: []string{fsevent.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
            },
        }
        _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
    }
    if nodes := feuo.mutation.UserIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   fsevent.UserTable,
            Columns: []string{fsevent.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges.Add = append(_spec.Edges.Add, edge)
    }
    _node = &FsEvent{config: feuo.config}
    _spec.Assign = _node.assignValues
    _spec.ScanValues = _node.scanValues
    if err = sqlgraph.UpdateNode(ctx, feuo.driver, _spec); err != nil {
        if _, ok := err.(*sqlgraph.NotFoundError); ok {
            err = &NotFoundError{fsevent.Label}
        } else if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{msg: err.Error(), wrap: err}
        }
        return nil, err
    }
    feuo.mutation.done = true
    return _node, nil
}
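A minimal sketch of driving the single-row update builder above; the client, ID value, and new event payload are assumptions for illustration:

    // Rewrite the event payload of one fs_events row; updated_at is
    // refreshed automatically by the defaults() hook shown above.
    ev, err := client.FsEvent.UpdateOneID(id).
        SetEvent(`{"type":"rename"}`).
        Save(ctx)
    if err != nil {
        return err
    }
    _ = ev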
@@ -57,18 +57,6 @@ func (f FileFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
    return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileMutation", m)
}

// The FsEventFunc type is an adapter to allow the use of ordinary
// function as FsEvent mutator.
type FsEventFunc func(context.Context, *ent.FsEventMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f FsEventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
    if mv, ok := m.(*ent.FsEventMutation); ok {
        return f(ctx, mv)
    }
    return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FsEventMutation", m)
}

// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)
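A sketch of how such an adapter is registered as a mutation hook; the validation rule and surrounding setup are assumptions for illustration, while Use, the mutation getter, and the adapter itself match the generated code above:

    // Reject empty event payloads before any FsEvent mutation is written (hypothetical rule).
    client.FsEvent.Use(func(next ent.Mutator) ent.Mutator {
        return hook.FsEventFunc(func(ctx context.Context, m *ent.FsEventMutation) (ent.Value, error) {
            if v, ok := m.Event(); ok && v == "" {
                return nil, fmt.Errorf("fsevent: event payload must not be empty")
            }
            return next.Mutate(ctx, m)
        })
    })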
@@ -12,7 +12,6 @@ import (
    "github.com/cloudreve/Cloudreve/v4/ent/directlink"
    "github.com/cloudreve/Cloudreve/v4/ent/entity"
    "github.com/cloudreve/Cloudreve/v4/ent/file"
    "github.com/cloudreve/Cloudreve/v4/ent/fsevent"
    "github.com/cloudreve/Cloudreve/v4/ent/group"
    "github.com/cloudreve/Cloudreve/v4/ent/metadata"
    "github.com/cloudreve/Cloudreve/v4/ent/node"
@@ -189,33 +188,6 @@ func (f TraverseFile) Traverse(ctx context.Context, q ent.Query) error {
    return fmt.Errorf("unexpected query type %T. expect *ent.FileQuery", q)
}

// The FsEventFunc type is an adapter to allow the use of ordinary function as a Querier.
type FsEventFunc func(context.Context, *ent.FsEventQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f FsEventFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
    if q, ok := q.(*ent.FsEventQuery); ok {
        return f(ctx, q)
    }
    return nil, fmt.Errorf("unexpected query type %T. expect *ent.FsEventQuery", q)
}

// The TraverseFsEvent type is an adapter to allow the use of ordinary function as Traverser.
type TraverseFsEvent func(context.Context, *ent.FsEventQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseFsEvent) Intercept(next ent.Querier) ent.Querier {
    return next
}

// Traverse calls f(ctx, q).
func (f TraverseFsEvent) Traverse(ctx context.Context, q ent.Query) error {
    if q, ok := q.(*ent.FsEventQuery); ok {
        return f(ctx, q)
    }
    return fmt.Errorf("unexpected query type %T. expect *ent.FsEventQuery", q)
}

// The GroupFunc type is an adapter to allow the use of ordinary function as a Querier.
type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error)
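A sketch of wiring the traverser above into a client; the logging body is an assumption for illustration (assumes the standard library log package and the generated intercept package are imported):

    // Observe every FsEvent query before it runs (hypothetical interceptor).
    client.Intercept(intercept.TraverseFsEvent(func(ctx context.Context, q *ent.FsEventQuery) error {
        log.Println("traversing an FsEvent query")
        return nil
    }))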
@@ -470,8 +442,6 @@ func NewQuery(q ent.Query) (Query, error) {
        return &query[*ent.EntityQuery, predicate.Entity, entity.OrderOption]{typ: ent.TypeEntity, tq: q}, nil
    case *ent.FileQuery:
        return &query[*ent.FileQuery, predicate.File, file.OrderOption]{typ: ent.TypeFile, tq: q}, nil
    case *ent.FsEventQuery:
        return &query[*ent.FsEventQuery, predicate.FsEvent, fsevent.OrderOption]{typ: ent.TypeFsEvent, tq: q}, nil
    case *ent.GroupQuery:
        return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
    case *ent.MetadataQuery:
File diff suppressed because one or more lines are too long
@@ -160,30 +160,6 @@ var (
            },
        },
    }
    // FsEventsColumns holds the columns for the "fs_events" table.
    FsEventsColumns = []*schema.Column{
        {Name: "id", Type: field.TypeInt, Increment: true},
        {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
        {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
        {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}},
        {Name: "event", Type: field.TypeString, Size: 2147483647},
        {Name: "subscriber", Type: field.TypeUUID},
        {Name: "user_fsevent", Type: field.TypeInt, Nullable: true},
    }
    // FsEventsTable holds the schema information for the "fs_events" table.
    FsEventsTable = &schema.Table{
        Name:       "fs_events",
        Columns:    FsEventsColumns,
        PrimaryKey: []*schema.Column{FsEventsColumns[0]},
        ForeignKeys: []*schema.ForeignKey{
            {
                Symbol:     "fs_events_users_fsevents",
                Columns:    []*schema.Column{FsEventsColumns[6]},
                RefColumns: []*schema.Column{UsersColumns[0]},
                OnDelete:   schema.SetNull,
            },
        },
    }
    // GroupsColumns holds the columns for the "groups" table.
    GroupsColumns = []*schema.Column{
        {Name: "id", Type: field.TypeInt, Increment: true},
@@ -468,7 +444,6 @@ var (
        DirectLinksTable,
        EntitiesTable,
        FilesTable,
        FsEventsTable,
        GroupsTable,
        MetadataTable,
        NodesTable,
@@ -490,7 +465,6 @@ func init() {
    FilesTable.ForeignKeys[0].RefTable = FilesTable
    FilesTable.ForeignKeys[1].RefTable = StoragePoliciesTable
    FilesTable.ForeignKeys[2].RefTable = UsersTable
    FsEventsTable.ForeignKeys[0].RefTable = UsersTable
    GroupsTable.ForeignKeys[0].RefTable = StoragePoliciesTable
    MetadataTable.ForeignKeys[0].RefTable = FilesTable
    PasskeysTable.ForeignKeys[0].RefTable = UsersTable
ent/mutation.go (876 lines changed): file diff suppressed because it is too large
@@ -28,12 +28,6 @@ func (m *FileMutation) SetRawID(t int) {

// SetUpdatedAt sets the "updated_at" field.

func (m *FsEventMutation) SetRawID(t int) {
    m.id = &t
}

// SetUpdatedAt sets the "updated_at" field.

func (m *GroupMutation) SetRawID(t int) {
    m.id = &t
}
@@ -18,9 +18,6 @@ type Entity func(*sql.Selector)
// File is the predicate function for file builders.
type File func(*sql.Selector)

// FsEvent is the predicate function for fsevent builders.
type FsEvent func(*sql.Selector)

// Group is the predicate function for group builders.
type Group func(*sql.Selector)
@@ -9,7 +9,6 @@ import (
    "github.com/cloudreve/Cloudreve/v4/ent/directlink"
    "github.com/cloudreve/Cloudreve/v4/ent/entity"
    "github.com/cloudreve/Cloudreve/v4/ent/file"
    "github.com/cloudreve/Cloudreve/v4/ent/fsevent"
    "github.com/cloudreve/Cloudreve/v4/ent/group"
    "github.com/cloudreve/Cloudreve/v4/ent/metadata"
    "github.com/cloudreve/Cloudreve/v4/ent/node"
@@ -108,25 +107,6 @@ func init() {
    fileDescIsSymbolic := fileFields[8].Descriptor()
    // file.DefaultIsSymbolic holds the default value on creation for the is_symbolic field.
    file.DefaultIsSymbolic = fileDescIsSymbolic.Default.(bool)
    fseventMixin := schema.FsEvent{}.Mixin()
    fseventMixinHooks0 := fseventMixin[0].Hooks()
    fsevent.Hooks[0] = fseventMixinHooks0[0]
    fseventMixinInters0 := fseventMixin[0].Interceptors()
    fsevent.Interceptors[0] = fseventMixinInters0[0]
    fseventMixinFields0 := fseventMixin[0].Fields()
    _ = fseventMixinFields0
    fseventFields := schema.FsEvent{}.Fields()
    _ = fseventFields
    // fseventDescCreatedAt is the schema descriptor for created_at field.
    fseventDescCreatedAt := fseventMixinFields0[0].Descriptor()
    // fsevent.DefaultCreatedAt holds the default value on creation for the created_at field.
    fsevent.DefaultCreatedAt = fseventDescCreatedAt.Default.(func() time.Time)
    // fseventDescUpdatedAt is the schema descriptor for updated_at field.
    fseventDescUpdatedAt := fseventMixinFields0[1].Descriptor()
    // fsevent.DefaultUpdatedAt holds the default value on creation for the updated_at field.
    fsevent.DefaultUpdatedAt = fseventDescUpdatedAt.Default.(func() time.Time)
    // fsevent.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
    fsevent.UpdateDefaultUpdatedAt = fseventDescUpdatedAt.UpdateDefault.(func() time.Time)
    groupMixin := schema.Group{}.Mixin()
    groupMixinHooks0 := groupMixin[0].Hooks()
    group.Hooks[0] = groupMixinHooks0[0]
@@ -25,9 +25,8 @@ func (Entity) Fields() []ent.Field {
        field.UUID("upload_session_id", uuid.Must(uuid.NewV4())).
            Optional().
            Nillable(),
        field.JSON("props", &types.EntityProps{}).
            Optional().
            StorageKey("recycle_options"),
        field.JSON("recycle_options", &types.EntityRecycleOption{}).
            Optional(),
    }
}
@@ -1,38 +0,0 @@
package schema

import (
    "entgo.io/ent"
    "entgo.io/ent/schema/edge"
    "entgo.io/ent/schema/field"
    "github.com/gofrs/uuid"
)

// FsEvent holds the schema definition for the FsEvent entity.
type FsEvent struct {
    ent.Schema
}

// Fields of the FsEvent.
func (FsEvent) Fields() []ent.Field {
    return []ent.Field{
        field.Text("event"),
        field.UUID("subscriber", uuid.Must(uuid.NewV4())),
        field.Int("user_fsevent").Optional(),
    }
}

// Edges of the Task.
func (FsEvent) Edges() []ent.Edge {
    return []ent.Edge{
        edge.From("user", User.Type).
            Ref("fsevents").
            Field("user_fsevent").
            Unique(),
    }
}

func (FsEvent) Mixin() []ent.Mixin {
    return []ent.Mixin{
        CommonMixin{},
    }
}
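A sketch of how the three schema fields above surface on the generated create builder. The Create builder is not shown in this diff, so its use here is an assumption based on standard ent codegen; the payload, subscriber, and user values are likewise illustrative (created_at and updated_at are filled in by CommonMixin defaults):

    // Hypothetical usage of the entity defined by this schema.
    ev, err := client.FsEvent.Create().
        SetEvent(`{"type":"upload"}`).           // free-form text column
        SetSubscriber(uuid.Must(uuid.NewV4())).  // subscriber UUID
        SetUser(u).                              // fills the optional user_fsevent FK via the edge
        Save(ctx)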
@@ -51,7 +51,6 @@ func (User) Edges() []ent.Edge {
        edge.To("shares", Share.Type),
        edge.To("passkey", Passkey.Type),
        edge.To("tasks", Task.Type),
        edge.To("fsevents", FsEvent.Type),
        edge.To("entities", Entity.Type),
    }
}
@@ -22,8 +22,6 @@ type Tx struct {
    Entity *EntityClient
    // File is the client for interacting with the File builders.
    File *FileClient
    // FsEvent is the client for interacting with the FsEvent builders.
    FsEvent *FsEventClient
    // Group is the client for interacting with the Group builders.
    Group *GroupClient
    // Metadata is the client for interacting with the Metadata builders.
@@ -177,7 +175,6 @@ func (tx *Tx) init() {
    tx.DirectLink = NewDirectLinkClient(tx.config)
    tx.Entity = NewEntityClient(tx.config)
    tx.File = NewFileClient(tx.config)
    tx.FsEvent = NewFsEventClient(tx.config)
    tx.Group = NewGroupClient(tx.config)
    tx.Metadata = NewMetadataClient(tx.config)
    tx.Node = NewNodeClient(tx.config)

ent/user.go (28 lines changed)
@@ -64,13 +64,11 @@ type UserEdges struct {
    Passkey []*Passkey `json:"passkey,omitempty"`
    // Tasks holds the value of the tasks edge.
    Tasks []*Task `json:"tasks,omitempty"`
    // Fsevents holds the value of the fsevents edge.
    Fsevents []*FsEvent `json:"fsevents,omitempty"`
    // Entities holds the value of the entities edge.
    Entities []*Entity `json:"entities,omitempty"`
    // loadedTypes holds the information for reporting if a
    // type was loaded (or requested) in eager-loading or not.
    loadedTypes [8]bool
    loadedTypes [7]bool
}

// GroupOrErr returns the Group value or an error if the edge
@@ -131,19 +129,10 @@ func (e UserEdges) TasksOrErr() ([]*Task, error) {
    return nil, &NotLoadedError{edge: "tasks"}
}

// FseventsOrErr returns the Fsevents value or an error if the edge
// was not loaded in eager-loading.
func (e UserEdges) FseventsOrErr() ([]*FsEvent, error) {
    if e.loadedTypes[6] {
        return e.Fsevents, nil
    }
    return nil, &NotLoadedError{edge: "fsevents"}
}

// EntitiesOrErr returns the Entities value or an error if the edge
// was not loaded in eager-loading.
func (e UserEdges) EntitiesOrErr() ([]*Entity, error) {
    if e.loadedTypes[7] {
    if e.loadedTypes[6] {
        return e.Entities, nil
    }
    return nil, &NotLoadedError{edge: "entities"}
@@ -301,11 +290,6 @@ func (u *User) QueryTasks() *TaskQuery {
    return NewUserClient(u.config).QueryTasks(u)
}

// QueryFsevents queries the "fsevents" edge of the User entity.
func (u *User) QueryFsevents() *FsEventQuery {
    return NewUserClient(u.config).QueryFsevents(u)
}

// QueryEntities queries the "entities" edge of the User entity.
func (u *User) QueryEntities() *EntityQuery {
    return NewUserClient(u.config).QueryEntities(u)
@@ -409,16 +393,10 @@ func (e *User) SetTasks(v []*Task) {
    e.Edges.loadedTypes[5] = true
}

// SetFsevents manually set the edge as loaded state.
func (e *User) SetFsevents(v []*FsEvent) {
    e.Edges.Fsevents = v
    e.Edges.loadedTypes[6] = true
}

// SetEntities manually set the edge as loaded state.
func (e *User) SetEntities(v []*Entity) {
    e.Edges.Entities = v
    e.Edges.loadedTypes[7] = true
    e.Edges.loadedTypes[6] = true
}

// Users is a parsable slice of User.
@@ -53,8 +53,6 @@ const (
    EdgePasskey = "passkey"
    // EdgeTasks holds the string denoting the tasks edge name in mutations.
    EdgeTasks = "tasks"
    // EdgeFsevents holds the string denoting the fsevents edge name in mutations.
    EdgeFsevents = "fsevents"
    // EdgeEntities holds the string denoting the entities edge name in mutations.
    EdgeEntities = "entities"
    // Table holds the table name of the user in the database.
@@ -101,13 +99,6 @@ const (
    TasksInverseTable = "tasks"
    // TasksColumn is the table column denoting the tasks relation/edge.
    TasksColumn = "user_tasks"
    // FseventsTable is the table that holds the fsevents relation/edge.
    FseventsTable = "fs_events"
    // FseventsInverseTable is the table name for the FsEvent entity.
    // It exists in this package in order to avoid circular dependency with the "fsevent" package.
    FseventsInverseTable = "fs_events"
    // FseventsColumn is the table column denoting the fsevents relation/edge.
    FseventsColumn = "user_fsevent"
    // EntitiesTable is the table that holds the entities relation/edge.
    EntitiesTable = "entities"
    // EntitiesInverseTable is the table name for the Entity entity.
@@ -336,20 +327,6 @@ func ByTasks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
    }
}

// ByFseventsCount orders the results by fsevents count.
func ByFseventsCount(opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborsCount(s, newFseventsStep(), opts...)
    }
}

// ByFsevents orders the results by fsevents terms.
func ByFsevents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newFseventsStep(), append([]sql.OrderTerm{term}, terms...)...)
    }
}

// ByEntitiesCount orders the results by entities count.
func ByEntitiesCount(opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
@@ -405,13 +382,6 @@ func newTasksStep() *sqlgraph.Step {
        sqlgraph.Edge(sqlgraph.O2M, false, TasksTable, TasksColumn),
    )
}
func newFseventsStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(FseventsInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.O2M, false, FseventsTable, FseventsColumn),
    )
}
func newEntitiesStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
@@ -818,29 +818,6 @@ func HasTasksWith(preds ...predicate.Task) predicate.User {
    })
}

// HasFsevents applies the HasEdge predicate on the "fsevents" edge.
func HasFsevents() predicate.User {
    return predicate.User(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, FseventsTable, FseventsColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasFseventsWith applies the HasEdge predicate on the "fsevents" edge with a given conditions (other predicates).
func HasFseventsWith(preds ...predicate.FsEvent) predicate.User {
    return predicate.User(func(s *sql.Selector) {
        step := newFseventsStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// HasEntities applies the HasEdge predicate on the "entities" edge.
func HasEntities() predicate.User {
    return predicate.User(func(s *sql.Selector) {
@@ -14,7 +14,6 @@ import (
    "github.com/cloudreve/Cloudreve/v4/ent/davaccount"
    "github.com/cloudreve/Cloudreve/v4/ent/entity"
    "github.com/cloudreve/Cloudreve/v4/ent/file"
    "github.com/cloudreve/Cloudreve/v4/ent/fsevent"
    "github.com/cloudreve/Cloudreve/v4/ent/group"
    "github.com/cloudreve/Cloudreve/v4/ent/passkey"
    "github.com/cloudreve/Cloudreve/v4/ent/share"
@@ -253,21 +252,6 @@ func (uc *UserCreate) AddTasks(t ...*Task) *UserCreate {
    return uc.AddTaskIDs(ids...)
}

// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
func (uc *UserCreate) AddFseventIDs(ids ...int) *UserCreate {
    uc.mutation.AddFseventIDs(ids...)
    return uc
}

// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uc *UserCreate) AddFsevents(f ...*FsEvent) *UserCreate {
    ids := make([]int, len(f))
    for i := range f {
        ids[i] = f[i].ID
    }
    return uc.AddFseventIDs(ids...)
}

// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uc *UserCreate) AddEntityIDs(ids ...int) *UserCreate {
    uc.mutation.AddEntityIDs(ids...)
@@ -565,22 +549,6 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
        }
        _spec.Edges = append(_spec.Edges, edge)
    }
    if nodes := uc.mutation.FseventsIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.O2M,
            Inverse: false,
            Table:   user.FseventsTable,
            Columns: []string{user.FseventsColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges = append(_spec.Edges, edge)
    }
    if nodes := uc.mutation.EntitiesIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel: sqlgraph.O2M,
@@ -14,7 +14,6 @@ import (
    "github.com/cloudreve/Cloudreve/v4/ent/davaccount"
    "github.com/cloudreve/Cloudreve/v4/ent/entity"
    "github.com/cloudreve/Cloudreve/v4/ent/file"
    "github.com/cloudreve/Cloudreve/v4/ent/fsevent"
    "github.com/cloudreve/Cloudreve/v4/ent/group"
    "github.com/cloudreve/Cloudreve/v4/ent/passkey"
    "github.com/cloudreve/Cloudreve/v4/ent/predicate"
@@ -36,7 +35,6 @@ type UserQuery struct {
    withShares   *ShareQuery
    withPasskey  *PasskeyQuery
    withTasks    *TaskQuery
    withFsevents *FsEventQuery
    withEntities *EntityQuery
    // intermediate query (i.e. traversal path).
    sql *sql.Selector
@@ -206,28 +204,6 @@ func (uq *UserQuery) QueryTasks() *TaskQuery {
    return query
}

// QueryFsevents chains the current query on the "fsevents" edge.
func (uq *UserQuery) QueryFsevents() *FsEventQuery {
    query := (&FsEventClient{config: uq.config}).Query()
    query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
        if err := uq.prepareQuery(ctx); err != nil {
            return nil, err
        }
        selector := uq.sqlQuery(ctx)
        if err := selector.Err(); err != nil {
            return nil, err
        }
        step := sqlgraph.NewStep(
            sqlgraph.From(user.Table, user.FieldID, selector),
            sqlgraph.To(fsevent.Table, fsevent.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, user.FseventsTable, user.FseventsColumn),
        )
        fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step)
        return fromU, nil
    }
    return query
}

// QueryEntities chains the current query on the "entities" edge.
func (uq *UserQuery) QueryEntities() *EntityQuery {
    query := (&EntityClient{config: uq.config}).Query()
@@ -448,7 +424,6 @@ func (uq *UserQuery) Clone() *UserQuery {
        withShares:   uq.withShares.Clone(),
        withPasskey:  uq.withPasskey.Clone(),
        withTasks:    uq.withTasks.Clone(),
        withFsevents: uq.withFsevents.Clone(),
        withEntities: uq.withEntities.Clone(),
        // clone intermediate query.
        sql: uq.sql.Clone(),
@@ -522,17 +497,6 @@ func (uq *UserQuery) WithTasks(opts ...func(*TaskQuery)) *UserQuery {
    return uq
}

// WithFsevents tells the query-builder to eager-load the nodes that are connected to
// the "fsevents" edge. The optional arguments are used to configure the query builder of the edge.
func (uq *UserQuery) WithFsevents(opts ...func(*FsEventQuery)) *UserQuery {
    query := (&FsEventClient{config: uq.config}).Query()
    for _, opt := range opts {
        opt(query)
    }
    uq.withFsevents = query
    return uq
}

// WithEntities tells the query-builder to eager-load the nodes that are connected to
// the "entities" edge. The optional arguments are used to configure the query builder of the edge.
func (uq *UserQuery) WithEntities(opts ...func(*EntityQuery)) *UserQuery {
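A sketch of the eager-loading path above in use; the client and ordering choice are assumptions for illustration:

    // Fetch users and their fs events in two queries instead of N+1 (hypothetical usage).
    users, err := client.User.Query().
        WithFsevents(func(q *ent.FsEventQuery) {
            q.Order(fsevent.ByID(sql.OrderDesc())) // generated order option (assumed)
        }).
        All(ctx)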
@@ -622,14 +586,13 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) {
    var (
        nodes       = []*User{}
        _spec       = uq.querySpec()
        loadedTypes = [8]bool{
        loadedTypes = [7]bool{
            uq.withGroup != nil,
            uq.withFiles != nil,
            uq.withDavAccounts != nil,
            uq.withShares != nil,
            uq.withPasskey != nil,
            uq.withTasks != nil,
            uq.withFsevents != nil,
            uq.withEntities != nil,
        }
    )
@@ -692,13 +655,6 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) {
            return nil, err
        }
    }
    if query := uq.withFsevents; query != nil {
        if err := uq.loadFsevents(ctx, query, nodes,
            func(n *User) { n.Edges.Fsevents = []*FsEvent{} },
            func(n *User, e *FsEvent) { n.Edges.Fsevents = append(n.Edges.Fsevents, e) }); err != nil {
            return nil, err
        }
    }
    if query := uq.withEntities; query != nil {
        if err := uq.loadEntities(ctx, query, nodes,
            func(n *User) { n.Edges.Entities = []*Entity{} },
@ -889,36 +845,6 @@ func (uq *UserQuery) loadTasks(ctx context.Context, query *TaskQuery, nodes []*U
|
|||
}
|
||||
return nil
|
||||
}
|
||||
func (uq *UserQuery) loadFsevents(ctx context.Context, query *FsEventQuery, nodes []*User, init func(*User), assign func(*User, *FsEvent)) error {
|
||||
fks := make([]driver.Value, 0, len(nodes))
|
||||
nodeids := make(map[int]*User)
|
||||
for i := range nodes {
|
||||
fks = append(fks, nodes[i].ID)
|
||||
nodeids[nodes[i].ID] = nodes[i]
|
||||
if init != nil {
|
||||
init(nodes[i])
|
||||
}
|
||||
}
|
||||
if len(query.ctx.Fields) > 0 {
|
||||
query.ctx.AppendFieldOnce(fsevent.FieldUserFsevent)
|
||||
}
|
||||
query.Where(predicate.FsEvent(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(s.C(user.FseventsColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
fk := n.UserFsevent
|
||||
node, ok := nodeids[fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "user_fsevent" returned %v for node %v`, fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (uq *UserQuery) loadEntities(ctx context.Context, query *EntityQuery, nodes []*User, init func(*User), assign func(*User, *Entity)) error {
|
||||
fks := make([]driver.Value, 0, len(nodes))
|
||||
nodeids := make(map[int]*User)
|
||||
|
|
|
|||
|
|
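The hunks above remove the generated fsevents query, eager-load, and loader code from the user query builder. For orientation, a minimal usage sketch of that generated API on the master side; the *ent.Client value and context are assumed, while WithFsevents and Edges.Fsevents come from the code shown here:

package main

import (
	"context"
	"log"

	"github.com/cloudreve/Cloudreve/v4/ent"
)

// listUserEvents eager-loads the "fsevents" edge for every user and logs a
// count per user. WithFsevents drives the loadFsevents path from the diff above.
func listUserEvents(ctx context.Context, client *ent.Client) error {
	users, err := client.User.Query().
		WithFsevents().
		All(ctx)
	if err != nil {
		return err
	}
	for _, u := range users {
		log.Printf("user %d has %d fs events", u.ID, len(u.Edges.Fsevents))
	}
	return nil
}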
@ -14,7 +14,6 @@ import (
	"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
	"github.com/cloudreve/Cloudreve/v4/ent/entity"
	"github.com/cloudreve/Cloudreve/v4/ent/file"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
	"github.com/cloudreve/Cloudreve/v4/ent/group"
	"github.com/cloudreve/Cloudreve/v4/ent/passkey"
	"github.com/cloudreve/Cloudreve/v4/ent/predicate"

@ -298,21 +297,6 @@ func (uu *UserUpdate) AddTasks(t ...*Task) *UserUpdate {
	return uu.AddTaskIDs(ids...)
}

// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
func (uu *UserUpdate) AddFseventIDs(ids ...int) *UserUpdate {
	uu.mutation.AddFseventIDs(ids...)
	return uu
}

// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uu *UserUpdate) AddFsevents(f ...*FsEvent) *UserUpdate {
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uu.AddFseventIDs(ids...)
}

// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uu *UserUpdate) AddEntityIDs(ids ...int) *UserUpdate {
	uu.mutation.AddEntityIDs(ids...)

@ -444,27 +428,6 @@ func (uu *UserUpdate) RemoveTasks(t ...*Task) *UserUpdate {
	return uu.RemoveTaskIDs(ids...)
}

// ClearFsevents clears all "fsevents" edges to the FsEvent entity.
func (uu *UserUpdate) ClearFsevents() *UserUpdate {
	uu.mutation.ClearFsevents()
	return uu
}

// RemoveFseventIDs removes the "fsevents" edge to FsEvent entities by IDs.
func (uu *UserUpdate) RemoveFseventIDs(ids ...int) *UserUpdate {
	uu.mutation.RemoveFseventIDs(ids...)
	return uu
}

// RemoveFsevents removes "fsevents" edges to FsEvent entities.
func (uu *UserUpdate) RemoveFsevents(f ...*FsEvent) *UserUpdate {
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uu.RemoveFseventIDs(ids...)
}

// ClearEntities clears all "entities" edges to the Entity entity.
func (uu *UserUpdate) ClearEntities() *UserUpdate {
	uu.mutation.ClearEntities()
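The mutators deleted above are normally driven through the generated update builders. A minimal sketch, assuming an initialized *ent.Client; only the AddFseventIDs/RemoveFseventIDs names come from the diff:

package main

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
)

// reattachEvent moves one fs event between users via the generated mutators.
func reattachEvent(ctx context.Context, client *ent.Client, fromUser, toUser, eventID int) error {
	if err := client.User.UpdateOneID(fromUser).
		RemoveFseventIDs(eventID).
		Exec(ctx); err != nil {
		return err
	}
	return client.User.UpdateOneID(toUser).
		AddFseventIDs(eventID).
		Exec(ctx)
}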
@ -865,51 +828,6 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if uu.mutation.FseventsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   user.FseventsTable,
			Columns: []string{user.FseventsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := uu.mutation.RemovedFseventsIDs(); len(nodes) > 0 && !uu.mutation.FseventsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   user.FseventsTable,
			Columns: []string{user.FseventsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := uu.mutation.FseventsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   user.FseventsTable,
			Columns: []string{user.FseventsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if uu.mutation.EntitiesCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel: sqlgraph.O2M,

@ -1236,21 +1154,6 @@ func (uuo *UserUpdateOne) AddTasks(t ...*Task) *UserUpdateOne {
	return uuo.AddTaskIDs(ids...)
}

// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
func (uuo *UserUpdateOne) AddFseventIDs(ids ...int) *UserUpdateOne {
	uuo.mutation.AddFseventIDs(ids...)
	return uuo
}

// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uuo *UserUpdateOne) AddFsevents(f ...*FsEvent) *UserUpdateOne {
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uuo.AddFseventIDs(ids...)
}

// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uuo *UserUpdateOne) AddEntityIDs(ids ...int) *UserUpdateOne {
	uuo.mutation.AddEntityIDs(ids...)

@ -1382,27 +1285,6 @@ func (uuo *UserUpdateOne) RemoveTasks(t ...*Task) *UserUpdateOne {
	return uuo.RemoveTaskIDs(ids...)
}

// ClearFsevents clears all "fsevents" edges to the FsEvent entity.
func (uuo *UserUpdateOne) ClearFsevents() *UserUpdateOne {
	uuo.mutation.ClearFsevents()
	return uuo
}

// RemoveFseventIDs removes the "fsevents" edge to FsEvent entities by IDs.
func (uuo *UserUpdateOne) RemoveFseventIDs(ids ...int) *UserUpdateOne {
	uuo.mutation.RemoveFseventIDs(ids...)
	return uuo
}

// RemoveFsevents removes "fsevents" edges to FsEvent entities.
func (uuo *UserUpdateOne) RemoveFsevents(f ...*FsEvent) *UserUpdateOne {
	ids := make([]int, len(f))
	for i := range f {
		ids[i] = f[i].ID
	}
	return uuo.RemoveFseventIDs(ids...)
}

// ClearEntities clears all "entities" edges to the Entity entity.
func (uuo *UserUpdateOne) ClearEntities() *UserUpdateOne {
	uuo.mutation.ClearEntities()

@ -1833,51 +1715,6 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if uuo.mutation.FseventsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   user.FseventsTable,
			Columns: []string{user.FseventsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := uuo.mutation.RemovedFseventsIDs(); len(nodes) > 0 && !uuo.mutation.FseventsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   user.FseventsTable,
			Columns: []string{user.FseventsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := uuo.mutation.FseventsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   user.FseventsTable,
			Columns: []string{user.FseventsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if uuo.mutation.EntitiesCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel: sqlgraph.O2M,
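The two sqlSave hunks above are the persistence half of those mutators: a fully cleared edge becomes an EdgeSpec in _spec.Edges.Clear, removed IDs become a targeted Clear spec, and added IDs become an Add spec. A condensed sketch of the shared shape, illustrative only and with a hypothetical helper name:

package main

import (
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"

	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
	"github.com/cloudreve/Cloudreve/v4/ent/user"
)

// fseventEdgeSpec builds the O2M edge spec used by all three branches above;
// passing IDs targets specific rows, passing none addresses the whole edge.
func fseventEdgeSpec(ids ...int) *sqlgraph.EdgeSpec {
	edge := &sqlgraph.EdgeSpec{
		Rel:     sqlgraph.O2M,
		Table:   user.FseventsTable,
		Columns: []string{user.FseventsColumn},
		Target: &sqlgraph.EdgeTarget{
			IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
		},
	}
	for _, k := range ids {
		edge.Target.Nodes = append(edge.Target.Nodes, k)
	}
	return edge
}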
go.mod
@ -1,15 +1,13 @@
module github.com/cloudreve/Cloudreve/v4

go 1.24.0

toolchain go1.24.9
go 1.23.0

require (
	entgo.io/ent v0.13.0
	github.com/Masterminds/semver/v3 v3.3.1
	github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0
	github.com/abslant/gzip v0.0.9
	github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
	github.com/aws/aws-sdk-go v1.31.5
	github.com/bodgit/sevenzip v1.6.0
	github.com/cloudflare/cfssl v1.6.1
	github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
	github.com/dsoprea/go-exif/v3 v3.0.1

@ -19,13 +17,12 @@ require (
	github.com/dsoprea/go-tiff-image-structure v0.0.0-20221003165014-8ecc4f52edca
	github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf
	github.com/fatih/color v1.18.0
	github.com/gin-contrib/cors v1.6.0
	github.com/gin-contrib/gzip v1.2.4
	github.com/gin-contrib/cors v1.3.0
	github.com/gin-contrib/sessions v1.0.2
	github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2
	github.com/gin-gonic/gin v1.11.0
	github.com/gin-gonic/gin v1.10.0
	github.com/go-ini/ini v1.50.0
	github.com/go-playground/validator/v10 v10.28.0
	github.com/go-playground/validator/v10 v10.20.0
	github.com/go-sql-driver/mysql v1.6.0
	github.com/go-webauthn/webauthn v0.11.2
	github.com/gofrs/uuid v4.0.0+incompatible

@ -52,16 +49,16 @@ require (
	github.com/speps/go-hashids v2.0.0+incompatible
	github.com/spf13/cobra v1.7.0
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.11.1
	github.com/stretchr/testify v1.9.0
	github.com/tencentyun/cos-go-sdk-v5 v0.7.54
	github.com/ua-parser/uap-go v0.0.0-20250213224047-9c035f085b90
	github.com/upyun/go-sdk v2.1.0+incompatible
	github.com/wneessen/go-mail v0.7.1
	github.com/wneessen/go-mail v0.6.2
	golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
	golang.org/x/image v0.18.0
	golang.org/x/text v0.30.0
	golang.org/x/image v0.0.0-20211028202545-6944b10bf410
	golang.org/x/text v0.23.0
	golang.org/x/time v0.5.0
	golang.org/x/tools v0.38.0
	golang.org/x/tools v0.24.0
	modernc.org/sqlite v1.30.0
)

@ -73,13 +70,14 @@ require (
	github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect
	github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
	github.com/bodgit/plumbing v1.3.0 // indirect
	github.com/bodgit/sevenzip v1.6.0 // indirect
	github.com/bodgit/windows v1.0.1 // indirect
	github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
	github.com/bytedance/gopkg v0.1.3 // indirect
	github.com/bytedance/sonic v1.14.1 // indirect
	github.com/bytedance/sonic/loader v0.3.0 // indirect
	github.com/bytedance/sonic v1.11.6 // indirect
	github.com/bytedance/sonic/loader v0.1.1 // indirect
	github.com/clbanning/mxj v1.8.4 // indirect
	github.com/cloudwego/base64x v0.1.6 // indirect
	github.com/cloudwego/base64x v0.1.4 // indirect
	github.com/cloudwego/iasm v0.2.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 // indirect
	github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect

@ -90,19 +88,18 @@ require (
	github.com/dsoprea/go-utility/v2 v2.0.0-20221003172846-a3e1774ef349 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.10 // indirect
	github.com/gin-contrib/sse v1.1.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
	github.com/gin-contrib/sse v0.1.0 // indirect
	github.com/go-errors/errors v1.4.2 // indirect
	github.com/go-openapi/inflect v0.19.0 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
	github.com/go-playground/universal-translator v0.18.1 // indirect
	github.com/go-webauthn/x v0.1.14 // indirect
	github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
	github.com/goccy/go-json v0.10.5 // indirect
	github.com/goccy/go-yaml v1.18.0 // indirect
	github.com/goccy/go-json v0.10.2 // indirect
	github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
	github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/go-tpm v0.9.1 // indirect
	github.com/gorilla/context v1.1.2 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect

@ -115,7 +112,7 @@ require (
	github.com/jmespath/go-jmespath v0.3.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/compress v1.17.11 // indirect
	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
	github.com/klauspost/pgzip v1.2.6 // indirect
	github.com/kr/pretty v0.3.1 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect

@ -130,27 +127,25 @@ require (
	github.com/mozillazg/go-httpheader v0.4.0 // indirect
	github.com/ncruces/go-strftime v0.1.9 // indirect
	github.com/nwaples/rardecode/v2 v2.1.0 // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
	github.com/pierrec/lz4/v4 v4.1.21 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/quic-go/qpack v0.5.1 // indirect
	github.com/quic-go/quic-go v0.55.0 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
	github.com/sorairolake/lzip-go v0.3.5 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
	github.com/ugorji/go/codec v1.3.0 // indirect
	github.com/ugorji/go/codec v1.2.12 // indirect
	github.com/ulikunitz/xz v0.5.12 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/zclconf/go-cty v1.8.0 // indirect
	go4.org v0.0.0-20230225012048-214862532bf5 // indirect
	golang.org/x/arch v0.22.0 // indirect
	golang.org/x/crypto v0.43.0 // indirect
	golang.org/x/mod v0.29.0 // indirect
	golang.org/x/net v0.46.0 // indirect
	golang.org/x/sync v0.17.0 // indirect
	golang.org/x/sys v0.37.0 // indirect
	google.golang.org/protobuf v1.36.10 // indirect
	golang.org/x/arch v0.8.0 // indirect
	golang.org/x/crypto v0.36.0 // indirect
	golang.org/x/mod v0.20.0 // indirect
	golang.org/x/net v0.38.0 // indirect
	golang.org/x/sync v0.12.0 // indirect
	golang.org/x/sys v0.31.0 // indirect
	google.golang.org/protobuf v1.34.2 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
go.sum
@ -87,6 +87,8 @@ github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFp
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/abslant/gzip v0.0.9 h1:zxuOQ8QmPwni7vwgE3EyOygdmeCo2UkCmO5t+7Ms6cA=
github.com/abslant/gzip v0.0.9/go.mod h1:IcN2c50tZn2y54oysNcIavbTAc1s0B2f5TqTEA+WCas=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=

@ -98,8 +100,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0 h1:wQlqotpyjYPjJz+Noh5bRu7Snmydk8SKC5Z6u1CR20Y=
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0/go.mod h1:FTzydeQVmR24FI0D6XWUOMKckjXehM/jgMn1xC+DA9M=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 h1:8PmGpDEZl9yDpcdEr6Odf23feCxK3LNUNMxjXg41pZQ=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=

@ -146,12 +148,10 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w=
github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=

@ -175,8 +175,10 @@ github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2
github.com/cloudflare/cfssl v1.6.1 h1:aIOUjpeuDJOpWjVJFP2ByplF53OgqG8I1S40Ggdlk3g=
github.com/cloudflare/cfssl v1.6.1/go.mod h1:ENhCj4Z17+bY2XikpxVmTHDg/C2IsG2Q0ZBeXpAqhCk=
github.com/cloudflare/redoctober v0.0.0-20201013214028-99c99a8e7544/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=

@ -295,24 +297,24 @@ github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZU
github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.6.0 h1:0Z7D/bVhE6ja07lI8CTjTonp6SB07o8bNuFyRbsBUQg=
github.com/gin-contrib/cors v1.6.0/go.mod h1:cI+h6iOAyxKRtUtC6iF/Si1KSFvGm/gK+kshxlCi8ro=
github.com/gin-contrib/gzip v1.2.4 h1:yNz4EhPC2kHSZJD1oc1zwp7MLEhEZ3goQeGM3a1b6jU=
github.com/gin-contrib/gzip v1.2.4/go.mod h1:aomRgR7ftdZV3uWY0gW/m8rChfxau0n8YVvwlOHONzw=
github.com/gin-contrib/cors v1.3.0 h1:PolezCc89peu+NgkIWt9OB01Kbzt6IP0J/JvkG6xxlg=
github.com/gin-contrib/cors v1.3.0/go.mod h1:artPvLlhkF7oG06nK8v3U8TNz6IeX+w1uzCSEId5/Vc=
github.com/gin-contrib/sessions v1.0.2 h1:UaIjUvTH1cMeOdj3in6dl+Xb6It8RiKRF9Z1anbUyCA=
github.com/gin-contrib/sessions v1.0.2/go.mod h1:KxKxWqWP5LJVDCInulOl4WbLzK2KSPlLesfZ66wRvMs=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2 h1:xLG16iua01X7Gzms9045s2Y2niNpvSY/Zb1oBwgNYZY=
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2/go.mod h1:VhW/Ch/3FhimwZb8Oj+qJmdMmoB8r7lmJ5auRjm50oQ=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=

@ -346,9 +348,10 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688=
github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU=
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=

@ -364,10 +367,8 @@ github.com/go-webauthn/x v0.1.14 h1:1wrB8jzXAofojJPAaRxnZhRgagvLGnLjhCAwg3kTpT0=
github.com/go-webauthn/x v0.1.14/go.mod h1:UuVvFZ8/NbOnkDz3y1NaxtUN87pmtpC1PQ+/5BBQRdc=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=

@ -440,8 +441,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=

@ -622,10 +623,12 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=

@ -646,6 +649,7 @@ github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=

@ -671,9 +675,11 @@ github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HN
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=

@ -777,8 +783,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@ -834,10 +840,6 @@ github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdk
github.com/qiniu/go-sdk/v7 v7.19.0 h1:k3AzDPil8QHIQnki6xXt4YRAjE52oRoBUXQ4bV+Wc5U=
github.com/qiniu/go-sdk/v7 v7.19.0/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w=
github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=
github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U=
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1 h1:leEwA4MD1ew0lNgzz6Q4G76G3AEfeci+TMggN6WuFRs=
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=

@ -934,8 +936,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0=

@ -958,8 +961,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=

@ -977,8 +980,8 @@ github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/weppos/publicsuffix-go v0.13.1-0.20210123135404-5fd73613514e/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/wneessen/go-mail v0.7.1 h1:rvy63sp14N06/kdGqCYwW8Na5gDCXjTQM1E7So4PuKk=
github.com/wneessen/go-mail v0.7.1/go.mod h1:+TkW6QP3EVkgTEqHtVmnAE/1MRhmzb8Y9/W3pweuS+k=
github.com/wneessen/go-mail v0.6.2 h1:c6V7c8D2mz868z9WJ+8zDKtUyLfZ1++uAZmo2GRFji8=
github.com/wneessen/go-mail v0.6.2/go.mod h1:L/PYjPK3/2ZlNb2/FjEBIn9n1rUWjW+Toy531oVmeb4=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=

@ -1029,8 +1032,6 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=

@ -1043,8 +1044,9 @@ go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZM
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
golang.org/x/arch v0.22.0 h1:c/Zle32i5ttqRXjdLyyHZESLD/bB90DCU1g9l/0YBDI=
golang.org/x/arch v0.22.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=

@ -1067,8 +1069,12 @@ golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=

@ -1085,8 +1091,8 @@ golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeId
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190501045829-6d32002ffd75/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410 h1:hTftEOvwiOq2+O8k2D5/Q7COC7k5Qcrgc2TFURJYnvQ=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

@ -1111,8 +1117,12 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1167,9 +1177,14 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@ -1198,8 +1213,13 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@ -1231,6 +1251,7 @@ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@ -1273,13 +1294,24 @@ golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@ -1291,8 +1323,13 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

@ -1366,8 +1403,11 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@ -1499,8 +1539,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

@ -1513,6 +1553,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=

@ -1572,8 +1613,10 @@ modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
@@ -130,7 +130,6 @@ type (
        Size            int64
        UploadSessionID uuid.UUID
        Importing       bool
        EncryptMetadata *types.EncryptMetadata
    }

    RelocateEntityParameter struct {

@@ -189,7 +188,7 @@ type FileClient interface {
    // Copy copies a layer of file to its corresponding destination folder. dstMap is a map from src parent ID to dst parent Files.
    Copy(ctx context.Context, files []*ent.File, dstMap map[int][]*ent.File) (map[int][]*ent.File, StorageDiff, error)
    // Delete deletes a group of files (and related models) with given entity recycle option
    Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error)
    Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error)
    // StaleEntities returns stale entities of a given file. If ID is not provided, all entities
    // will be examined.
    StaleEntities(ctx context.Context, ids ...int) ([]*ent.Entity, error)

@@ -470,7 +469,7 @@ func (f *fileClient) DeleteByUser(ctx context.Context, uid int) error {
    return nil
}

func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error) {
func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error) {
    // 1. Decrease reference count for all entities;
    // entities stores the relation between its reference count in `files` and entity ID.
    entities := make(map[int]int)

@@ -526,7 +525,7 @@ func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *typ
    for _, chunk := range chunks {
        if err := f.client.Entity.Update().
            Where(entity.IDIn(chunk...)).
            SetProps(options).
            SetRecycleOptions(options).
            Exec(ctx); err != nil {
            return nil, nil, fmt.Errorf("failed to update recycle options for entities %v: %w", chunk, err)
        }

@@ -885,17 +884,6 @@ func (f *fileClient) RemoveStaleEntities(ctx context.Context, file *ent.File) (S

func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *EntityParameters) (*ent.Entity, StorageDiff, error) {
    createdBy := UserFromContext(ctx)
    var opt *types.EntityProps
    if args.EncryptMetadata != nil {
        opt = &types.EntityProps{
            EncryptMetadata: &types.EncryptMetadata{
                Algorithm: args.EncryptMetadata.Algorithm,
                Key:       args.EncryptMetadata.Key,
                IV:        args.EncryptMetadata.IV,
            },
        }
    }

    stm := f.client.Entity.
        Create().
        SetType(int(args.EntityType)).

@@ -903,10 +891,6 @@ func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *Ent
        SetSize(args.Size).
        SetStoragePolicyID(args.StoragePolicyID)

    if opt != nil {
        stm.SetProps(opt)
    }

    if createdBy != nil && !IsAnonymousUser(createdBy) {
        stm.SetUser(createdBy)
    }
@@ -78,7 +78,7 @@ func (f *fileClient) searchQuery(q *ent.FileQuery, args *SearchFileParameters, p
            return metadata.And(metadata.NameEQ(item.Key), metadata.ValueEQ(item.Value))
        }

        nameEq := metadata.And(metadata.IsPublic(true), metadata.NameEQ(item.Key))
        nameEq := metadata.NameEQ(item.Key)
        if item.Value == "" {
            return nameEq
        } else {

@@ -86,9 +86,8 @@ func (f *fileClient) searchQuery(q *ent.FileQuery, args *SearchFileParameters, p
            return metadata.And(nameEq, valueContain)
        }
    })
    q.Where(file.And(lo.Map(metaPredicates, func(item predicate.Metadata, index int) predicate.File {
        return file.HasMetadataWith(item)
    })...))
    metaPredicates = append(metaPredicates, metadata.IsPublic(true))
    q.Where(file.HasMetadataWith(metadata.And(metaPredicates...)))
}

if args.SizeLte > 0 || args.SizeGte > 0 {
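The two predicate shapes in the hunk above are not equivalent: one clause per predicate lets each filter be satisfied by a different metadata row of the file, while a single combined `metadata.And(...)` requires one row to satisfy every filter at once, which can never match two different metadata names. A minimal, self-contained sketch of the difference (plain Go stand-ins, not the real ent-generated API):

```go
package main

import "fmt"

// meta is a toy stand-in for a file's metadata row.
type meta struct {
	name   string
	public bool
}

// formA mirrors one HasMetadataWith clause per predicate: each name may be
// matched by a different (public) row.
func formA(rows []meta, names ...string) bool {
	for _, n := range names {
		found := false
		for _, r := range rows {
			if r.public && r.name == n {
				found = true
			}
		}
		if !found {
			return false
		}
	}
	return true
}

// formB mirrors a single HasMetadataWith(metadata.And(...)): one row must
// satisfy every predicate (including IsPublic) simultaneously.
func formB(rows []meta, names ...string) bool {
	for _, r := range rows {
		ok := r.public
		for _, n := range names {
			if r.name != n {
				ok = false
			}
		}
		if ok {
			return true
		}
	}
	return false
}

func main() {
	rows := []meta{{"color", true}, {"rating", true}}
	fmt.Println(formA(rows, "color", "rating")) // true
	fmt.Println(formB(rows, "color", "rating")) // false: no single row carries both names
}
```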
@@ -1,81 +0,0 @@
package inventory

import (
    "context"

    "github.com/cloudreve/Cloudreve/v4/ent"
    "github.com/cloudreve/Cloudreve/v4/ent/fsevent"
    "github.com/cloudreve/Cloudreve/v4/ent/schema"
    "github.com/cloudreve/Cloudreve/v4/pkg/conf"
    "github.com/gofrs/uuid"
    "github.com/samber/lo"
)

type FsEventClient interface {
    TxOperator
    // Create a new FsEvent
    Create(ctx context.Context, uid int, subscriberId uuid.UUID, events ...string) error
    // Delete all FsEvents by subscriber
    DeleteBySubscriber(ctx context.Context, subscriberId uuid.UUID) error
    // Delete all FsEvents
    DeleteAll(ctx context.Context) error
    // Get all FsEvents by subscriber and user
    TakeBySubscriber(ctx context.Context, subscriberId uuid.UUID, userId int) ([]*ent.FsEvent, error)
}

func NewFsEventClient(client *ent.Client, dbType conf.DBType) FsEventClient {
    return &fsEventClient{client: client, maxSQlParam: sqlParamLimit(dbType)}
}

type fsEventClient struct {
    maxSQlParam int
    client      *ent.Client
}

func (c *fsEventClient) SetClient(newClient *ent.Client) TxOperator {
    return &fsEventClient{client: newClient, maxSQlParam: c.maxSQlParam}
}

func (c *fsEventClient) GetClient() *ent.Client {
    return c.client
}

func (c *fsEventClient) Create(ctx context.Context, uid int, subscriberId uuid.UUID, events ...string) error {
    stms := lo.Map(events, func(event string, index int) *ent.FsEventCreate {
        res := c.client.FsEvent.
            Create().
            SetUserFsevent(uid).
            SetEvent(event).
            SetSubscriber(subscriberId).SetEvent(event)

        return res
    })

    _, err := c.client.FsEvent.CreateBulk(stms...).Save(ctx)
    return err
}

func (c *fsEventClient) DeleteBySubscriber(ctx context.Context, subscriberId uuid.UUID) error {
    _, err := c.client.FsEvent.Delete().Where(fsevent.Subscriber(subscriberId)).Exec(schema.SkipSoftDelete(ctx))
    return err
}

func (c *fsEventClient) DeleteAll(ctx context.Context) error {
    _, err := c.client.FsEvent.Delete().Exec(schema.SkipSoftDelete(ctx))
    return err
}

func (c *fsEventClient) TakeBySubscriber(ctx context.Context, subscriberId uuid.UUID, userId int) ([]*ent.FsEvent, error) {
    res, err := c.client.FsEvent.Query().Where(fsevent.Subscriber(subscriberId), fsevent.UserFsevent(userId)).All(ctx)
    if err != nil {
        return nil, err
    }

    // Delete the FsEvents
    _, err = c.client.FsEvent.Delete().Where(fsevent.Subscriber(subscriberId), fsevent.UserFsevent(userId)).Exec(schema.SkipSoftDelete(ctx))
    if err != nil {
        return nil, err
    }

    return res, nil
}
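The deleted `FsEventClient` above implements a take-and-delete (poll) pattern: `TakeBySubscriber` reads all pending events for a subscriber and hard-deletes them in the same call, so each event is delivered at most once. A hedged sketch of a caller draining events; the `Event` field on `ent.FsEvent` is assumed from the `SetEvent` setter in the deleted code:

```go
package example

import (
	"context"
	"fmt"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/gofrs/uuid"
)

// FsEventClient mirrors the deleted interface above, reduced to the single
// method this sketch needs.
type FsEventClient interface {
	TakeBySubscriber(ctx context.Context, subscriberId uuid.UUID, userId int) ([]*ent.FsEvent, error)
}

// drainEvents polls once. Because TakeBySubscriber reads and deletes in one
// call, events taken here are already gone from the table even if the caller
// crashes before handling them: the at-most-once trade-off of this design.
func drainEvents(ctx context.Context, client FsEventClient, subscriberID uuid.UUID, userID int) error {
	events, err := client.TakeBySubscriber(ctx, subscriberID, userID)
	if err != nil {
		return err
	}
	for _, e := range events {
		fmt.Println("fs event:", e.Event) // Event field assumed from SetEvent in the deleted code
	}
	return nil
}
```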
@@ -279,53 +279,6 @@ type (
)

var patches = []Patch{
    {
        Name:       "apply_default_archive_viewer",
        EndVersion: "4.7.0",
        Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
            fileViewersSetting, err := client.Setting.Query().Where(setting.Name("file_viewers")).First(ctx)
            if err != nil {
                return fmt.Errorf("failed to query file_viewers setting: %w", err)
            }

            var fileViewers []types.ViewerGroup
            if err := json.Unmarshal([]byte(fileViewersSetting.Value), &fileViewers); err != nil {
                return fmt.Errorf("failed to unmarshal file_viewers setting: %w", err)
            }

            fileViewerExisted := false
            for _, viewer := range fileViewers[0].Viewers {
                if viewer.ID == "archive" {
                    fileViewerExisted = true
                    break
                }
            }

            // 2.2 If not existed, add it
            if !fileViewerExisted {
                // Found existing archive viewer default setting
                var defaultArchiveViewer types.Viewer
                for _, viewer := range defaultFileViewers[0].Viewers {
                    if viewer.ID == "archive" {
                        defaultArchiveViewer = viewer
                        break
                    }
                }

                fileViewers[0].Viewers = append(fileViewers[0].Viewers, defaultArchiveViewer)
                newFileViewersSetting, err := json.Marshal(fileViewers)
                if err != nil {
                    return fmt.Errorf("failed to marshal file_viewers setting: %w", err)
                }

                if _, err := client.Setting.UpdateOne(fileViewersSetting).SetValue(string(newFileViewersSetting)).Save(ctx); err != nil {
                    return fmt.Errorf("failed to update file_viewers setting: %w", err)
                }
            }

            return nil
        },
    },
    {
        Name:       "apply_default_excalidraw_viewer",
        EndVersion: "4.1.0",
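The `Patch` type itself sits outside these hunks; from the fields used above (`Name`, `EndVersion`, `Func`), its shape is presumably close to the sketch below. This is an inference from usage, including the import paths, not the repository's actual definition:

```go
package migrator

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/pkg/logging"
)

// Patch is reconstructed from how the patches slice above uses it; the real
// definition may differ.
type Patch struct {
	Name       string // unique identifier, presumably so each patch runs only once
	EndVersion string // last version that still needs this data migration (assumed)
	Func       func(l logging.Logger, client *ent.Client, ctx context.Context) error
}
```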
@@ -414,86 +367,6 @@ var patches = []Patch{
            }
        }

        return nil
    },
},
{
    Name:       "apply_email_title_magic_var",
    EndVersion: "4.7.0",
    Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
        // 1. Activation template
        mailActivationTemplateSetting, err := client.Setting.Query().Where(setting.Name("mail_activation_template")).First(ctx)
        if err != nil {
            return fmt.Errorf("failed to query mail_activation_template setting: %w", err)
        }

        var mailActivationTemplate []struct {
            Title    string `json:"title"`
            Body     string `json:"body"`
            Language string `json:"language"`
        }
        if err := json.Unmarshal([]byte(mailActivationTemplateSetting.Value), &mailActivationTemplate); err != nil {
            return fmt.Errorf("failed to unmarshal mail_activation_template setting: %w", err)
        }

        for i, t := range mailActivationTemplate {
            mailActivationTemplate[i].Title = fmt.Sprintf("[{{ .CommonContext.SiteBasic.Name }}] %s", t.Title)
        }

        newMailActivationTemplate, err := json.Marshal(mailActivationTemplate)
        if err != nil {
            return fmt.Errorf("failed to marshal mail_activation_template setting: %w", err)
        }

        if _, err := client.Setting.UpdateOne(mailActivationTemplateSetting).SetValue(string(newMailActivationTemplate)).Save(ctx); err != nil {
            return fmt.Errorf("failed to update mail_activation_template setting: %w", err)
        }

        // 2. Reset Password Template
        mailResetTemplateSetting, err := client.Setting.Query().Where(setting.Name("mail_reset_template")).First(ctx)
        if err != nil {
            return fmt.Errorf("failed to query mail_reset_template setting: %w", err)
        }

        var mailResetTemplate []struct {
            Title    string `json:"title"`
            Body     string `json:"body"`
            Language string `json:"language"`
        }
        if err := json.Unmarshal([]byte(mailResetTemplateSetting.Value), &mailResetTemplate); err != nil {
            return fmt.Errorf("failed to unmarshal mail_reset_template setting: %w", err)
        }

        for i, t := range mailResetTemplate {
            mailResetTemplate[i].Title = fmt.Sprintf("[{{ .CommonContext.SiteBasic.Name }}] %s", t.Title)
        }

        newMailResetTemplate, err := json.Marshal(mailResetTemplate)
        if err != nil {
            return fmt.Errorf("failed to marshal mail_reset_template setting: %w", err)
        }

        if _, err := client.Setting.UpdateOne(mailResetTemplateSetting).SetValue(string(newMailResetTemplate)).Save(ctx); err != nil {
            return fmt.Errorf("failed to update mail_reset_template setting: %w", err)
        }

        return nil
    },
},
{
    Name:       "apply_thumb_path_magic_var",
    EndVersion: "4.10.0",
    Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
        thumbSuffixSetting, err := client.Setting.Query().Where(setting.Name("thumb_entity_suffix")).First(ctx)
        if err != nil {
            return fmt.Errorf("failed to query thumb_entity_suffix setting: %w", err)
        }

        newThumbSuffix := fmt.Sprintf("{blob_path}/{blob_name}%s", thumbSuffixSetting.Value)
        if _, err := client.Setting.UpdateOne(thumbSuffixSetting).SetValue(newThumbSuffix).Save(ctx); err != nil {
            return fmt.Errorf("failed to update thumb_entity_suffix setting: %w", err)
        }

        return nil
    },
},

File diff suppressed because one or more lines are too long
@@ -101,10 +101,6 @@ type (
    SourceAuth bool `json:"source_auth,omitempty"`
    // QiniuUploadCdn whether to use CDN for Qiniu upload.
    QiniuUploadCdn bool `json:"qiniu_upload_cdn,omitempty"`
    // ChunkConcurrency the number of chunks to upload concurrently.
    ChunkConcurrency int `json:"chunk_concurrency,omitempty"`
    // Whether to enable file encryption.
    Encryption bool `json:"encryption,omitempty"`
}

FileType int

@@ -156,18 +152,8 @@ type (
    MasterSiteVersion string `json:"master_site_version,omitempty"`
}

EntityProps struct {
    UnlinkOnly      bool             `json:"unlink_only,omitempty"`
    EncryptMetadata *EncryptMetadata `json:"encrypt_metadata,omitempty"`
}

Cipher string

EncryptMetadata struct {
    Algorithm    Cipher `json:"algorithm"`
    Key          []byte `json:"key"`
    KeyPlainText []byte `json:"key_plain_text,omitempty"`
    IV           []byte `json:"iv"`
EntityRecycleOption struct {
    UnlinkOnly bool `json:"unlink_only,omitempty"`
}

DavAccountProps struct {

@@ -269,7 +255,6 @@ func FileTypeFromString(s string) FileType {
const (
    DavAccountReadOnly DavAccountOption = iota
    DavAccountProxy
    DavAccountDisableSysFiles
)

const (

@@ -306,19 +291,18 @@ const (

type (
    Viewer struct {
        ID                      string                             `json:"id"`
        Type                    ViewerType                         `json:"type"`
        DisplayName             string                             `json:"display_name"`
        Exts                    []string                           `json:"exts"`
        Url                     string                             `json:"url,omitempty"`
        Icon                    string                             `json:"icon,omitempty"`
        WopiActions             map[string]map[ViewerAction]string `json:"wopi_actions,omitempty"`
        Props                   map[string]string                  `json:"props,omitempty"`
        MaxSize                 int64                              `json:"max_size,omitempty"`
        Disabled                bool                               `json:"disabled,omitempty"`
        Templates               []NewFileTemplate                  `json:"templates,omitempty"`
        Platform                string                             `json:"platform,omitempty"`
        RequiredGroupPermission []GroupPermission                  `json:"required_group_permission,omitempty"`
        ID          string                             `json:"id"`
        Type        ViewerType                         `json:"type"`
        DisplayName string                             `json:"display_name"`
        Exts        []string                           `json:"exts"`
        Url         string                             `json:"url,omitempty"`
        Icon        string                             `json:"icon,omitempty"`
        WopiActions map[string]map[ViewerAction]string `json:"wopi_actions,omitempty"`
        Props       map[string]string                  `json:"props,omitempty"`
        MaxSize     int64                              `json:"max_size,omitempty"`
        Disabled    bool                               `json:"disabled,omitempty"`
        Templates   []NewFileTemplate                  `json:"templates,omitempty"`
        Platform    string                             `json:"platform,omitempty"`
    }
    ViewerGroup struct {
        Viewers []Viewer `json:"viewers"`

@@ -359,7 +343,3 @@ const (
    ProfileAllShare  = ShareLinksInProfileLevel("all_share")
    ProfileHideShare = ShareLinksInProfileLevel("hide_share")
)

const (
    CipherAES256CTR Cipher = "aes-256-ctr"
)

main.go
@@ -1,10 +1,8 @@
//go:debug rsa1024min=0
package main

import (
    _ "embed"
    "flag"

    "github.com/cloudreve/Cloudreve/v4/cmd"
    "github.com/cloudreve/Cloudreve/v4/pkg/util"
)

@@ -103,7 +103,6 @@ func InitializeHandling(dep dependency.Dep) gin.HandlerFunc {
    IP:        clientIp,
    Host:      c.Request.Host,
    UserAgent: c.Request.UserAgent(),
    ClientID:  c.GetHeader(request.ClientIDHeader),
}
cid := uuid.FromStringOrNil(c.GetHeader(request.CorrelationHeader))
if cid == uuid.Nil {

@@ -22,6 +22,4 @@ type RequestInfo struct {
    Host      string
    IP        string
    UserAgent string
    // ID of sync client
    ClientID string
}
@@ -180,9 +180,9 @@ func SlaveFileContentUrl(base *url.URL, srcPath, name string, download bool, spe
    return base
}

func SlaveMediaMetaRoute(src, ext, language string) string {
func SlaveMediaMetaRoute(src, ext string) string {
    src = url.PathEscape(base64.URLEncoding.EncodeToString([]byte(src)))
    return fmt.Sprintf("file/meta/%s/%s?language=%s", src, url.PathEscape(ext), language)
    return fmt.Sprintf("file/meta/%s/%s", src, url.PathEscape(ext))
}

func SlaveFileListRoute(srcPath string, recursive bool) string {
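`SlaveMediaMetaRoute` base64url-encodes the source path, path-escapes it, and (on the master side of this hunk) appends the preferred language as a query parameter. A runnable sketch of the resulting route string, mirroring the master-side function with a made-up input path:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

// slaveMediaMetaRoute mirrors the master-side SlaveMediaMetaRoute above.
func slaveMediaMetaRoute(src, ext, language string) string {
	src = url.PathEscape(base64.URLEncoding.EncodeToString([]byte(src)))
	return fmt.Sprintf("file/meta/%s/%s?language=%s", src, url.PathEscape(ext), language)
}

func main() {
	fmt.Println(slaveMediaMetaRoute("uploads/1/photo.jpg", "jpg", "en"))
	// Output: file/meta/dXBsb2Fkcy8xL3Bob3RvLmpwZw==/jpg?language=en
}
```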
@@ -38,29 +38,18 @@ func NewResetEmail(ctx context.Context, settings setting.Provider, user *ent.Use
    Url: url,
}

tmplTitle, err := template.New("resetTitle").Parse(selected.Title)
if err != nil {
    return "", "", fmt.Errorf("failed to parse email title: %w", err)
}

var resTitle strings.Builder
err = tmplTitle.Execute(&resTitle, resetCtx)
if err != nil {
    return "", "", fmt.Errorf("failed to execute email title: %w", err)
}

tmplBody, err := template.New("resetBody").Parse(selected.Body)
tmpl, err := template.New("reset").Parse(selected.Body)
if err != nil {
    return "", "", fmt.Errorf("failed to parse email template: %w", err)
}

var resBody strings.Builder
err = tmplBody.Execute(&resBody, resetCtx)
var res strings.Builder
err = tmpl.Execute(&res, resetCtx)
if err != nil {
    return "", "", fmt.Errorf("failed to execute email template: %w", err)
}

return resTitle.String(), resBody.String(), nil
return fmt.Sprintf("[%s] %s", resetCtx.SiteBasic.Name, selected.Title), res.String(), nil
}

// ActivationContext used for variables in activation email

@@ -84,29 +73,18 @@ func NewActivationEmail(ctx context.Context, settings setting.Provider, user *en
    Url: url,
}

tmplTitle, err := template.New("activationTitle").Parse(selected.Title)
if err != nil {
    return "", "", fmt.Errorf("failed to parse email title: %w", err)
}

var resTitle strings.Builder
err = tmplTitle.Execute(&resTitle, activationCtx)
if err != nil {
    return "", "", fmt.Errorf("failed to execute email title: %w", err)
}

tmplBody, err := template.New("activationBody").Parse(selected.Body)
tmpl, err := template.New("activation").Parse(selected.Body)
if err != nil {
    return "", "", fmt.Errorf("failed to parse email template: %w", err)
}

var resBody strings.Builder
err = tmplBody.Execute(&resBody, activationCtx)
var res strings.Builder
err = tmpl.Execute(&res, activationCtx)
if err != nil {
    return "", "", fmt.Errorf("failed to execute email template: %w", err)
}

return resTitle.String(), resBody.String(), nil
return fmt.Sprintf("[%s] %s", activationCtx.SiteBasic.Name, selected.Title), res.String(), nil
}

func commonContext(ctx context.Context, settings setting.Provider) *CommonContext {

@@ -144,4 +122,4 @@ func selectTemplate(templates []setting.EmailTemplate, u *ent.User) setting.Emai
    }

    return selected
}
}
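Both email hunks above make the same change: on one side the subject line is itself parsed and executed as a Go `text/template`, so it can reference variables such as `{{ .CommonContext.SiteBasic.Name }}`; on the other it is a plain string with a hardcoded `[SiteName]` prefix. A minimal self-contained sketch of the templated-title approach, with an illustrative context type in place of the project's real one:

```go
package main

import (
	"fmt"
	"strings"
	"text/template"
)

// titleCtx is a stand-in for the real email context; the field name here is
// illustrative only.
type titleCtx struct {
	SiteName string
}

func main() {
	// A title in the "magic var" form that the apply_email_title_magic_var
	// migration above rewrites existing settings into.
	const title = "[{{ .SiteName }}] Reset your password"

	tmpl, err := template.New("resetTitle").Parse(title)
	if err != nil {
		panic(err)
	}
	var out strings.Builder
	if err := tmpl.Execute(&out, titleCtx{SiteName: "Cloudreve"}); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // [Cloudreve] Reset your password
}
```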
@@ -244,7 +244,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {

mimeType := file.Props.MimeType
if mimeType == "" {
    mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
    handler.mime.TypeByName(file.Props.Uri.Name())
}

// Whether overwrite is allowed
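This hunk, and the matching ones in the other storage drivers below, differ only in whether the result of `TypeByName` is assigned: one side discards the return value, leaving `mimeType` empty whenever the upload request carries no MIME type, while the other assigns it so the extension-based lookup actually takes effect. A minimal sketch of the intended fallback, using the standard library's `mime` package as a stand-in for the project's own resolver:

```go
package main

import (
	"fmt"
	"mime"
	"path/filepath"
)

// resolveMimeType returns the requested MIME type, falling back to an
// extension-based guess, then to a generic binary type.
func resolveMimeType(requested, filename string) string {
	if requested != "" {
		return requested
	}
	if t := mime.TypeByExtension(filepath.Ext(filename)); t != "" {
		return t
	}
	return "application/octet-stream"
}

func main() {
	fmt.Println(resolveMimeType("", "report.pdf")) // application/pdf
}
```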
@@ -352,14 +352,6 @@ func (handler Driver) Thumb(ctx context.Context, expire *time.Time, ext string,
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("imageMogr2/thumbnail/%dx%d", w, h)

enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
    thumbParam += fmt.Sprintf("/format/%s/rquality/%d", enco.Format, enco.Quality)
case "png":
    thumbParam += fmt.Sprintf("/format/%s", enco.Format)
}

source, err := handler.signSourceURL(
    ctx,
    e.Source(),

@@ -382,12 +374,7 @@ func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetS
func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
    // Apply the various options
    options := urlOption{}

    if args.Speed > 0 {
        // Convert bytes to bits
        args.Speed *= 8

        // COS limits the allowed speed range
        if args.Speed < 819200 {
            args.Speed = 819200
        }

@@ -396,7 +383,6 @@ func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetS
    }
    options.Speed = args.Speed
}

if args.IsDownload {
    encodedFilename := url.PathEscape(args.DisplayName)
    options.ContentDescription = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,

@@ -455,7 +441,7 @@ func (handler Driver) Token(ctx context.Context, uploadSession *fs.UploadSession

mimeType := file.Props.MimeType
if mimeType == "" {
    mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
    handler.mime.TypeByName(file.Props.Uri.Name())
}

// Initialize the multipart upload

@@ -594,7 +580,7 @@ func (handler Driver) Meta(ctx context.Context, path string) (*MetaData, error)
    }, nil
}

func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
    if util.ContainsString(supportedImageExt, ext) {
        return handler.extractImageMeta(ctx, path)
    }
@@ -83,7 +83,7 @@ type (
    Capabilities() *Capabilities

    // MediaMeta extracts media metadata from the given file.
    MediaMeta(ctx context.Context, path, ext, language string) ([]MediaMeta, error)
    MediaMeta(ctx context.Context, path, ext string) ([]MediaMeta, error)
}

Capabilities struct {

@@ -117,7 +117,6 @@ const (
    MetaTypeExif        MetaType = "exif"
    MediaTypeMusic      MetaType = "music"
    MetaTypeStreamMedia MetaType = "stream"
    MetaTypeGeocoding   MetaType = "geocoding"
)

type ForceUsePublicEndpointCtx struct{}
@@ -219,7 +219,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {

mimeType := file.Props.MimeType
if mimeType == "" {
    mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
    handler.mime.TypeByName(file.Props.Uri.Name())
}

_, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{

@@ -298,48 +298,7 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e

// Thumb gets the thumbnail URL
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
    w, h := handler.settings.ThumbSize(ctx)
    thumbParam := fmt.Sprintf("@base@tag=imgScale&m=0&w=%d&h=%d", w, h)

    enco := handler.settings.ThumbEncode(ctx)
    switch enco.Format {
    case "jpg", "webp":
        thumbParam += fmt.Sprintf("&q=%d&F=%s", enco.Quality, enco.Format)
    case "png":
        thumbParam += fmt.Sprintf("&F=%s", enco.Format)
    }

    // Make sure the TTL is not negative; if it is, default to 7 days
    var ttl int64
    if expire != nil {
        ttl = int64(time.Until(*expire).Seconds())
    } else {
        ttl = 604800
    }

    thumbUrl, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
        HTTPMethod: s3.GET,                              // request method
        Bucket:     &handler.policy.BucketName,          // bucket name
        Key:        aws.String(e.Source() + thumbParam), // object key
        Expires:    ttl,                                 // expiration, in seconds
    })

    if err != nil {
        return "", err
    }

    // Swap the host of the signed URL for the user-defined CDN domain, if any
    finalThumbURL, err := url.Parse(thumbUrl)
    if err != nil {
        return "", err
    }

    // For public buckets, drop the signature query and unsupported headers
    if !handler.policy.IsPrivate {
        finalThumbURL.RawQuery = ""
    }

    return finalThumbURL.String(), nil
    return "", errors.New("not implemented")
}

// Source gets the file's external link

@@ -347,7 +306,7 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
    var contentDescription *string
    if args.IsDownload {
        encodedFilename := url.PathEscape(args.DisplayName)
        contentDescription = aws.String(fmt.Sprintf(`attachment; filename=%s`, encodedFilename))
        contentDescription = aws.String(fmt.Sprintf(`attachment; filename="%s"`, encodedFilename))
    }

    // Make sure the TTL is not negative; if it is, default to 7 days

@@ -399,7 +358,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio

mimeType := file.Props.MimeType
if mimeType == "" {
    mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
    handler.mime.TypeByName(file.Props.Uri.Name())
}

// Create the multipart upload

@@ -505,7 +464,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}

// MediaMeta gets media metadata
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
    return nil, errors.New("not implemented")
}
@@ -1,14 +1,13 @@
package local

import (
    "os"
    "time"

    "github.com/cloudreve/Cloudreve/v4/ent"
    "github.com/cloudreve/Cloudreve/v4/inventory/types"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
    "github.com/cloudreve/Cloudreve/v4/pkg/util"
    "github.com/gofrs/uuid"
    "os"
    "time"
)

// NewLocalFileEntity creates a new local file entity.

@@ -74,11 +73,3 @@ func (l *localFileEntity) UploadSessionID() *uuid.UUID {
func (l *localFileEntity) Model() *ent.Entity {
    return nil
}

func (l *localFileEntity) Props() *types.EntityProps {
    return nil
}

func (l *localFileEntity) Encrypted() bool {
    return false
}
@@ -140,9 +140,9 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
}

openMode := os.O_CREATE | os.O_RDWR
// if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite && file.Offset == 0 {
//     openMode |= os.O_TRUNC
// }
if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite && file.Offset == 0 {
    openMode |= os.O_TRUNC
}

out, err := os.OpenFile(dst, openMode, Perm)
if err != nil {
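The hunk above toggles whether the destination file is truncated when an overwrite starts at offset 0. With the truncation lines commented out, `os.O_CREATE | os.O_RDWR` keeps any existing bytes beyond the newly written range, so overwriting a larger file with a smaller one leaves stale tail data. A small self-contained demonstration of the difference:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// write opens path with or without O_TRUNC and writes data at offset 0.
func write(path, data string, truncate bool) {
	mode := os.O_CREATE | os.O_RDWR
	if truncate {
		mode |= os.O_TRUNC // drop existing content, as the restored code path does
	}
	f, err := os.OpenFile(path, mode, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := f.WriteAt([]byte(data), 0); err != nil {
		panic(err)
	}
}

func main() {
	p := filepath.Join(os.TempDir(), "trunc-demo.txt")
	defer os.Remove(p)

	write(p, "0123456789", false)
	write(p, "abc", false) // no O_TRUNC: the old tail survives
	b, _ := os.ReadFile(p)
	fmt.Println(string(b)) // abc3456789

	write(p, "abc", true) // with O_TRUNC: file holds exactly the new content
	b, _ = os.ReadFile(p)
	fmt.Println(string(b)) // abc
}
```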
@@ -298,6 +298,6 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
    return capabilities
}

func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
    return nil, errors.New("not implemented")
}

@@ -17,7 +17,7 @@ import (
    "github.com/samber/lo"
)

func (d *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
func (d *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
    thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
        Method: obs.HttpMethodGet,
        Bucket: d.policy.BucketName,

@@ -335,23 +335,13 @@ func (d *Driver) LocalPath(ctx context.Context, path string) string {

func (d *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
    w, h := d.settings.ThumbSize(ctx)
    thumbParam := fmt.Sprintf("image/resize,m_lfit,w_%d,h_%d", w, h)

    enco := d.settings.ThumbEncode(ctx)
    switch enco.Format {
    case "jpg", "webp":
        thumbParam += fmt.Sprintf("/format,%s/quality,q_%d", enco.Format, enco.Quality)
    case "png":
        thumbParam += fmt.Sprintf("/format,%s", enco.Format)
    }

    thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
        Method:  obs.HttpMethodGet,
        Bucket:  d.policy.BucketName,
        Key:     e.Source(),
        Expires: int(time.Until(*expire).Seconds()),
        QueryParams: map[string]string{
            imageProcessHeader: thumbParam,
            imageProcessHeader: fmt.Sprintf("image/resize,m_lfit,w_%d,h_%d", w, h),
        },
    })

@@ -241,7 +241,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
    }
}

func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
    return nil, errors.New("not implemented")
}

@@ -10,13 +10,12 @@ import (
    "encoding/pem"
    "errors"
    "fmt"
    "github.com/cloudreve/Cloudreve/v4/pkg/cache"
    "github.com/cloudreve/Cloudreve/v4/pkg/request"
    "io"
    "net/http"
    "net/url"
    "strings"

    "github.com/cloudreve/Cloudreve/v4/pkg/cache"
    "github.com/cloudreve/Cloudreve/v4/pkg/request"
)

const (

@@ -5,17 +5,16 @@ import (
    "encoding/json"
    "encoding/xml"
    "fmt"
    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
    "github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
    "github.com/cloudreve/Cloudreve/v4/pkg/request"
    "github.com/samber/lo"
    "math"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
    "github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
    "github.com/cloudreve/Cloudreve/v4/pkg/request"
    "github.com/samber/lo"
)

const (
@@ -266,14 +265,13 @@ func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]dri

// extractMediaInfo Sends API calls to OSS IMM service to extract media info.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, category string, forceSign bool) (string, error) {
    mediaOption := []oss.Option{oss.Process(category)}
    mediaInfoExpire := time.Now().Add(mediaInfoTTL)
    thumbURL, err := handler.signSourceURL(
        ctx,
        path,
        &mediaInfoExpire,
        &oss.GetObjectRequest{
            Process: oss.Ptr(category),
        },
        mediaOption,
        forceSign,
    )
    if err != nil {
@@ -15,8 +15,7 @@ import (
    "strings"
    "time"

    "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
    "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "github.com/cloudreve/Cloudreve/v4/ent"
    "github.com/cloudreve/Cloudreve/v4/inventory/types"
    "github.com/cloudreve/Cloudreve/v4/pkg/boolset"

@@ -53,6 +52,7 @@ type Driver struct {
    policy *ent.StoragePolicy

    client   *oss.Client
    bucket   *oss.Bucket
    settings setting.Provider
    l        logging.Logger
    config   conf.ConfigProvider
@@ -65,12 +65,12 @@ type Driver struct {
type key int

const (
    chunkRetrySleep       = time.Duration(5) * time.Second
    maxDeleteBatch        = 1000
    maxSignTTL            = time.Duration(24) * time.Hour * 7
    completeAllHeader     = "x-oss-complete-all"
    forbidOverwriteHeader = "x-oss-forbid-overwrite"
    trafficLimitHeader    = "x-oss-traffic-limit"
    chunkRetrySleep   = time.Duration(5) * time.Second
    uploadIdParam     = "uploadId"
    partNumberParam   = "partNumber"
    callbackParam     = "callback"
    completeAllHeader = "x-oss-complete-all"
    maxDeleteBatch    = 1000

    // MultiPartUploadThreshold is the size threshold above which the server uses multipart upload
    MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB

@@ -102,27 +102,21 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid

// CORS creates the cross-origin resource sharing policy
func (handler *Driver) CORS() error {
    _, err := handler.client.PutBucketCors(context.Background(), &oss.PutBucketCorsRequest{
        Bucket: &handler.policy.BucketName,
        CORSConfiguration: &oss.CORSConfiguration{
            CORSRules: []oss.CORSRule{
                {
                    AllowedOrigins: []string{"*"},
                    AllowedMethods: []string{
                        "GET",
                        "POST",
                        "PUT",
                        "DELETE",
                        "HEAD",
                    },
                    ExposeHeaders:  []string{},
                    AllowedHeaders: []string{"*"},
                    MaxAgeSeconds:  oss.Ptr(int64(3600)),
                },
    return handler.client.SetBucketCORS(handler.policy.BucketName, []oss.CORSRule{
        {
            AllowedOrigin: []string{"*"},
            AllowedMethod: []string{
                "GET",
                "POST",
                "PUT",
                "DELETE",
                "HEAD",
            },
        }})

    return err
            ExposeHeader:  []string{},
            AllowedHeader: []string{"*"},
            MaxAgeSeconds: 3600,
        },
    })
}

// InitOSSClient initializes the OSS authentication client
@@ -131,28 +125,34 @@ func (handler *Driver) InitOSSClient(forceUsePublicEndpoint bool) error {
    return errors.New("empty policy")
}

opt := make([]oss.ClientOption, 0)

// Decide whether to use the internal (intranet) endpoint
endpoint := handler.policy.Server
useCname := false
if handler.policy.Settings.ServerSideEndpoint != "" && !forceUsePublicEndpoint {
    endpoint = handler.policy.Settings.ServerSideEndpoint
} else if handler.policy.Settings.UseCname {
    useCname = true
    opt = append(opt, oss.UseCname(true))
}

if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
    endpoint = "https://" + endpoint
}

cfg := oss.LoadDefaultConfig().
    WithCredentialsProvider(credentials.NewStaticCredentialsProvider(handler.policy.AccessKey, handler.policy.SecretKey, "")).
    WithEndpoint(endpoint).
    WithRegion(handler.policy.Settings.Region).
    WithUseCName(useCname)

// Initialize the client
client := oss.NewClient(cfg)
client, err := oss.New(endpoint, handler.policy.AccessKey, handler.policy.SecretKey, opt...)
if err != nil {
    return err
}
handler.client = client

// Initialize the bucket
bucket, err := client.Bucket(handler.policy.BucketName)
if err != nil {
    return err
}
handler.bucket = bucket

return nil
}
@@ -166,40 +166,38 @@ func (handler *Driver) List(ctx context.Context, base string, onProgress driver.

var (
    delimiter string
    marker    string
    objects   []oss.ObjectProperties
    commons   []oss.CommonPrefix
    commons   []string
)
if !recursive {
    delimiter = "/"
}

p := handler.client.NewListObjectsPaginator(&oss.ListObjectsRequest{
    Bucket:    &handler.policy.BucketName,
    Prefix:    &base,
    MaxKeys:   1000,
    Delimiter: &delimiter,
})

for p.HasNext() {
    page, err := p.NextPage(ctx)
for {
    subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base),
        oss.MaxKeys(1000), oss.Delimiter(delimiter))
    if err != nil {
        return nil, err
    }

    objects = append(objects, page.Contents...)
    commons = append(commons, page.CommonPrefixes...)
    objects = append(objects, subRes.Objects...)
    commons = append(commons, subRes.CommonPrefixes...)
    marker = subRes.NextMarker
    if marker == "" {
        break
    }
}

// Process the listing results
res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))
// Process directories
for _, object := range commons {
    rel, err := filepath.Rel(base, *object.Prefix)
    rel, err := filepath.Rel(base, object)
    if err != nil {
        continue
    }
    res = append(res, fs.PhysicalObject{
        Name:         path.Base(*object.Prefix),
        Name:         path.Base(object),
        RelativePath: filepath.ToSlash(rel),
        Size:         0,
        IsDir:        true,

@@ -210,17 +208,17 @@ func (handler *Driver) List(ctx context.Context, base string, onProgress driver.

// Process files
for _, object := range objects {
    rel, err := filepath.Rel(base, *object.Key)
    rel, err := filepath.Rel(base, object.Key)
    if err != nil {
        continue
    }
    res = append(res, fs.PhysicalObject{
        Name:         path.Base(*object.Key),
        Source:       *object.Key,
        Name:         path.Base(object.Key),
        Source:       object.Key,
        RelativePath: filepath.ToSlash(rel),
        Size:         object.Size,
        IsDir:        false,
        LastModify:   *object.LastModified,
        LastModify:   object.LastModified,
    })
}
onProgress(len(res))
@@ -242,39 +240,30 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {

mimeType := file.Props.MimeType
if mimeType == "" {
    mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
    handler.mime.TypeByName(file.Props.Uri.Name())
}

// Whether overwrite is allowed
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
forbidOverwrite := oss.Ptr(strconv.FormatBool(!overwrite))
exipires := oss.Ptr(time.Now().Add(credentialTTL * time.Second).Format(time.RFC3339))
options := []oss.Option{
    oss.WithContext(ctx),
    oss.Expires(time.Now().Add(credentialTTL * time.Second)),
    oss.ForbidOverWrite(!overwrite),
    oss.ContentType(mimeType),
}

// Upload small files directly
if file.Props.Size < MultiPartUploadThreshold {
    _, err := handler.client.PutObject(ctx, &oss.PutObjectRequest{
        Bucket:          &handler.policy.BucketName,
        Key:             &file.Props.SavePath,
        Body:            file,
        ForbidOverwrite: forbidOverwrite,
        ContentType:     oss.Ptr(mimeType),
    })
    return err
    return handler.bucket.PutObject(file.Props.SavePath, file, options...)
}

// Use multipart upload above the threshold
imur, err := handler.client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
    Bucket:          &handler.policy.BucketName,
    Key:             &file.Props.SavePath,
    ContentType:     oss.Ptr(mimeType),
    ForbidOverwrite: forbidOverwrite,
    Expires:         exipires,
})
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
if err != nil {
    return fmt.Errorf("failed to initiate multipart upload: %w", err)
}

parts := make([]*oss.UploadPartResult, 0)
parts := make([]oss.UploadPart, 0)

chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
    Max: handler.settings.ChunkRetryLimit(ctx),

@@ -282,13 +271,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))

uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
    part, err := handler.client.UploadPart(ctx, &oss.UploadPartRequest{
        Bucket:     &handler.policy.BucketName,
        Key:        &file.Props.SavePath,
        UploadId:   imur.UploadId,
        PartNumber: int32(current.Index() + 1),
        Body:       content,
    })
    part, err := handler.bucket.UploadPart(imur, content, current.Length(), current.Index()+1, oss.WithContext(ctx))
    if err == nil {
        parts = append(parts, part)
    }

@@ -297,27 +280,14 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {

for chunks.Next() {
    if err := chunks.Process(uploadFunc); err != nil {
        handler.cancelUpload(*imur)
        handler.cancelUpload(imur)
        return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
    }
}

_, err = handler.client.CompleteMultipartUpload(ctx, &oss.CompleteMultipartUploadRequest{
    Bucket:   &handler.policy.BucketName,
    Key:      imur.Key,
    UploadId: imur.UploadId,
    CompleteMultipartUpload: &oss.CompleteMultipartUpload{
        Parts: lo.Map(parts, func(part *oss.UploadPartResult, i int) oss.UploadPart {
            return oss.UploadPart{
                PartNumber: int32(i + 1),
                ETag:       part.ETag,
            }
        }),
    },
    ForbidOverwrite: oss.Ptr(strconv.FormatBool(!overwrite)),
})
_, err = handler.bucket.CompleteMultipartUpload(imur, parts, oss.ForbidOverWrite(!overwrite), oss.WithContext(ctx))
if err != nil {
    handler.cancelUpload(*imur)
    handler.cancelUpload(imur)
}

return err
@@ -332,12 +302,7 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
for index, group := range groups {
    handler.l.Debug("Process delete group #%d: %v", index, group)
    // Delete the files
    delRes, err := handler.client.DeleteMultipleObjects(ctx, &oss.DeleteMultipleObjectsRequest{
        Bucket: &handler.policy.BucketName,
        Objects: lo.Map(group, func(v string, i int) oss.DeleteObject {
            return oss.DeleteObject{Key: &v}
        }),
    })
    delRes, err := handler.bucket.DeleteObjects(group)
    if err != nil {
        failed = append(failed, group...)
        lastError = err

@@ -345,14 +310,7 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
    }

    // Collect the files that were not deleted
    failed = append(
        failed,
        util.SliceDifference(files,
            lo.Map(delRes.DeletedObjects, func(v oss.DeletedInfo, i int) string {
                return *v.Key
            }),
        )...,
    )
    failed = append(failed, util.SliceDifference(files, delRes.DeletedObjects)...)
}

if len(failed) > 0 && lastError == nil {
@@ -376,23 +334,12 @@ func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string,

w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("image/resize,m_lfit,h_%d,w_%d", h, w)

enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
    thumbParam += fmt.Sprintf("/format,%s/quality,q_%d", enco.Format, enco.Quality)
case "png":
    thumbParam += fmt.Sprintf("/format,%s", enco.Format)
}

req := &oss.GetObjectRequest{
    Process: oss.Ptr(thumbParam),
}
thumbOption := []oss.Option{oss.Process(thumbParam)}
thumbURL, err := handler.signSourceURL(
    ctx,
    e.Source(),
    expire,
    req,
    thumbOption,
    false,
)
if err != nil {

@@ -414,11 +361,11 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
}

// Apply the various options
req := &oss.GetObjectRequest{}
var signOptions = make([]oss.Option, 0, 2)
if args.IsDownload {
    encodedFilename := url.PathEscape(args.DisplayName)
    req.ResponseContentDisposition = oss.Ptr(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
        encodedFilename, encodedFilename))
    signOptions = append(signOptions, oss.ResponseContentDisposition(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
        encodedFilename, encodedFilename)))
}
if args.Speed > 0 {
    // Convert bytes to bits

@@ -431,39 +378,25 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
    if args.Speed > 838860800 {
        args.Speed = 838860800
    }
    req.Parameters = map[string]string{
        trafficLimitHeader: strconv.FormatInt(args.Speed, 10),
    }
    signOptions = append(signOptions, oss.TrafficLimitParam(args.Speed))
}

return handler.signSourceURL(ctx, e.Source(), args.Expire, req, false)
return handler.signSourceURL(ctx, e.Source(), args.Expire, signOptions, false)
}

func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, req *oss.GetObjectRequest, forceSign bool) (string, error) {
    // V4 signing allows a TTL of at most 7 days
    ttl := maxSignTTL
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, options []oss.Option, forceSign bool) (string, error) {
    ttl := int64(86400 * 365 * 20)
    if expire != nil {
        ttl = time.Until(*expire)
        if ttl > maxSignTTL {
            ttl = maxSignTTL
        }
        ttl = int64(time.Until(*expire).Seconds())
    }

    if req == nil {
        req = &oss.GetObjectRequest{}
    }

    req.Bucket = &handler.policy.BucketName
    req.Key = &path

    // signedURL, err := handler.client.Presign(path, oss.HTTPGet, ttl, options...)
    result, err := handler.client.Presign(ctx, req, oss.PresignExpires(ttl))
    signedURL, err := handler.bucket.SignURL(path, oss.HTTPGet, ttl, options...)
    if err != nil {
        return "", err
    }

    // Swap the host of the signed URL for the user-defined CDN domain, if any
    finalURL, err := url.Parse(result.URL)
    finalURL, err := url.Parse(signedURL)
    if err != nil {
        return "", err
    }
@@ -471,12 +404,10 @@ func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *t
    // For public buckets, strip the signing parameters and unsupported headers
    if !handler.policy.IsPrivate && !forceSign {
        query := finalURL.Query()
        query.Del("x-oss-credential")
        query.Del("x-oss-date")
        query.Del("x-oss-expires")
        query.Del("x-oss-signature")
        query.Del("x-oss-signature-version")
        query.Del("OSSAccessKeyId")
        query.Del("Signature")
        query.Del("response-content-disposition")
        query.Del("x-oss-traffic-limit")
        finalURL.RawQuery = query.Encode()
    }
    return finalURL.String(), nil
@@ -510,45 +441,38 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio

mimeType := file.Props.MimeType
if mimeType == "" {
    mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
    handler.mime.TypeByName(file.Props.Uri.Name())
}

// Initialize the multipart upload
imur, err := handler.client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
    Bucket:          &handler.policy.BucketName,
    Key:             &file.Props.SavePath,
    ContentType:     oss.Ptr(mimeType),
    ForbidOverwrite: oss.Ptr(strconv.FormatBool(true)),
    Expires:         oss.Ptr(uploadSession.Props.ExpireAt.Format(time.RFC3339)),
})
options := []oss.Option{
    oss.WithContext(ctx),
    oss.Expires(uploadSession.Props.ExpireAt),
    oss.ForbidOverWrite(true),
    oss.ContentType(mimeType),
}
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
if err != nil {
    return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
}
uploadSession.UploadID = *imur.UploadId
uploadSession.UploadID = imur.UploadID

// Sign an upload URL for each part
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
urls := make([]string, chunks.Num())
ttl := time.Until(uploadSession.Props.ExpireAt)
ttl := int64(time.Until(uploadSession.Props.ExpireAt).Seconds())
for chunks.Next() {
    err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
        signedURL, err := handler.client.Presign(ctx, &oss.UploadPartRequest{
            Bucket:     &handler.policy.BucketName,
            Key:        &file.Props.SavePath,
            UploadId:   imur.UploadId,
            PartNumber: int32(c.Index() + 1),
            Body:       chunk,
            RequestCommon: oss.RequestCommon{
                Headers: map[string]string{
                    "Content-Type": "application/octet-stream",
                },
            },
        }, oss.PresignExpires(ttl))
        signedURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPut,
            ttl,
            oss.AddParam(partNumberParam, strconv.Itoa(c.Index()+1)),
            oss.AddParam(uploadIdParam, imur.UploadID),
            oss.ContentType("application/octet-stream"))
        if err != nil {
            return err
        }

        urls[c.Index()] = signedURL.URL
        urls[c.Index()] = signedURL
        return nil
    })
    if err != nil {

@@ -557,43 +481,29 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
}

// Sign the URL that completes the multipart upload
completeURL, err := handler.client.Presign(ctx, &oss.CompleteMultipartUploadRequest{
    Bucket:   &handler.policy.BucketName,
    Key:      &file.Props.SavePath,
    UploadId: imur.UploadId,
    RequestCommon: oss.RequestCommon{
        Parameters: map[string]string{
            "callback": callbackPolicyEncoded,
        },
        Headers: map[string]string{
            "Content-Type":        "application/octet-stream",
            completeAllHeader:     "yes",
            forbidOverwriteHeader: "true",
        },
    },
}, oss.PresignExpires(ttl))
completeURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPost, ttl,
    oss.ContentType("application/octet-stream"),
    oss.AddParam(uploadIdParam, imur.UploadID),
    oss.Expires(time.Now().Add(time.Duration(ttl)*time.Second)),
    oss.SetHeader(completeAllHeader, "yes"),
    oss.ForbidOverWrite(true),
    oss.AddParam(callbackParam, callbackPolicyEncoded))
if err != nil {
    return nil, err
}

return &fs.UploadCredential{
    UploadID:    *imur.UploadId,
    UploadID:    imur.UploadID,
    UploadURLs:  urls,
    CompleteURL: completeURL.URL,
    CompleteURL: completeURL,
    SessionID:   uploadSession.Props.UploadSessionID,
    ChunkSize:   handler.chunkSize,
    Callback:    callbackPolicyEncoded,
}, nil
}

// CancelToken cancels the upload credential
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
    _, err := handler.client.AbortMultipartUpload(ctx, &oss.AbortMultipartUploadRequest{
        Bucket:   &handler.policy.BucketName,
        Key:      &uploadSession.Props.SavePath,
        UploadId: &uploadSession.UploadID,
    })
    return err
    return handler.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{UploadID: uploadSession.UploadID, Key: uploadSession.Props.SavePath}, oss.WithContext(ctx))
}

func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
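The Token flow above hands the client one presigned PUT URL per chunk plus a presigned URL for completing the upload. A hedged sketch of what a client is then expected to do with the credential, using plain `net/http`; the field names follow the `fs.UploadCredential` struct shown in the diff, and the signed headers match the ones the Token hunk signs:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// uploadParts PUTs each chunk to its presigned URL, then finalizes the
// upload by POSTing to the presigned complete URL. Error handling is kept
// minimal; a real client would retry individual chunks.
func uploadParts(urls []string, chunks [][]byte, completeURL string) error {
	for i, u := range urls {
		req, err := http.NewRequest(http.MethodPut, u, bytes.NewReader(chunks[i]))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
		if resp.StatusCode >= 300 {
			return fmt.Errorf("part %d failed: %s", i, resp.Status)
		}
	}
	// x-oss-complete-all: yes asks OSS to assemble all uploaded parts
	// server-side, so no part list has to be sent in the body.
	req, err := http.NewRequest(http.MethodPost, completeURL, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("x-oss-complete-all", "yes")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("complete failed: %s", resp.Status)
	}
	return nil
}
```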
@@ -616,7 +526,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
    }
}

func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
    if util.ContainsString(supportedImageExt, ext) {
        return handler.extractImageMeta(ctx, path)
    }

@@ -637,11 +547,7 @@ func (handler *Driver) LocalPath(ctx context.Context, path string) string {
}

func (handler *Driver) cancelUpload(imur oss.InitiateMultipartUploadResult) {
    if _, err := handler.client.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{
        Bucket:   &handler.policy.BucketName,
        Key:      imur.Key,
        UploadId: imur.UploadId,
    }); err != nil {
    if err := handler.bucket.AbortMultipartUpload(imur); err != nil {
        handler.l.Warning("failed to abort multipart upload: %s", err)
    }
}
@@ -223,7 +223,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {

mimeType := file.Props.MimeType
if mimeType == "" {
    mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
    handler.mime.TypeByName(file.Props.Uri.Name())
}

err = resumeUploader.CompleteParts(ctx, upToken, upHost, nil, handler.policy.BucketName,

@@ -277,20 +277,10 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
// Thumb gets the file thumbnail
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
    w, h := handler.settings.ThumbSize(ctx)
    thumbParam := fmt.Sprintf("imageView2/1/w/%d/h/%d", w, h)

    enco := handler.settings.ThumbEncode(ctx)
    switch enco.Format {
    case "jpg", "webp":
        thumbParam += fmt.Sprintf("/format/%s/q/%d", enco.Format, enco.Quality)
    case "png":
        thumbParam += fmt.Sprintf("/format/%s", enco.Format)
    }

    return handler.signSourceURL(
        e.Source(),
        url.Values{
            thumbParam: []string{},
            fmt.Sprintf("imageView2/1/w/%d/h/%d", w, h): []string{},
        },
        expire,
    ), nil

@@ -389,7 +379,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio

mimeType := file.Props.MimeType
if mimeType == "" {
    mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
    handler.mime.TypeByName(file.Props.Uri.Name())
}

uploadSession.UploadID = ret.UploadID

@@ -433,7 +423,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
    }
}

func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
    if util.ContainsString(supportedImageExt, ext) {
        return handler.extractImageMeta(ctx, path)
    }
@@ -43,7 +43,7 @@ type Client interface {
    // DeleteUploadSession deletes remote upload session
    DeleteUploadSession(ctx context.Context, sessionID string) error
    // MediaMeta gets media meta from remote server
    MediaMeta(ctx context.Context, src, ext, language string) ([]driver.MediaMeta, error)
    MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error)
    // DeleteFiles deletes files from remote server
    DeleteFiles(ctx context.Context, files ...string) ([]string, error)
    // List lists files from remote server

@@ -183,10 +183,10 @@ func (c *remoteClient) DeleteFiles(ctx context.Context, files ...string) ([]stri
    return nil, nil
}

func (c *remoteClient) MediaMeta(ctx context.Context, src, ext, language string) ([]driver.MediaMeta, error) {
func (c *remoteClient) MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error) {
    resp, err := c.httpClient.Request(
        http.MethodGet,
        routes.SlaveMediaMetaRoute(src, ext, language),
        routes.SlaveMediaMetaRoute(src, ext),
        nil,
        request.WithContext(ctx),
        request.WithLogger(c.l),

@@ -179,6 +179,6 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
    }
}

func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
    return handler.uploadClient.MediaMeta(ctx, path, ext, language)
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
    return handler.uploadClient.MediaMeta(ctx, path, ext)
}
@@ -207,7 +207,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
 
     mimeType := file.Props.MimeType
     if mimeType == "" {
-        mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
+        handler.mime.TypeByName(file.Props.Uri.Name())
     }
 
     _, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{

@@ -344,7 +344,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
 
     mimeType := file.Props.MimeType
     if mimeType == "" {
-        mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
+        handler.mime.TypeByName(file.Props.Uri.Name())
     }
 
     // 创建分片上传

@@ -482,7 +482,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
     }
 }
 
-func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
+func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
     return nil, errors.New("not implemented")
 }
 
@@ -161,7 +161,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
 
     mimeType := file.Props.MimeType
     if mimeType == "" {
-        mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
+        handler.mime.TypeByName(file.Props.Uri.Name())
     }
 
     err := handler.up.Put(&upyun.PutObjectConfig{

@@ -203,16 +203,8 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
 // Thumb 获取文件缩略图
 func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
     w, h := handler.settings.ThumbSize(ctx)
 
     thumbParam := fmt.Sprintf("!/fwfh/%dx%d", w, h)
-
-    enco := handler.settings.ThumbEncode(ctx)
-    switch enco.Format {
-    case "jpg", "webp":
-        thumbParam += fmt.Sprintf("/format/%s/quality/%d", enco.Format, enco.Quality)
-    case "png":
-        thumbParam += fmt.Sprintf("/format/%s", enco.Format)
-    }
 
     thumbURL, err := handler.signURL(ctx, e.Source()+thumbParam, nil, expire)
     if err != nil {
         return "", err

@@ -309,7 +301,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
 
     mimeType := file.Props.MimeType
     if mimeType == "" {
-        mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
+        handler.mime.TypeByName(file.Props.Uri.Name())
     }
 
     return &fs.UploadCredential{

@@ -345,7 +337,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
     }
 }
 
-func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
+func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
     return handler.extractImageMeta(ctx, path)
 }
 
@@ -1,360 +0,0 @@
-// Package encrypt provides AES-256-CTR encryption and decryption functionality
-// compatible with the JavaScript EncryptedBlob implementation.
-//
-// # Usage Example
-//
-// Basic usage with encrypted metadata:
-//
-//    // Create AES256CTR instance
-//    aes := NewAES256CTR(masterKeyVault)
-//
-//    // Load encrypted metadata (key is encrypted with master key)
-//    err := aes.LoadMetadata(ctx, encryptedMetadata, masterKeyVault)
-//    if err != nil {
-//        return err
-//    }
-//
-//    // Set encrypted source stream
-//    err = aes.SetSource(encryptedStream, 0)
-//    if err != nil {
-//        return err
-//    }
-//
-//    // Read decrypted data
-//    decryptedData, err := io.ReadAll(aes)
-//    if err != nil {
-//        return err
-//    }
-//    aes.Close()
-//
-// Usage with plain metadata (already decrypted):
-//
-//    aes := NewAES256CTR(masterKeyVault)
-//    err := aes.LoadPlainMetadata(plainMetadata)
-//    err = aes.SetSource(encryptedStream, 0)
-//    // Read decrypted data...
-//
-// Usage with counter offset (for chunked/sliced streams):
-//
-//    // If reading from byte offset 1048576 (1MB) of the encrypted file
-//    aes := NewAES256CTR(masterKeyVault)
-//    err := aes.LoadPlainMetadata(metadata)
-//    err = aes.SetSource(encryptedStreamStartingAt1MB, 1048576)
-//    // This ensures proper counter alignment for correct decryption
-//
-// Using the Seeker interface (requires seekable source):
-//
-//    aes := NewAES256CTR(masterKeyVault)
-//    err := aes.LoadPlainMetadata(metadata)
-//    err = aes.SetSource(seekableEncryptedStream, 0)
-//    aes.SetSize(totalFileSize) // Required for io.SeekEnd
-//
-//    // Seek to position 1048576
-//    newPos, err := aes.Seek(1048576, io.SeekStart)
-//    // Read from that position...
-//
-//    // Seek relative to current position
-//    newPos, err = aes.Seek(100, io.SeekCurrent)
-//
-//    // Seek from end (requires SetSize to be called first)
-//    newPos, err = aes.Seek(-1024, io.SeekEnd)
-//
-// Using the factory pattern:
-//
-//    factory := NewDecrypterFactory(masterKeyVault)
-//    decrypter, err := factory(types.CipherAES256CTR)
-//    if err != nil {
-//        return err
-//    }
-//    err = decrypter.LoadMetadata(ctx, encryptedMetadata, masterKeyVault)
-//    err = decrypter.SetSource(encryptedStream, 0)
-//    defer decrypter.Close()
-package encrypt
-
-import (
-    "context"
-    "crypto/aes"
-    "crypto/cipher"
-    "crypto/rand"
-    "fmt"
-    "io"
-
-    "github.com/cloudreve/Cloudreve/v4/inventory/types"
-)
-
-// AES256CTR provides both encryption and decryption for AES-256-CTR.
-// It implements both Cryptor and Decrypter interfaces.
-type AES256CTR struct {
-    masterKeyVault MasterEncryptKeyVault
-
-    // Decryption fields
-    src           io.ReadCloser // Source encrypted stream
-    seeker        io.Seeker     // Seeker for the source stream
-    stream        cipher.Stream // AES-CTR cipher stream
-    metadata      *types.EncryptMetadata
-    counterOffset int64 // Byte offset for sliced streams
-    pos           int64 // Current read position relative to counterOffset
-    size          int64 // Total size of encrypted data (for SeekEnd support, -1 if unknown)
-    eof           bool  // EOF flag
-}
-
-func NewAES256CTR(masterKeyVault MasterEncryptKeyVault) *AES256CTR {
-    return &AES256CTR{
-        masterKeyVault: masterKeyVault,
-        size:           -1, // Unknown by default
-    }
-}
-
-func (e *AES256CTR) GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error) {
-    // Generate random 32-byte key for AES-256
-    key := make([]byte, 32)
-    if _, err := io.ReadFull(rand.Reader, key); err != nil {
-        return nil, err
-    }
-
-    // Generate random 16-byte IV for CTR mode
-    iv := make([]byte, 16)
-    if _, err := io.ReadFull(rand.Reader, iv); err != nil {
-        return nil, err
-    }
-
-    // Get master key from vault
-    masterKey, err := e.masterKeyVault.GetMasterKey(ctx)
-    if err != nil {
-        return nil, err
-    }
-
-    // Encrypt the key with master key
-    encryptedKey, err := EncryptWithMasterKey(masterKey, key)
-    if err != nil {
-        return nil, err
-    }
-
-    return &types.EncryptMetadata{
-        Algorithm:    types.CipherAES256CTR,
-        Key:          encryptedKey,
-        KeyPlainText: key,
-        IV:           iv,
-    }, nil
-}
-
-// LoadMetadata loads and decrypts the encryption metadata using the master key.
-func (e *AES256CTR) LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error {
-    if encryptedMetadata == nil {
-        return fmt.Errorf("encryption metadata is nil")
-    }
-
-    if encryptedMetadata.Algorithm != types.CipherAES256CTR {
-        return fmt.Errorf("unsupported algorithm: %s", encryptedMetadata.Algorithm)
-    }
-
-    if len(encryptedMetadata.KeyPlainText) > 0 {
-        e.metadata = encryptedMetadata
-        return nil
-    }
-
-    // Decrypt the encryption key
-    decryptedKey, err := DecriptKey(ctx, e.masterKeyVault, encryptedMetadata.Key)
-    if err != nil {
-        return fmt.Errorf("failed to decrypt encryption key: %w", err)
-    }
-
-    // Store decrypted metadata
-    e.metadata = &types.EncryptMetadata{
-        Algorithm:    encryptedMetadata.Algorithm,
-        KeyPlainText: decryptedKey,
-        IV:           encryptedMetadata.IV,
-    }
-
-    return nil
-}
-
-// SetSource sets the encrypted data source and initializes the cipher stream.
-// The counterOffset parameter allows for proper decryption of sliced streams,
-// where the stream doesn't start at byte 0 of the original file.
-//
-// For non-block-aligned offsets (offset % 16 != 0), this method advances the
-// cipher stream to the correct position within the block to ensure proper decryption.
-func (e *AES256CTR) SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error {
-    if e.metadata == nil {
-        return fmt.Errorf("metadata not loaded, call LoadMetadata first")
-    }
-
-    e.src = src
-    e.seeker = seeker
-    e.counterOffset = counterOffset
-    e.pos = 0     // Reset position to start
-    e.eof = false // Reset EOF flag
-    e.size = size
-
-    // Initialize cipher stream at counterOffset position
-    return e.initCipherStream(counterOffset)
-}
-
-// Read implements io.Reader interface to read decrypted data.
-// It reads encrypted data from the source and decrypts it on-the-fly.
-func (e *AES256CTR) Read(p []byte) (int, error) {
-    if e.src == nil {
-        return 0, fmt.Errorf("source not set, call SetSource first")
-    }
-
-    if e.eof {
-        return 0, io.EOF
-    }
-
-    // Read encrypted data from source
-    n, err := e.src.Read(p)
-    if err != nil {
-        if err == io.EOF {
-            e.eof = true
-            if n == 0 {
-                return 0, io.EOF
-            }
-        } else {
-            return n, err
-        }
-    }
-
-    // Decrypt data in place
-    if n > 0 {
-        e.stream.XORKeyStream(p[:n], p[:n])
-        e.pos += int64(n) // Update current position
-    }
-
-    return n, err
-}
-
-// Close implements io.Closer interface.
-func (e *AES256CTR) Close() error {
-    if e.src != nil {
-        return e.src.Close()
-    }
-    return nil
-}
-
-// Seek implements io.Seeker interface for seeking within the encrypted stream.
-// It properly adjusts the AES-CTR counter based on the seek position.
-//
-// Parameters:
-//   - offset: byte offset relative to whence
-//   - whence: io.SeekStart, io.SeekCurrent, or io.SeekEnd
-//
-// Returns the new absolute position (relative to counterOffset start).
-//
-// Note: For io.SeekEnd to work, you must call SetSize() first, otherwise it returns an error.
-// Also note that seeking requires the underlying source to support seeking (io.Seeker).
-func (e *AES256CTR) Seek(offset int64, whence int) (int64, error) {
-    if e.metadata == nil {
-        return 0, fmt.Errorf("metadata not loaded, call LoadMetadata first")
-    }
-
-    if e.src == nil {
-        return 0, fmt.Errorf("source not set, call SetSource first")
-    }
-
-    // Check if source supports seeking
-    if e.seeker == nil {
-        return 0, fmt.Errorf("source does not support seeking")
-    }
-
-    // Calculate new absolute position
-    var newPos int64
-    switch whence {
-    case io.SeekStart:
-        newPos = offset
-    case io.SeekCurrent:
-        newPos = e.pos + offset
-    case io.SeekEnd:
-        if e.size < 0 {
-            return 0, fmt.Errorf("size unknown, call SetSize before using SeekEnd")
-        }
-        newPos = e.size + offset
-    default:
-        return 0, fmt.Errorf("invalid whence: %d", whence)
-    }
-
-    // Validate new position
-    if newPos < 0 {
-        return 0, fmt.Errorf("negative position: %d", newPos)
-    }
-
-    // Seek in the underlying source stream
-    // The absolute position in the source is counterOffset + newPos
-    absPos := e.counterOffset + newPos
-    _, err := e.seeker.Seek(absPos, io.SeekStart)
-    if err != nil {
-        return 0, fmt.Errorf("failed to seek source: %w", err)
-    }
-
-    // Reinitialize cipher stream with new counter position
-    if err := e.initCipherStream(absPos); err != nil {
-        return 0, fmt.Errorf("failed to reinitialize cipher stream: %w", err)
-    }
-
-    // Update position and reset EOF flag
-    e.pos = newPos
-    e.eof = false
-
-    return newPos, nil
-}
-
-// initCipherStream initializes the cipher stream with proper counter alignment
-// for the given absolute byte position.
-func (e *AES256CTR) initCipherStream(absolutePosition int64) error {
-    // Create AES cipher block
-    block, err := aes.NewCipher(e.metadata.KeyPlainText)
-    if err != nil {
-        return fmt.Errorf("failed to create AES cipher: %w", err)
-    }
-
-    // Create counter value (16 bytes IV) and apply offset for position
-    counter := make([]byte, 16)
-    copy(counter, e.metadata.IV)
-
-    // Apply counter offset based on byte position (each block is 16 bytes)
-    if absolutePosition > 0 {
-        blockOffset := absolutePosition / 16
-        incrementCounter(counter, blockOffset)
-    }
-
-    // Create CTR cipher stream
-    e.stream = cipher.NewCTR(block, counter)
-
-    // For non-block-aligned offsets, we need to advance the stream position
-    // within the current block to match the offset
-    offsetInBlock := absolutePosition % 16
-    if offsetInBlock > 0 {
-        // Create a dummy buffer to advance the stream
-        dummy := make([]byte, offsetInBlock)
-        e.stream.XORKeyStream(dummy, dummy)
-    }
-
-    return nil
-}
-
-// incrementCounter increments a counter ([]byte) by a given number of blocks.
-// This matches the JavaScript implementation's incrementCounter function.
-// The counter is treated as a big-endian 128-bit integer.
-func incrementCounter(counter []byte, blocks int64) {
-    // Convert blocks to add into bytes (big-endian)
-    // We only need to handle the lower 64 bits since blocks is int64
-    for i := 15; i >= 0 && blocks > 0; i-- {
-        // Add the lowest byte of blocks to current counter byte
-        sum := uint64(counter[i]) + uint64(blocks&0xff)
-        counter[i] = byte(sum & 0xff)
-
-        // Shift blocks right by 8 bits for next iteration
-        blocks = blocks >> 8
-
-        // Add carry from this position to the next
-        if sum > 0xff {
-            carry := sum >> 8
-            // Propagate carry to higher bytes
-            for j := i - 1; j >= 0 && carry > 0; j-- {
-                sum = uint64(counter[j]) + carry
-                counter[j] = byte(sum & 0xff)
-                carry = sum >> 8
-            }
-        }
-    }
-}
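The removed `initCipherStream`/`incrementCounter` pair implements random access into an AES-CTR stream: advance the 128-bit big-endian counter by `offset/16` blocks, then discard `offset%16` keystream bytes. A self-contained sketch of the same technique using only the standard library (no Cloudreve types), round-tripping a decryption that starts at a non-block-aligned offset:

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
)

// ctrAt returns a CTR stream positioned at the given byte offset: the
// counter (seeded from the IV) is advanced by offset/16 blocks as a
// big-endian 128-bit integer, then offset%16 keystream bytes are burned.
func ctrAt(key, iv []byte, offset int64) (cipher.Stream, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	counter := make([]byte, 16)
	copy(counter, iv)
	for i, blocks := 15, offset/16; i >= 0 && blocks > 0; i-- {
		sum := uint64(counter[i]) + uint64(blocks&0xff)
		counter[i] = byte(sum)
		blocks >>= 8
		// Propagate any carry into the higher-order bytes.
		for j, carry := i-1, sum>>8; j >= 0 && carry > 0; j-- {
			sum = uint64(counter[j]) + carry
			counter[j] = byte(sum)
			carry = sum >> 8
		}
	}
	stream := cipher.NewCTR(block, counter)
	if rem := offset % 16; rem > 0 {
		skip := make([]byte, rem)
		stream.XORKeyStream(skip, skip) // advance within the block
	}
	return stream, nil
}

func main() {
	key := make([]byte, 32)
	iv := make([]byte, 16)
	io.ReadFull(rand.Reader, key)
	io.ReadFull(rand.Reader, iv)

	plain := bytes.Repeat([]byte("cloudreve!"), 100)
	enc, _ := ctrAt(key, iv, 0)
	ciphertext := make([]byte, len(plain))
	enc.XORKeyStream(ciphertext, plain)

	// Decrypt only the tail, starting at a non-block-aligned offset.
	const off = 123
	dec, _ := ctrAt(key, iv, off)
	tail := make([]byte, len(plain)-off)
	dec.XORKeyStream(tail, ciphertext[off:])
	fmt.Println(bytes.Equal(tail, plain[off:])) // true
}
```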
@@ -1,97 +0,0 @@
-package encrypt
-
-import (
-    "context"
-    "crypto/aes"
-    "crypto/cipher"
-    "crypto/rand"
-    "fmt"
-    "io"
-
-    "github.com/cloudreve/Cloudreve/v4/inventory/types"
-)
-
-type (
-    Cryptor interface {
-        io.ReadCloser
-        io.Seeker
-        // LoadMetadata loads and decrypts the encryption metadata using the master key
-        LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error
-        // SetSource sets the encrypted data source and initializes the cipher stream
-        SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error
-        // GenerateMetadata generates a new encryption metadata
-        GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error)
-    }
-
-    CryptorFactory func(algorithm types.Cipher) (Cryptor, error)
-)
-
-func NewCryptorFactory(masterKeyVault MasterEncryptKeyVault) CryptorFactory {
-    return func(algorithm types.Cipher) (Cryptor, error) {
-        switch algorithm {
-        case types.CipherAES256CTR:
-            return NewAES256CTR(masterKeyVault), nil
-        default:
-            return nil, fmt.Errorf("unknown algorithm: %s", algorithm)
-        }
-    }
-}
-
-// EncryptWithMasterKey encrypts data using the master key with AES-256-CTR
-// Returns: [16-byte IV] + [encrypted data]
-func EncryptWithMasterKey(masterKey, data []byte) ([]byte, error) {
-    // Create AES cipher with master key
-    block, err := aes.NewCipher(masterKey)
-    if err != nil {
-        return nil, err
-    }
-
-    // Generate random IV for encryption
-    iv := make([]byte, 16)
-    if _, err := io.ReadFull(rand.Reader, iv); err != nil {
-        return nil, err
-    }
-
-    // Encrypt data
-    stream := cipher.NewCTR(block, iv)
-    encrypted := make([]byte, len(data))
-    stream.XORKeyStream(encrypted, data)
-
-    // Return IV + encrypted data
-    result := append(iv, encrypted...)
-    return result, nil
-}
-
-func DecriptKey(ctx context.Context, keyVault MasterEncryptKeyVault, encryptedKey []byte) ([]byte, error) {
-    masterKey, err := keyVault.GetMasterKey(ctx)
-    if err != nil {
-        return nil, fmt.Errorf("failed to get master key: %w", err)
-    }
-    return DecryptWithMasterKey(masterKey, encryptedKey)
-}
-
-// DecryptWithMasterKey decrypts data using the master key with AES-256-CTR
-// Input format: [16-byte IV] + [encrypted data]
-func DecryptWithMasterKey(masterKey, encryptedData []byte) ([]byte, error) {
-    // Validate input length
-    if len(encryptedData) < 16 {
-        return nil, aes.KeySizeError(len(encryptedData))
-    }
-
-    // Extract IV and encrypted data
-    iv := encryptedData[:16]
-    encrypted := encryptedData[16:]
-
-    // Create AES cipher with master key
-    block, err := aes.NewCipher(masterKey)
-    if err != nil {
-        return nil, err
-    }
-
-    // Decrypt data
-    stream := cipher.NewCTR(block, iv)
-    decrypted := make([]byte, len(encrypted))
-    stream.XORKeyStream(decrypted, encrypted)
-
-    return decrypted, nil
-}
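The removed helpers wrap a per-file key under the master key as `[16-byte IV] + ciphertext`. A standalone re-implementation of that layout and its inverse, useful for seeing the round trip in isolation; the names `wrap`/`unwrap` are illustrative, not the package's API:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
)

// wrap encrypts data under masterKey and prepends the random IV,
// matching the [16-byte IV] + [ciphertext] layout used above.
func wrap(masterKey, data []byte) ([]byte, error) {
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return nil, err
	}
	iv := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return nil, err
	}
	out := make([]byte, 16+len(data))
	copy(out, iv)
	cipher.NewCTR(block, iv).XORKeyStream(out[16:], data)
	return out, nil
}

// unwrap reverses wrap by splitting the IV off the front of the blob.
func unwrap(masterKey, blob []byte) ([]byte, error) {
	if len(blob) < 16 {
		return nil, fmt.Errorf("blob too short")
	}
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return nil, err
	}
	plain := make([]byte, len(blob)-16)
	cipher.NewCTR(block, blob[:16]).XORKeyStream(plain, blob[16:])
	return plain, nil
}

func main() {
	master := make([]byte, 32)
	io.ReadFull(rand.Reader, master)

	fileKey := make([]byte, 32)
	io.ReadFull(rand.Reader, fileKey)

	blob, _ := wrap(master, fileKey)
	back, _ := unwrap(master, blob)
	fmt.Println(string(back) == string(fileKey)) // true
}
```

Note that CTR provides confidentiality only; there is no authentication tag here, which is why the vault holding the master key matters so much in the surrounding design.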
@@ -1,105 +0,0 @@
-package encrypt
-
-import (
-    "context"
-    "encoding/base64"
-    "errors"
-    "fmt"
-    "os"
-
-    "github.com/cloudreve/Cloudreve/v4/pkg/setting"
-)
-
-const (
-    EnvMasterEncryptKey = "CR_ENCRYPT_MASTER_KEY"
-)
-
-// MasterEncryptKeyVault is a vault for the master encrypt key.
-type MasterEncryptKeyVault interface {
-    GetMasterKey(ctx context.Context) ([]byte, error)
-}
-
-func NewMasterEncryptKeyVault(ctx context.Context, settings setting.Provider) MasterEncryptKeyVault {
-    vaultType := settings.MasterEncryptKeyVault(ctx)
-    switch vaultType {
-    case setting.MasterEncryptKeyVaultTypeEnv:
-        return NewEnvMasterEncryptKeyVault()
-    case setting.MasterEncryptKeyVaultTypeFile:
-        return NewFileMasterEncryptKeyVault(settings.MasterEncryptKeyFile(ctx))
-    default:
-        return NewSettingMasterEncryptKeyVault(settings)
-    }
-}
-
-// settingMasterEncryptKeyVault is a vault for the master encrypt key that gets the key from the setting KV.
-type settingMasterEncryptKeyVault struct {
-    setting setting.Provider
-}
-
-func NewSettingMasterEncryptKeyVault(setting setting.Provider) MasterEncryptKeyVault {
-    return &settingMasterEncryptKeyVault{setting: setting}
-}
-
-func (v *settingMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
-    key := v.setting.MasterEncryptKey(ctx)
-    if key == nil {
-        return nil, errors.New("master encrypt key is not set")
-    }
-    return key, nil
-}
-
-func NewEnvMasterEncryptKeyVault() MasterEncryptKeyVault {
-    return &envMasterEncryptKeyVault{}
-}
-
-type envMasterEncryptKeyVault struct {
-}
-
-var envMasterKeyCache = []byte{}
-
-func (v *envMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
-    if len(envMasterKeyCache) > 0 {
-        return envMasterKeyCache, nil
-    }
-
-    key := os.Getenv(EnvMasterEncryptKey)
-    if key == "" {
-        return nil, errors.New("master encrypt key is not set")
-    }
-
-    decodedKey, err := base64.StdEncoding.DecodeString(key)
-    if err != nil {
-        return nil, fmt.Errorf("failed to decode master encrypt key: %w", err)
-    }
-
-    envMasterKeyCache = decodedKey
-    return decodedKey, nil
-}
-
-func NewFileMasterEncryptKeyVault(path string) MasterEncryptKeyVault {
-    return &fileMasterEncryptKeyVault{path: path}
-}
-
-var fileMasterKeyCache = []byte{}
-
-type fileMasterEncryptKeyVault struct {
-    path string
-}
-
-func (v *fileMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
-    if len(fileMasterKeyCache) > 0 {
-        return fileMasterKeyCache, nil
-    }
-
-    key, err := os.ReadFile(v.path)
-    if err != nil {
-        return nil, fmt.Errorf("invalid master encrypt key file")
-    }
-
-    decodedKey, err := base64.StdEncoding.DecodeString(string(key))
-    if err != nil {
-        return nil, fmt.Errorf("invalid master encrypt key")
-    }
-    fileMasterKeyCache = decodedKey
-    return fileMasterKeyCache, nil
-}
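The env vault above expects `CR_ENCRYPT_MASTER_KEY` to hold a base64-encoded key. A small sketch showing how to generate a 32-byte key in that format and decode it back the way `envMasterEncryptKeyVault.GetMasterKey` does:

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"os"
)

func main() {
	// Generate a random 32-byte key and print it in the base64 form
	// the CR_ENCRYPT_MASTER_KEY variable expects.
	key := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		panic(err)
	}
	encoded := base64.StdEncoding.EncodeToString(key)
	fmt.Println("export CR_ENCRYPT_MASTER_KEY=" + encoded)

	// Decoding mirrors what the env vault does on first access.
	os.Setenv("CR_ENCRYPT_MASTER_KEY", encoded)
	decoded, err := base64.StdEncoding.DecodeString(os.Getenv("CR_ENCRYPT_MASTER_KEY"))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(decoded)) // 32
}
```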
@@ -1,193 +0,0 @@
-package eventhub
-
-import "errors"
-
-type (
-    Event struct {
-        Type   EventType `json:"type"`
-        FileID string    `json:"file_id"`
-        From   string    `json:"from"`
-        To     string    `json:"to"`
-    }
-
-    EventType string
-)
-
-const (
-    EventTypeCreate = "create"
-    EventTypeModify = "modify"
-    EventTypeRename = "rename"
-    EventTypeDelete = "delete"
-)
-
-var (
-    // ErrEventHubClosed is returned when operations are attempted on a closed EventHub.
-    ErrEventHubClosed = errors.New("event hub is closed")
-)
-
-// eventState tracks the accumulated state for each file
-type eventState struct {
-    baseType    EventType // The base event type (Create, Delete, or first event type)
-    originalSrc string    // Original source path (for Create or first Rename)
-    currentDst  string    // Current destination path
-}
-
-/*
-Modify + Modify → keep only the last Modify;
-Create + Modify → fold into a single Create with final metadata/content.
-Create + Rename(a→b) → Create at b.
-Create + Delete → drop both (ephemeral object never needs to reach clients).
-Modify + Delete → Delete (intermediate Modify is irrelevant to final state).
-Rename(a→b) + Rename(b→c) → Rename(a→c).
-Rename(a→b) + Modify → emit Rename(a→b) then a single Modify at b (or fold Modify into Create if the chain starts with Create).
-Rename(a→b) + Delete → emit only Delete(object_id);
-Rename(a→b) + Rename(b→a) with no intervening Modify → drop both (rename there-and-back is a no-op).
-Delete + Create might be a valid case, e.g. user restore same file from trash bin.
-*/
-// DebounceEvents takes time-ordered events and returns debounced/merged events.
-func DebounceEvents(in []*Event) []*Event {
-    if len(in) == 0 {
-        return nil
-    }
-
-    states := make(map[string]*eventState) // keyed by FileID
-    order := make([]string, 0)             // to preserve order of first appearance
-
-    for _, e := range in {
-        state, exists := states[e.FileID]
-
-        if !exists {
-            // First event for this file
-            order = append(order, e.FileID)
-            states[e.FileID] = &eventState{
-                baseType:    e.Type,
-                originalSrc: e.From,
-                currentDst:  e.To,
-            }
-            continue
-        }
-
-        switch e.Type {
-        case EventTypeCreate:
-            // Delete + Create → keep as Create (e.g. restore from trash)
-            if state.baseType == EventTypeDelete {
-                state.baseType = EventTypeCreate
-                state.originalSrc = e.From
-                state.currentDst = ""
-            }
-
-        case EventTypeModify:
-            switch state.baseType {
-            case EventTypeCreate:
-                // Create + Modify → fold into Create (no change needed, Create already implies content)
-            case EventTypeModify:
-                // Modify + Modify → keep only last Modify (state already correct)
-            case EventTypeRename:
-                // Rename + Modify → fold into first Rename
-            case EventTypeDelete:
-                // Delete + Modify → should not happen, but ignore Modify
-            }
-
-        case EventTypeRename:
-            switch state.baseType {
-            case EventTypeCreate:
-                // Create + Rename(a→b) → Create at b
-                state.originalSrc = e.To
-                state.currentDst = ""
-            case EventTypeModify:
-                // Modify + Rename → emit Rename only
-                state.baseType = EventTypeRename
-                state.currentDst = e.To
-                state.originalSrc = e.From
-
-            case EventTypeRename:
-                // Rename(a→b) + Rename(b→c) → Rename(a→c)
-                // Check for no-op: Rename(a→b) + Rename(b→a) → drop both
-                if state.originalSrc == e.To {
-                    // Rename there-and-back, drop both
-                    delete(states, e.FileID)
-                    // Remove from order
-                    for i, id := range order {
-                        if id == e.FileID {
-                            order = append(order[:i], order[i+1:]...)
-                            break
-                        }
-                    }
-                } else {
-                    state.currentDst = e.To
-                }
-            case EventTypeDelete:
-                // Delete + Rename → should not happen, ignore
-            }
-
-        case EventTypeDelete:
-            switch state.baseType {
-            case EventTypeCreate:
-                // Create + Delete → drop both (ephemeral object)
-                delete(states, e.FileID)
-                // Remove from order
-                for i, id := range order {
-                    if id == e.FileID {
-                        order = append(order[:i], order[i+1:]...)
-                        break
-                    }
-                }
-            case EventTypeModify:
-                // Modify + Delete → Delete
-                state.baseType = EventTypeDelete
-                state.originalSrc = e.From
-                state.currentDst = ""
-            case EventTypeRename:
-                // Rename + Delete → Delete only
-                state.baseType = EventTypeDelete
-                state.originalSrc = e.From
-                state.currentDst = ""
-            case EventTypeDelete:
-                // Delete + Delete → keep Delete (should not happen normally)
-            }
-        }
-    }
-
-    // Build output events in order
-    result := make([]*Event, 0, len(order))
-    for _, fileID := range order {
-        state, exists := states[fileID]
-        if !exists {
-            continue
-        }
-
-        switch state.baseType {
-        case EventTypeCreate:
-            result = append(result, &Event{
-                Type:   EventTypeCreate,
-                FileID: fileID,
-                From:   state.originalSrc,
-            })
-        case EventTypeModify:
-            result = append(result, &Event{
-                Type:   EventTypeModify,
-                FileID: fileID,
-                From:   state.originalSrc,
-            })
-        case EventTypeRename:
-            // If hasModify and base was originally Modify (converted to Rename),
-            // we need to emit Modify first at original location
-            // But in our current logic, Modify+Rename sets hasModify=true
-            // We emit Rename, then Modify if needed
-            result = append(result, &Event{
-                Type:   EventTypeRename,
-                FileID: fileID,
-                From:   state.originalSrc,
-                To:     state.currentDst,
-            })
-        case EventTypeDelete:
-            result = append(result, &Event{
-                Type:   EventTypeDelete,
-                FileID: fileID,
-                From:   state.originalSrc,
-            })
-        }
-    }
-
-    return result
-}
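The merging rules in the comment block above translate to usage like the following. This only compiles against the master-side tree where the removed package still exists, and the expected output is inferred from the stated rules rather than a recorded run:

```go
package main

import (
	"fmt"

	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
)

func main() {
	in := []*eventhub.Event{
		// Two chained renames for f1 collapse into one.
		{Type: eventhub.EventTypeRename, FileID: "f1", From: "/a", To: "/b"},
		{Type: eventhub.EventTypeRename, FileID: "f1", From: "/b", To: "/c"},
		// Create followed by Delete for f2 cancels out entirely.
		{Type: eventhub.EventTypeCreate, FileID: "f2", From: "/tmp/x"},
		{Type: eventhub.EventTypeDelete, FileID: "f2", From: "/tmp/x"},
	}
	for _, e := range eventhub.DebounceEvents(in) {
		fmt.Printf("%s %s: %s -> %s\n", e.Type, e.FileID, e.From, e.To)
	}
	// Expected single line: rename f1: /a -> /c
}
```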
@@ -1,199 +0,0 @@
-package eventhub
-
-import (
-    "context"
-    "sync"
-    "time"
-
-    "github.com/cloudreve/Cloudreve/v4/inventory"
-    "github.com/cloudreve/Cloudreve/v4/pkg/logging"
-)
-
-type (
-    EventHub interface {
-        // Subscribe to a topic and return a channel to receive events.
-        // If a subscriber with the same ID already exists and is offline,
-        // it will be reactivated and any buffered events will be flushed.
-        Subscribe(ctx context.Context, topic int, id string) (chan *Event, bool, error)
-        // Unsubscribe marks the subscriber as offline instead of removing it.
-        // Buffered events will be kept for when the subscriber reconnects.
-        // Subscribers that remain offline for more than 14 days will be permanently removed.
-        Unsubscribe(ctx context.Context, topic int, id string)
-        // Get subscribers of a topic.
-        GetSubscribers(ctx context.Context, topic int) []Subscriber
-        // Close shuts down the event hub and disconnects all subscribers.
-        Close()
-    }
-)
-
-const (
-    bufSize       = 16
-    cleanupPeriod = 1 * time.Hour
-)
-
-type eventHub struct {
-    mu            sync.RWMutex
-    topics        map[int]map[string]*subscriber
-    userClient    inventory.UserClient
-    fsEventClient inventory.FsEventClient
-    closed        bool
-    closeCh       chan struct{}
-    wg            sync.WaitGroup
-}
-
-func NewEventHub(userClient inventory.UserClient, fsEventClient inventory.FsEventClient) EventHub {
-    e := &eventHub{
-        topics:        make(map[int]map[string]*subscriber),
-        userClient:    userClient,
-        fsEventClient: fsEventClient,
-        closeCh:       make(chan struct{}),
-    }
-
-    // Remove all existing FsEvents
-    fsEventClient.DeleteAll(context.Background())
-
-    // Start background cleanup goroutine
-    e.wg.Add(1)
-    go e.cleanupLoop()
-
-    return e
-}
-
-// cleanupLoop periodically removes subscribers that have been offline for too long.
-func (e *eventHub) cleanupLoop() {
-    defer e.wg.Done()
-
-    ticker := time.NewTicker(cleanupPeriod)
-    defer ticker.Stop()
-
-    for {
-        select {
-        case <-e.closeCh:
-            return
-        case <-ticker.C:
-            e.cleanupExpiredSubscribers()
-        }
-    }
-}
-
-// cleanupExpiredSubscribers removes subscribers that have been offline for more than 14 days.
-func (e *eventHub) cleanupExpiredSubscribers() {
-    e.mu.Lock()
-    defer e.mu.Unlock()
-
-    if e.closed {
-        return
-    }
-
-    for topic, subs := range e.topics {
-        for id, sub := range subs {
-            if sub.shouldExpire() {
-                sub.close()
-                delete(subs, id)
-            }
-        }
-        if len(subs) == 0 {
-            delete(e.topics, topic)
-        }
-    }
-}
-
-func (e *eventHub) GetSubscribers(ctx context.Context, topic int) []Subscriber {
-    e.mu.RLock()
-    defer e.mu.RUnlock()
-
-    subs := make([]Subscriber, 0, len(e.topics[topic]))
-    for _, v := range e.topics[topic] {
-        subs = append(subs, v)
-    }
-    return subs
-}
-
-func (e *eventHub) Subscribe(ctx context.Context, topic int, id string) (chan *Event, bool, error) {
-    l := logging.FromContext(ctx)
-    l.Info("Subscribing to event hub for topic %d with id %s", topic, id)
-
-    e.mu.Lock()
-    defer e.mu.Unlock()
-
-    if e.closed {
-        return nil, false, ErrEventHubClosed
-    }
-
-    subs, ok := e.topics[topic]
-    if !ok {
-        subs = make(map[string]*subscriber)
-        e.topics[topic] = subs
-    }
-
-    // Check if subscriber already exists
-    if existingSub, ok := subs[id]; ok {
-        if existingSub.isClosed() {
-            // Subscriber was closed, create a new one
-            delete(subs, id)
-        } else {
-            // Reactivate the offline subscriber
-            l.Info("Reactivating offline subscriber %s for topic %d", id, topic)
-            existingSub.setOnline(ctx)
-            return existingSub.ch, true, nil
-        }
-    }
-
-    sub, err := newSubscriber(ctx, id, e.userClient, e.fsEventClient)
-    if err != nil {
-        return nil, false, err
-    }
-
-    e.topics[topic][id] = sub
-    return sub.ch, false, nil
-}
-
-func (e *eventHub) Unsubscribe(ctx context.Context, topic int, id string) {
-    l := logging.FromContext(ctx)
-    l.Info("Marking subscriber offline for topic %d with id %s", topic, id)
-
-    e.mu.Lock()
-    defer e.mu.Unlock()
-
-    if e.closed {
-        return
-    }
-
-    subs, ok := e.topics[topic]
-    if !ok {
-        return
-    }
-
-    if sub, ok := subs[id]; ok {
-        // Stop debounce timer but keep events in buffer
-        sub.Stop()
-        // Mark as offline instead of deleting
-        sub.setOffline()
-    }
-}
-
-// Close shuts down the event hub and disconnects all subscribers.
-func (e *eventHub) Close() {
-    e.mu.Lock()
-
-    if e.closed {
-        e.mu.Unlock()
-        return
-    }
-
-    e.closed = true
-    close(e.closeCh)
-
-    // Close all subscribers
-    for _, subs := range e.topics {
-        for _, sub := range subs {
-            sub.close()
-        }
-    }
-    e.topics = nil
-
-    e.mu.Unlock()
-
-    // Wait for cleanup goroutine to finish
-    e.wg.Wait()
-}
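The hub's `cleanupLoop` is a standard ticker-plus-close-channel goroutine. A minimal, runnable version of just that pattern, with intervals shortened for demonstration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	closeCh := make(chan struct{})
	done := make(chan struct{})
	go func() {
		defer close(done)
		ticker := time.NewTicker(10 * time.Millisecond) // cleanupPeriod stand-in
		defer ticker.Stop()
		for {
			select {
			case <-closeCh:
				return // Close() was called
			case <-ticker.C:
				fmt.Println("cleanup pass") // expire stale subscribers here
			}
		}
	}()
	time.Sleep(25 * time.Millisecond)
	close(closeCh) // mirrors close(e.closeCh) in Close()
	<-done         // mirrors e.wg.Wait()
}
```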
@@ -1,317 +0,0 @@
-package eventhub
-
-import (
-    "context"
-    "encoding/json"
-    "errors"
-    "fmt"
-    "sync"
-    "time"
-
-    "github.com/cloudreve/Cloudreve/v4/ent"
-    "github.com/cloudreve/Cloudreve/v4/inventory"
-    "github.com/cloudreve/Cloudreve/v4/pkg/logging"
-    "github.com/gofrs/uuid"
-    "github.com/samber/lo"
-)
-
-type Subscriber interface {
-    ID() string
-    Ch() chan *Event
-    Publish(evt Event)
-    Stop()
-    Buffer() []*Event
-    // Owner returns the owner of the subscriber.
-    Owner() (*ent.User, error)
-    // Online returns whether the subscriber is online.
-    Online() bool
-    // OfflineSince returns when the subscriber went offline.
-    // Returns zero time if the subscriber is online.
-    OfflineSince() time.Time
-}
-
-const (
-    debounceDelay = 5 * time.Second
-    userCacheTTL  = 1 * time.Hour
-    offlineMaxAge = 14 * 24 * time.Hour // 14 days
-)
-
-type subscriber struct {
-    mu            sync.Mutex
-    userClient    inventory.UserClient
-    fsEventClient inventory.FsEventClient
-
-    id  string
-    uid int
-    ch  chan *Event
-
-    // Online status
-    online       bool
-    offlineSince time.Time
-
-    // Debounce buffer for pending events
-    buffer []*Event
-    timer  *time.Timer
-
-    // Owner info
-    ownerCached *ent.User
-    cachedAt    time.Time
-
-    // Close signal
-    closed   bool
-    closedCh chan struct{}
-}
-
-func newSubscriber(ctx context.Context, id string, userClient inventory.UserClient, fsEventClient inventory.FsEventClient) (*subscriber, error) {
-    user := inventory.UserFromContext(ctx)
-    if user == nil || inventory.IsAnonymousUser(user) {
-        return nil, errors.New("user not found")
-    }
-
-    return &subscriber{
-        id:            id,
-        ch:            make(chan *Event, bufSize),
-        userClient:    userClient,
-        fsEventClient: fsEventClient,
-        ownerCached:   user,
-        uid:           user.ID,
-        cachedAt:      time.Now(),
-        online:        true,
-        closedCh:      make(chan struct{}),
-    }, nil
-}
-
-func (s *subscriber) ID() string {
-    return s.id
-}
-
-func (s *subscriber) Ch() chan *Event {
-    return s.ch
-}
-
-func (s *subscriber) Online() bool {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-    return s.online
-}
-
-func (s *subscriber) OfflineSince() time.Time {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-    return s.offlineSince
-}
-
-func (s *subscriber) Owner() (*ent.User, error) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if time.Since(s.cachedAt) > userCacheTTL || s.ownerCached == nil {
-        user, err := s.userClient.GetLoginUserByID(context.Background(), s.uid)
-        if err != nil {
-            return nil, fmt.Errorf("failed to get login user: %w", err)
-        }
-
-        s.ownerCached = user
-        s.cachedAt = time.Now()
-    }
-
-    return s.ownerCached, nil
-}
-
-// Publish adds an event to the buffer and starts/resets the debounce timer.
-// Events will be flushed to the channel after the debounce delay.
-// If the subscriber is offline, events are kept in the buffer only.
-func (s *subscriber) Publish(evt Event) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if s.closed {
-        return
-    }
-
-    s.publishLocked(evt)
-}
-
-// publishLocked adds an event to the buffer and manages the debounce timer.
-// Caller must hold s.mu.
-func (s *subscriber) publishLocked(evt Event) {
-    // Add event to buffer
-    s.buffer = append(s.buffer, &evt)
-
-    // Reset or start the debounce timer
-    if s.timer != nil {
-        s.timer.Stop()
-    }
-    s.timer = time.AfterFunc(debounceDelay, s.flush)
-}
-
-// flush sends all buffered events to the channel.
-// Called by the debounce timer.
-func (s *subscriber) flush() {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    s.flushLocked(context.Background())
-}
-
-// flushLocked sends all buffered events to the channel.
-// Caller must hold s.mu.
-func (s *subscriber) flushLocked(ctx context.Context) {
-    if len(s.buffer) == 0 || s.closed {
-        return
-    }
-
-    if !s.online {
-        _ = s.fsEventClient.Create(ctx, s.ownerCached.ID, uuid.FromStringOrNil(s.id), lo.Map(s.buffer, func(item *Event, index int) string {
-            res, _ := json.Marshal(item)
-            return string(res)
-        })...)
-    } else {
-        // TODO: implement event merging logic here
-        // For now, send all buffered events individually
-        debouncedEvents := DebounceEvents(s.buffer)
-        for _, evt := range debouncedEvents {
-            select {
-            case s.ch <- evt:
-            default:
-                // Non-blocking send; drop if subscriber is slow
-            }
-        }
-    }
-
-    // Clear the buffer
-    s.buffer = nil
-    s.timer = nil
-}
-
-// Stop cancels any pending debounce timer and flushes remaining events.
-// Should be called before closing the subscriber.
-func (s *subscriber) Stop() {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if s.timer != nil {
-        s.timer.Stop()
-        s.timer = nil
-    }
-
-    // Flush any remaining events before stopping
-    s.flushLocked(context.Background())
-}
-
-// setOnline marks the subscriber as online and flushes any buffered events.
-func (s *subscriber) setOnline(ctx context.Context) {
-    l := logging.FromContext(ctx)
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if s.closed {
-        return
-    }
-
-    s.online = true
-    s.ownerCached = nil
-    s.offlineSince = time.Time{}
-
-    // Retrieve events from inventory
-    events, err := s.fsEventClient.TakeBySubscriber(ctx, uuid.FromStringOrNil(s.id), s.uid)
-    if err != nil {
-        l.Error("Failed to get events from inventory: %s", err)
-        return
-    }
-
-    // Append events to buffer
-    for _, event := range events {
-        var eventParsed Event
-        err := json.Unmarshal([]byte(event.Event), &eventParsed)
-        if err != nil {
-            l.Error("Failed to unmarshal event: %s", err)
-            continue
-        }
-        s.buffer = append(s.buffer, &eventParsed)
-    }
-
-    // Flush buffered events if any
-    if len(s.buffer) > 0 {
-        if s.timer != nil {
-            s.timer.Stop()
-        }
-        s.timer = time.AfterFunc(debounceDelay, s.flush)
-    }
-}
-
-// setOffline marks the subscriber as offline.
-func (s *subscriber) setOffline() {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if s.closed {
-        return
-    }
-
-    s.online = false
-    s.offlineSince = time.Now()
-
-    // Stop the timer, events will be kept in buffer
-    if s.timer != nil {
-        s.timer.Stop()
-        s.timer = nil
-    }
-
-    // flush the buffer
-    s.flushLocked(context.Background())
-}
-
-// close permanently closes the subscriber.
-func (s *subscriber) close() {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if s.closed {
-        return
-    }
-
-    s.closed = true
-    if s.timer != nil {
-        s.timer.Stop()
-        s.timer = nil
-    }
-
-    // Delete the FsEvent
-    s.fsEventClient.DeleteBySubscriber(context.Background(), uuid.FromStringOrNil(s.id))
-
-    // Signal close and close the channel
-    close(s.closedCh)
-    close(s.ch)
-    s.buffer = nil
-}
-
-// isClosed returns whether the subscriber is closed.
-func (s *subscriber) isClosed() bool {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-    return s.closed
-}
-
-// shouldExpire returns whether the subscriber should be expired (offline for too long).
-func (s *subscriber) shouldExpire() bool {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-    return !s.online && !s.offlineSince.IsZero() && time.Since(s.offlineSince) > offlineMaxAge
-}
-
-// Buffer returns a copy of the current buffered events.
-// Useful for debugging or implementing custom merging logic.
-func (s *subscriber) Buffer() []*Event {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if len(s.buffer) == 0 {
-        return nil
-    }
-
-    // Return a copy to avoid data races
-    buf := make([]*Event, len(s.buffer))
-    copy(buf, s.buffer)
-    return buf
-}
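`Publish` implements a classic reset-on-every-event debounce with `time.AfterFunc`: a burst of events yields a single flush after the delay. A compact standalone version of the same pattern, with the 5-second delay above shortened for the demo:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type debouncer struct {
	mu     sync.Mutex
	buffer []string
	timer  *time.Timer
}

// publish buffers the event and resets the timer, like publishLocked above.
func (d *debouncer) publish(evt string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.buffer = append(d.buffer, evt)
	if d.timer != nil {
		d.timer.Stop()
	}
	d.timer = time.AfterFunc(50*time.Millisecond, d.flush)
}

// flush runs on the timer goroutine once the burst goes quiet.
func (d *debouncer) flush() {
	d.mu.Lock()
	defer d.mu.Unlock()
	fmt.Println("flushing", len(d.buffer), "events")
	d.buffer = nil
	d.timer = nil
}

func main() {
	d := &debouncer{}
	for i := 0; i < 5; i++ {
		d.publish(fmt.Sprintf("evt-%d", i))
	}
	time.Sleep(100 * time.Millisecond) // one flush of 5 events, not five flushes
}
```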
@@ -4,8 +4,12 @@ import (
     "context"
     "errors"
     "fmt"
+    "math/rand"
     "path"
+    "path/filepath"
+    "regexp"
+    "strconv"
     "strings"
     "sync"
     "time"
 
@@ -14,8 +18,6 @@ import (
     "github.com/cloudreve/Cloudreve/v4/inventory"
     "github.com/cloudreve/Cloudreve/v4/inventory/types"
     "github.com/cloudreve/Cloudreve/v4/pkg/cache"
-    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
-    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
     "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
     "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
     "github.com/cloudreve/Cloudreve/v4/pkg/hashid"

@@ -45,7 +47,7 @@ type (
 func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
     l logging.Logger, ls lock.LockSystem, settingClient setting.Provider,
     storagePolicyClient inventory.StoragePolicyClient, hasher hashid.Encoder, userClient inventory.UserClient,
-    cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient, encryptorFactory encrypt.CryptorFactory, eventHub eventhub.EventHub) fs.FileSystem {
+    cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient) fs.FileSystem {
     return &DBFS{
         user:       u,
         navigators: make(map[string]Navigator),

@@ -60,8 +62,6 @@ func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inv
         cache:            cache,
         stateKv:          stateKv,
         directLinkClient: directLinkClient,
-        encryptorFactory: encryptorFactory,
-        eventHub:         eventHub,
     }
 }
 

@@ -80,8 +80,6 @@ type DBFS struct {
     cache            cache.Driver
     stateKv          cache.Driver
     mu               sync.Mutex
-    encryptorFactory encrypt.CryptorFactory
-    eventHub         eventhub.EventHub
 }
 
 func (f *DBFS) Recycle() {

@@ -125,7 +123,7 @@ func (f *DBFS) List(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.Fi
 
     parent, err := f.getFileByPath(ctx, navigator, path)
     if err != nil {
-        return nil, nil, fmt.Errorf("parent not exist: %w", err)
+        return nil, nil, fmt.Errorf("Parent not exist: %w", err)
     }
 
     pageSize := 0

@@ -289,7 +287,6 @@ func (f *DBFS) CreateEntity(ctx context.Context, file fs.File, policy *ent.Stora
         Source:          req.Props.SavePath,
         Size:            req.Props.Size,
         UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
-        EncryptMetadata: o.encryptMetadata,
     })
     if err != nil {
         _ = inventory.Rollback(tx)

@@ -620,7 +617,6 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
             ModifiedAt:      o.UploadRequest.Props.LastModified,
             UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
             Importing:       o.UploadRequest.ImportFrom != nil,
-            EncryptMetadata: o.encryptMetadata,
         }
     }
 

@@ -646,23 +642,7 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
     }
 
     file.SetEntities([]*ent.Entity{entity})
-    newFile := newFile(parent, file)
-    f.emitFileCreated(ctx, newFile)
-    return newFile, nil
-}
-
-func (f *DBFS) generateEncryptMetadata(ctx context.Context, uploadRequest *fs.UploadRequest, policy *ent.StoragePolicy) (*types.EncryptMetadata, error) {
-    relayEnabled := policy.Settings != nil && policy.Settings.Relay
-    if (len(uploadRequest.Props.EncryptionSupported) > 0 && uploadRequest.Props.EncryptionSupported[0] == types.CipherAES256CTR) || relayEnabled {
-        encryptor, err := f.encryptorFactory(types.CipherAES256CTR)
-        if err != nil {
-            return nil, fmt.Errorf("failed to get encryptor: %w", err)
-        }
-
-        return encryptor.GenerateMetadata(ctx)
-    }
-
-    return nil, nil
+    return newFile(parent, file), nil
 }
 
 // getPreferredPolicy tries to get the preferred storage policy for the given file.

@@ -788,16 +768,71 @@ func (f *DBFS) navigatorId(path *fs.URI) string {
 // generateSavePath generates the physical save path for the upload request.
 func generateSavePath(policy *ent.StoragePolicy, req *fs.UploadRequest, user *ent.User) string {
     currentTime := time.Now()
-    dynamicReplace := func(rule string, pathAvailable bool) string {
-        return util.ReplaceMagicVar(rule, fs.Separator, pathAvailable, false, currentTime, user.ID, req.Props.Uri.Name(), req.Props.Uri.Dir(), "")
+    originName := req.Props.Uri.Name()
+
+    dynamicReplace := func(regPattern string, rule string, pathAvailable bool) string {
+        re := regexp.MustCompile(regPattern)
+        return re.ReplaceAllStringFunc(rule, func(match string) string {
+            switch match {
+            case "{timestamp}":
+                return strconv.FormatInt(currentTime.Unix(), 10)
+            case "{timestamp_nano}":
+                return strconv.FormatInt(currentTime.UnixNano(), 10)
+            case "{datetime}":
+                return currentTime.Format("20060102150405")
+            case "{date}":
+                return currentTime.Format("20060102")
+            case "{year}":
+                return currentTime.Format("2006")
+            case "{month}":
+                return currentTime.Format("01")
+            case "{day}":
+                return currentTime.Format("02")
+            case "{hour}":
+                return currentTime.Format("15")
+            case "{minute}":
+                return currentTime.Format("04")
+            case "{second}":
+                return currentTime.Format("05")
+            case "{uid}":
+                return strconv.Itoa(user.ID)
+            case "{randomkey16}":
+                return util.RandStringRunes(16)
+            case "{randomkey8}":
+                return util.RandStringRunes(8)
+            case "{randomnum8}":
+                return strconv.Itoa(rand.Intn(8))
+            case "{randomnum4}":
+                return strconv.Itoa(rand.Intn(4))
+            case "{randomnum3}":
+                return strconv.Itoa(rand.Intn(3))
+            case "{randomnum2}":
+                return strconv.Itoa(rand.Intn(2))
+            case "{uuid}":
+                return uuid.Must(uuid.NewV4()).String()
+            case "{path}":
+                if pathAvailable {
+                    return req.Props.Uri.Dir() + fs.Separator
+                }
+                return match
+            case "{originname}":
+                return originName
+            case "{ext}":
+                return filepath.Ext(originName)
+            case "{originname_without_ext}":
+                return strings.TrimSuffix(originName, filepath.Ext(originName))
+            default:
+                return match
+            }
+        })
     }
 
     dirRule := policy.DirNameRule
-    dirRule = dynamicReplace(dirRule, true)
+    dirRule = filepath.ToSlash(dirRule)
+    dirRule = dynamicReplace(`\{[^{}]+\}`, dirRule, true)
 
     nameRule := policy.FileNameRule
-    nameRule = dynamicReplace(nameRule, false)
+    nameRule = dynamicReplace(`\{[^{}]+\}`, nameRule, false)
 
     return path.Join(path.Clean(dirRule), nameRule)
 }
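The 4.6.0-side `generateSavePath` expands `{placeholder}` magic variables with a single `regexp.ReplaceAllStringFunc` pass. A trimmed, runnable sketch covering a few of the placeholders; the full switch above handles many more:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"time"
)

// expand replaces {placeholder} tokens in rule, leaving unknown tokens
// verbatim, exactly like the default branch in the hunk above.
func expand(rule string, uid int, now time.Time) string {
	re := regexp.MustCompile(`\{[^{}]+\}`)
	return re.ReplaceAllStringFunc(rule, func(match string) string {
		switch match {
		case "{year}":
			return now.Format("2006")
		case "{month}":
			return now.Format("01")
		case "{uid}":
			return strconv.Itoa(uid)
		default:
			return match
		}
	})
}

func main() {
	now := time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(expand("uploads/{uid}/{year}/{month}", 42, now))
	// Output: uploads/42/2025/06
}
```

Leaving unknown placeholders in place rather than erasing them is a deliberate choice: a typo in a naming rule then shows up literally in the generated path, where it is easy to spot.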
@@ -1,150 +0,0 @@
-package dbfs
-
-import (
-    "context"
-    "path"
-    "strings"
-
-    "github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
-    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
-    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
-    "github.com/cloudreve/Cloudreve/v4/pkg/hashid"
-    "github.com/samber/lo"
-)
-
-func (f *DBFS) emitFileCreated(ctx context.Context, file *File) {
-    subscribers := f.getEligibleSubscriber(ctx, file, true)
-    for _, subscriber := range subscribers {
-        subscriber.Publish(eventhub.Event{
-            Type:   eventhub.EventTypeCreate,
-            FileID: hashid.EncodeFileID(f.hasher, file.Model.ID),
-            From:   subscriber.relativePath(file),
-        })
-    }
-}
-
-func (f *DBFS) emitFileModified(ctx context.Context, file *File) {
-    subscribers := f.getEligibleSubscriber(ctx, file, true)
-    for _, subscriber := range subscribers {
-        subscriber.Publish(eventhub.Event{
-            Type:   eventhub.EventTypeModify,
-            FileID: hashid.EncodeFileID(f.hasher, file.Model.ID),
-            From:   subscriber.relativePath(file),
-        })
-    }
-}
-
-func (f *DBFS) emitFileRenamed(ctx context.Context, file *File, newName string) {
-    subscribers := f.getEligibleSubscriber(ctx, file, true)
-    for _, subscriber := range subscribers {
-        from := subscriber.relativePath(file)
-        to := strings.TrimSuffix(from, file.Name()) + newName
-        subscriber.Publish(eventhub.Event{
-            Type:   eventhub.EventTypeRename,
-            FileID: hashid.EncodeFileID(f.hasher, file.Model.ID),
-            From:   subscriber.relativePath(file),
-            To:     to,
-        })
-    }
-}
-
-func (f *DBFS) emitFileDeleted(ctx context.Context, files ...*File) {
-    for _, file := range files {
-        subscribers := f.getEligibleSubscriber(ctx, file, true)
-        for _, subscriber := range subscribers {
-            subscriber.Publish(eventhub.Event{
-                Type:   eventhub.EventTypeDelete,
-                FileID: hashid.EncodeFileID(f.hasher, file.Model.ID),
-                From:   subscriber.relativePath(file),
-            })
-        }
-    }
-}
-
-func (f *DBFS) emitFileMoved(ctx context.Context, src, dst *File) {
-    srcSubMap := lo.SliceToMap(f.getEligibleSubscriber(ctx, src, true), func(subscriber foundSubscriber) (string, *foundSubscriber) {
-        return subscriber.ID(), &subscriber
-    })
-    dstSubMap := lo.SliceToMap(f.getEligibleSubscriber(ctx, dst, false), func(subscriber foundSubscriber) (string, *foundSubscriber) {
-        return subscriber.ID(), &subscriber
-    })
-
-    for _, subscriber := range srcSubMap {
-        subId := subscriber.ID()
-        if dstSub, ok := dstSubMap[subId]; ok {
-            // Src and Dst subscribed by the same subscriber
-            subscriber.Publish(eventhub.Event{
-                Type:   eventhub.EventTypeRename,
-                FileID: hashid.EncodeFileID(f.hasher, src.Model.ID),
-                From:   subscriber.relativePath(src),
-                To:     path.Join(dstSub.relativePath(dst), src.Name()),
-            })
-            delete(dstSubMap, subId)
-        } else {
-            // Only Src is subscribed by the subscriber
-            subscriber.Publish(eventhub.Event{
-                Type:   eventhub.EventTypeDelete,
-                FileID: hashid.EncodeFileID(f.hasher, src.Model.ID),
-                From:   subscriber.relativePath(src),
-            })
-        }
-    }
-
-    for _, subscriber := range dstSubMap {
-        // Only Dst is subscribed by the subscriber
-        subscriber.Publish(eventhub.Event{
-            Type:   eventhub.EventTypeCreate,
-            FileID: hashid.EncodeFileID(f.hasher, src.Model.ID),
-            From:   path.Join(subscriber.relativePath(dst), src.Name()),
-        })
-    }
-
-}
-
-func (f *DBFS) getEligibleSubscriber(ctx context.Context, file *File, checkParentPerm bool) []foundSubscriber {
-    roots := file.Ancestors()
-    if !checkParentPerm {
-        // Include file itself
-        roots = file.AncestorsChain()
-    }
-    requestInfo := requestinfo.RequestInfoFromContext(ctx)
-    eligibleSubscribers := make([]foundSubscriber, 0)
-
-    for _, root := range roots {
-        subscribers := f.eventHub.GetSubscribers(ctx, root.Model.ID)
-        subscribers = lo.Filter(subscribers, func(subscriber eventhub.Subscriber, index int) bool {
-            // Exlucde self from subscribers
-            if requestInfo != nil && subscriber.ID() == requestInfo.ClientID {
-                return false
-            }
-            return true
-        })
-        eligibleSubscribers = append(eligibleSubscribers, lo.Map(subscribers, func(subscriber eventhub.Subscriber, index int) foundSubscriber {
-            return foundSubscriber{
-                Subscriber: subscriber,
-                root:       root,
-            }
-        })...)
-    }
-
-    return eligibleSubscribers
-
-}
-
-type foundSubscriber struct {
-    eventhub.Subscriber
-    root *File
-}
-
-func (s *foundSubscriber) relativePath(file *File) string {
-    res := strings.TrimPrefix(file.Uri(true).Path(), s.root.Uri(true).Path())
-    if res == "" {
-        res = fs.Separator
-    }
-
-    if res[0] != fs.Separator[0] {
-        res = fs.Separator + res
-    }
-
-    return res
-}
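The deleted events helper computes event paths relative to each subscriber's root via prefix trimming (`foundSubscriber.relativePath`). A standalone version of that normalization, using plain strings in place of the `fs.URI` types:

```go
package main

import (
	"fmt"
	"strings"
)

// relativePath trims the subscription root off the file path and
// normalizes the result to always start with "/", like the removed
// foundSubscriber.relativePath.
func relativePath(rootPath, filePath string) string {
	res := strings.TrimPrefix(filePath, rootPath)
	if res == "" {
		res = "/" // the file is the root itself
	}
	if res[0] != '/' {
		res = "/" + res
	}
	return res
}

func main() {
	fmt.Println(relativePath("/my/folder", "/my/folder/docs/a.txt")) // /docs/a.txt
	fmt.Println(relativePath("/my/folder", "/my/folder"))            // /
}
```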
@ -119,7 +119,6 @@ func (f *DBFS) Create(ctx context.Context, path *fs.URI, fileType types.FileType
|
|||
}
|
||||
|
||||
ancestor = newFile(ancestor, newFolder)
|
||||
f.emitFileCreated(ctx, ancestor)
|
||||
} else {
|
||||
// valide file name
|
||||
policy, err := f.getPreferredPolicy(ctx, ancestor)
|
||||
|
|
@ -226,8 +225,6 @@ func (f *DBFS) Rename(ctx context.Context, path *fs.URI, newName string) (fs.Fil
|
|||
return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit rename change", err)
|
||||
}
|
||||
|
||||
f.emitFileRenamed(ctx, target, newName)
|
||||
|
||||
return target.Replace(updated), nil
|
||||
}
|
||||
|
||||
|
|
@ -306,8 +303,6 @@ func (f *DBFS) SoftDelete(ctx context.Context, path ...*fs.URI) error {
|
|||
return serializer.NewError(serializer.CodeDBError, "Failed to commit soft-delete change", err)
|
||||
}
|
||||
|
||||
f.emitFileDeleted(ctx, targets...)
|
||||
|
||||
return ae.Aggregate()
|
||||
}
|
||||
|
||||
|
|
@ -317,9 +312,9 @@ func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([
|
|||
o.apply(opt)
|
||||
}
|
||||
|
||||
var opt *types.EntityProps
|
||||
var opt *types.EntityRecycleOption
|
||||
if o.UnlinkOnly {
|
||||
opt = &types.EntityProps{
|
||||
opt = &types.EntityRecycleOption{
|
||||
UnlinkOnly: true,
|
||||
}
|
||||
}
|
||||
|
|
@ -390,7 +385,7 @@ func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([
|
|||
if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
|
||||
return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit delete change", err)
|
||||
}
|
||||
f.emitFileDeleted(ctx, targets...)
|
||||
|
||||
return newStaleEntities, ae.Aggregate()
|
||||
}
|
||||
|
||||
|
|
@ -608,11 +603,10 @@ func (f *DBFS) MoveOrCopy(ctx context.Context, path []*fs.URI, dst *fs.URI, isCo
|
|||
}
|
||||
|
||||
var (
|
||||
copiedNewTargetsMap map[int]*ent.File
|
||||
storageDiff inventory.StorageDiff
|
||||
storageDiff inventory.StorageDiff
|
||||
)
|
||||
if isCopy {
|
||||
copiedNewTargetsMap, storageDiff, err = f.copyFiles(ctx, fileNavGroup, destination, fc)
|
||||
_, storageDiff, err = f.copyFiles(ctx, fileNavGroup, destination, fc)
|
||||
} else {
|
||||
storageDiff, err = f.moveFiles(ctx, targets, destination, fc, dstNavigator)
|
||||
}
|
||||
|
|
@ -627,14 +621,6 @@ func (f *DBFS) MoveOrCopy(ctx context.Context, path []*fs.URI, dst *fs.URI, isCo
|
|||
return serializer.NewError(serializer.CodeDBError, "Failed to commit move change", err)
|
||||
}
|
||||
|
||||
for _, target := range targets {
|
||||
if isCopy {
|
||||
f.emitFileCreated(ctx, newFile(destination, copiedNewTargetsMap[target.ID()]))
|
||||
} else {
|
||||
f.emitFileMoved(ctx, target, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: after move, dbfs cache should be cleared
|
||||
}
|
||||
|
||||
|
|
@ -730,8 +716,6 @@ func (f *DBFS) deleteEntity(ctx context.Context, target *File, entityId int) (in
|
|||
return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove upload session metadata", err)
|
||||
}
|
||||
}
|
||||
|
||||
f.emitFileModified(ctx, target)
|
||||
return diff, nil
|
||||
}
|
||||
|
||||
|
|
@ -769,14 +753,14 @@ func (f *DBFS) setCurrentVersion(ctx context.Context, target *File, versionId in
|
|||
return serializer.NewError(serializer.CodeDBError, "Failed to commit set current version", err)
|
||||
}
|
||||
|
||||
f.emitFileModified(ctx, target)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityProps) ([]fs.Entity, inventory.StorageDiff, error) {
|
||||
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityRecycleOption) ([]fs.Entity, inventory.StorageDiff, error) {
|
||||
if f.user.Edges.Group == nil {
|
||||
return nil, nil, fmt.Errorf("user group not loaded")
|
||||
}
|
||||
limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
|
||||
allStaleEntities := make([]fs.Entity, 0, len(targets))
|
||||
storageDiff := make(inventory.StorageDiff)
|
||||
for n, files := range targets {
|
||||
|
|
@ -790,7 +774,8 @@ func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, f
|
|||
|
||||
// List all files to be deleted
|
||||
toBeDeletedFiles := make([]*File, 0, len(files))
|
||||
if err := n.Walk(ctx, files, intsets.MaxInt, intsets.MaxInt, func(targets []*File, level int) error {
|
||||
if err := n.Walk(ctx, files, limit, intsets.MaxInt, func(targets []*File, level int) error {
|
||||
limit -= len(targets)
|
||||
toBeDeletedFiles = append(toBeDeletedFiles, targets...)
|
||||
return nil
|
||||
}); err != nil {
|
@@ -2,7 +2,6 @@ package dbfs

import (
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
)

@@ -27,7 +26,6 @@ type dbfsOption struct {
streamListResponseCallback func(parent fs.File, file []fs.File)
ancestor *File
notRoot bool
encryptMetadata *types.EncryptMetadata
}

func newDbfsOption() *dbfsOption {

@@ -52,13 +50,6 @@ func (f optionFunc) Apply(o any) {
}
}

// WithEncryptMetadata sets the encrypt metadata for the upload operation.
func WithEncryptMetadata(encryptMetadata *types.EncryptMetadata) fs.Option {
return optionFunc(func(o *dbfsOption) {
o.encryptMetadata = encryptMetadata
})
}

// WithFilePublicMetadata enables loading file public metadata.
func WithFilePublicMetadata() fs.Option {
return optionFunc(func(o *dbfsOption) {

@@ -129,20 +129,6 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
return nil, err
}

// Encryption setting
var (
encryptMetadata *types.EncryptMetadata
)
if !policy.Settings.Encryption || req.ImportFrom != nil || len(req.Props.EncryptionSupported) == 0 {
req.Props.EncryptionSupported = nil
} else {
res, err := f.generateEncryptMetadata(ctx, req, policy)
if err != nil {
return nil, serializer.NewError(serializer.CodeInternalSetting, "Failed to generate encrypt metadata", err)
}
encryptMetadata = res
}

// validate upload request
if err := validateNewFile(req.Props.Uri.Name(), req.Props.Size, policy); err != nil {
return nil, err

@@ -160,7 +146,11 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
if req.Props.SavePath == "" || isThumbnailAndPolicyNotAvailable {
req.Props.SavePath = generateSavePath(policy, req, f.user)
if isThumbnailAndPolicyNotAvailable {
req.Props.SavePath = path.Clean(util.ReplaceMagicVar(f.settingClient.ThumbEntitySuffix(ctx), fs.Separator, true, true, time.Now(), f.user.ID, req.Props.Uri.Name(), req.Props.Uri.Path(), req.Props.SavePath))
req.Props.SavePath = fmt.Sprintf(
"%s.%s%s",
req.Props.SavePath,
util.RandStringRunes(16),
f.settingClient.ThumbEntitySuffix(ctx))
}
}
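
On the 4.6.0 side, the thumbnail blob path is the original save path plus a random run and the configured suffix. A stdlib-only sketch of that naming scheme (the suffix literal is an assumed setting value, and randString stands in for util.RandStringRunes):

    package main

    import (
        "fmt"
        "math/rand"
    )

    const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    func randString(n int) string {
        b := make([]byte, n)
        for i := range b {
            b[i] = letters[rand.Intn(len(letters))]
        }
        return string(b)
    }

    func main() {
        savePath := "uploads/1/photo.jpg"
        suffix := "._thumb" // assumed ThumbEntitySuffix value
        fmt.Printf("%s.%s%s\n", savePath, randString(16), suffix)
        // e.g. uploads/1/photo.jpg.hJqmJLbVxGKfnzQA._thumb
    }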
@@ -184,7 +174,6 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
entity, err := f.CreateEntity(ctx, ancestor, policy, entityType, req,
WithPreviousVersion(req.Props.PreviousVersion),
fs.WithUploadRequest(req),
WithEncryptMetadata(encryptMetadata),
WithRemoveStaleEntities(),
)
if err != nil {

@@ -200,7 +189,6 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
WithPreferredStoragePolicy(policy),
WithErrorOnConflict(),
WithAncestor(ancestor),
WithEncryptMetadata(encryptMetadata),
)
if err != nil {
_ = inventory.Rollback(dbTx)

@@ -231,15 +219,14 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..

session := &fs.UploadSession{
Props: &fs.UploadProps{
Uri: req.Props.Uri,
Size: req.Props.Size,
SavePath: req.Props.SavePath,
LastModified: req.Props.LastModified,
UploadSessionID: req.Props.UploadSessionID,
ExpireAt: req.Props.ExpireAt,
EntityType: req.Props.EntityType,
Metadata: req.Props.Metadata,
ClientSideEncrypted: req.Props.ClientSideEncrypted,
Uri: req.Props.Uri,
Size: req.Props.Size,
SavePath: req.Props.SavePath,
LastModified: req.Props.LastModified,
UploadSessionID: req.Props.UploadSessionID,
ExpireAt: req.Props.ExpireAt,
EntityType: req.Props.EntityType,
Metadata: req.Props.Metadata,
},
FileID: fileId,
NewFileCreated: !fileExisted,

@@ -251,10 +238,6 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
LockToken: lockToken, // Prevent lock being released.
}

if encryptMetadata != nil {
session.EncryptMetadata = encryptMetadata
}

// TODO: frontend should create new upload session if resumed session does not exist.
return session, nil
}

@@ -355,8 +338,6 @@ func (f *DBFS) CompleteUpload(ctx context.Context, session *fs.UploadSession) (f
}
}

f.emitFileModified(ctx, filePrivate)

file, err = f.Get(ctx, session.Props.Uri, WithFileEntities(), WithNotRoot())
if err != nil {
return nil, fmt.Errorf("failed to get updated file: %w", err)

@@ -183,8 +183,6 @@ type (
UploadSessionID() *uuid.UUID
CreatedBy() *ent.User
Model() *ent.Entity
Props() *types.EntityProps
Encrypted() bool
}

FileExtendedInfo struct {

@@ -240,40 +238,38 @@ type (

// UploadCredential for uploading files in client side.
UploadCredential struct {
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"` // Chunk size; 0 means no chunking
Expires int64 `json:"expires"` // Upload credential expiration time, Unix timestamp
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
UploadID string `json:"uploadID,omitempty"`
Callback string `json:"callback,omitempty"`
Uri string `json:"uri,omitempty"` // Storage path
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // Validity period for COS
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *ent.StoragePolicy
CallbackSecret string `json:"callback_secret,omitempty"`
MimeType string `json:"mime_type,omitempty"` // Expected mimetype
UploadPolicy string `json:"upload_policy,omitempty"` // Upyun upload policy
EncryptMetadata *types.EncryptMetadata `json:"encrypt_metadata,omitempty"`
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"` // Chunk size; 0 means no chunking
Expires int64 `json:"expires"` // Upload credential expiration time, Unix timestamp
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
UploadID string `json:"uploadID,omitempty"`
Callback string `json:"callback,omitempty"` // Callback URL
Uri string `json:"uri,omitempty"` // Storage path
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // Validity period for COS
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *ent.StoragePolicy
CallbackSecret string `json:"callback_secret,omitempty"`
MimeType string `json:"mime_type,omitempty"` // Expected mimetype
UploadPolicy string `json:"upload_policy,omitempty"` // Upyun upload policy
}

// UploadSession stores the information of an upload session, used in server side.
UploadSession struct {
UID int // Uploader (initiator)
Policy *ent.StoragePolicy
FileID int // ID of the placeholder file
EntityID int // ID of the new entity
Callback string // Callback URL
CallbackSecret string // Callback secret
UploadID string // Multi-part upload ID
UploadURL string
Credential string
ChunkSize int64
SentinelTaskID int
NewFileCreated bool // If new file is created for this session
Importing bool // If the upload is importing from another file
EncryptMetadata *types.EncryptMetadata
UID int // Uploader (initiator)
Policy *ent.StoragePolicy
FileID int // ID of the placeholder file
EntityID int // ID of the new entity
Callback string // Callback URL
CallbackSecret string // Callback secret
UploadID string // Multi-part upload ID
UploadURL string
Credential string
ChunkSize int64
SentinelTaskID int
NewFileCreated bool // If new file is created for this session
Importing bool // If the upload is importing from another file

LockToken string // Token of the locked placeholder file
Props *UploadProps

@@ -292,10 +288,8 @@ type (
PreviousVersion string
// EntityType is the type of the entity to be created. If not set, a new file will be created
// with a default version entity. This will be set in update request for existing files.
EntityType *types.EntityType
ExpireAt time.Time
EncryptionSupported []types.Cipher
ClientSideEncrypted bool // Whether the file stream is already encrypted by client side.
EntityType *types.EntityType
ExpireAt time.Time
}

// FsOption options for underlying file system.

@@ -705,8 +699,6 @@ func LockSessionToContext(ctx context.Context, session LockSession) context.Cont
return context.WithValue(ctx, LockSessionCtxKey{}, session)
}

// FindDesiredEntity finds the desired entity from the file.
// entityType is optional, if it is not nil, it will only return the entity with the given type.
func FindDesiredEntity(file File, version string, hasher hashid.Encoder, entityType *types.EntityType) (bool, Entity) {
if version == "" {
return true, file.PrimaryEntity()

@@ -788,14 +780,6 @@ func (e *DbEntity) Model() *ent.Entity {
return e.model
}

func (e *DbEntity) Props() *types.EntityProps {
return e.model.Props
}

func (e *DbEntity) Encrypted() bool {
return e.model.Props != nil && e.model.Props.EncryptMetadata != nil
}

func NewEmptyEntity(u *ent.User) Entity {
return &DbEntity{
model: &ent.Entity{

@@ -36,11 +36,5 @@ func (d *mimeDetector) TypeByName(p string) string {
return m
}

m := mime.TypeByExtension(ext)
if m != "" {
return m
}

// Fallback
return "application/octet-stream"
return mime.TypeByExtension(ext)
}
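
The mimeDetector hunk drops the explicit fallback: master returned application/octet-stream for unknown extensions, while 4.6.0 returns whatever mime.TypeByExtension yields, which is the empty string when the extension is not in the table. A quick stdlib illustration of the difference:

    package main

    import (
        "fmt"
        "mime"
    )

    func typeByExt(ext string) string {
        if m := mime.TypeByExtension(ext); m != "" {
            return m
        }
        // Explicit fallback, as on the master side of the diff.
        return "application/octet-stream"
    }

    func main() {
        fmt.Println(mime.TypeByExtension(".json")) // application/json
        fmt.Println(mime.TypeByExtension(".xyz"))  // "" (no fallback)
        fmt.Println(typeByExt(".xyz"))             // application/octet-stream
    }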
@@ -3,153 +3,19 @@ package manager

import (
"archive/zip"
"context"
"encoding/gob"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"time"

"github.com/bodgit/sevenzip"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/encoding/korean"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/encoding/traditionalchinese"
"golang.org/x/text/encoding/unicode"
"golang.org/x/tools/container/intsets"
)

type (
ArchivedFile struct {
Name string `json:"name"`
Size int64 `json:"size"`
UpdatedAt *time.Time `json:"updated_at"`
IsDirectory bool `json:"is_directory"`
}
)

const (
ArchiveListCacheTTL = 3600 // 1 hour
)

func init() {
gob.Register([]ArchivedFile{})
}

var ZipEncodings = map[string]encoding.Encoding{
"ibm866": charmap.CodePage866,
"iso8859_2": charmap.ISO8859_2,
"iso8859_3": charmap.ISO8859_3,
"iso8859_4": charmap.ISO8859_4,
"iso8859_5": charmap.ISO8859_5,
"iso8859_6": charmap.ISO8859_6,
"iso8859_7": charmap.ISO8859_7,
"iso8859_8": charmap.ISO8859_8,
"iso8859_8I": charmap.ISO8859_8I,
"iso8859_10": charmap.ISO8859_10,
"iso8859_13": charmap.ISO8859_13,
"iso8859_14": charmap.ISO8859_14,
"iso8859_15": charmap.ISO8859_15,
"iso8859_16": charmap.ISO8859_16,
"koi8r": charmap.KOI8R,
"koi8u": charmap.KOI8U,
"macintosh": charmap.Macintosh,
"windows874": charmap.Windows874,
"windows1250": charmap.Windows1250,
"windows1251": charmap.Windows1251,
"windows1252": charmap.Windows1252,
"windows1253": charmap.Windows1253,
"windows1254": charmap.Windows1254,
"windows1255": charmap.Windows1255,
"windows1256": charmap.Windows1256,
"windows1257": charmap.Windows1257,
"windows1258": charmap.Windows1258,
"macintoshcyrillic": charmap.MacintoshCyrillic,
"gbk": simplifiedchinese.GBK,
"gb18030": simplifiedchinese.GB18030,
"big5": traditionalchinese.Big5,
"eucjp": japanese.EUCJP,
"iso2022jp": japanese.ISO2022JP,
"shiftjis": japanese.ShiftJIS,
"euckr": korean.EUCKR,
"utf16be": unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM),
"utf16le": unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM),
}
func (m *manager) ListArchiveFiles(ctx context.Context, uri *fs.URI, entity, zipEncoding string) ([]ArchivedFile, error) {
file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
if err != nil {
return nil, fmt.Errorf("failed to get file: %w", err)
}

if file.Type() != types.FileTypeFile {
return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("path %s is not a file", uri))
}

// Validate file size
if m.user.Edges.Group.Settings.DecompressSize > 0 && file.Size() > m.user.Edges.Group.Settings.DecompressSize {
return nil, fs.ErrFileSizeTooBig.WithError(fmt.Errorf("file size %d exceeds the limit %d", file.Size(), m.user.Edges.Group.Settings.DecompressSize))
}

found, targetEntity := fs.FindDesiredEntity(file, entity, m.hasher, nil)
if !found {
return nil, fs.ErrEntityNotExist
}

var (
enc encoding.Encoding
ok bool
)
if zipEncoding != "" {
enc, ok = ZipEncodings[strings.ToLower(zipEncoding)]
if !ok {
return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("not supported zip encoding: %s", zipEncoding))
}
}

cacheKey := getArchiveListCacheKey(targetEntity.ID(), zipEncoding)
kv := m.kv
res, found := kv.Get(cacheKey)
if found {
return res.([]ArchivedFile), nil
}

es, err := m.GetEntitySource(ctx, 0, fs.WithEntity(targetEntity))
if err != nil {
return nil, fmt.Errorf("failed to get entity source: %w", err)
}

es.Apply(entitysource.WithContext(ctx))
defer es.Close()

var readerFunc func(ctx context.Context, file io.ReaderAt, size int64, textEncoding encoding.Encoding) ([]ArchivedFile, error)
switch file.Ext() {
case "zip":
readerFunc = getZipFileList
case "7z":
readerFunc = get7zFileList
default:
return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("not supported archive format: %s", file.Ext()))
}

sr := io.NewSectionReader(es, 0, targetEntity.Size())
fileList, err := readerFunc(ctx, sr, targetEntity.Size(), enc)
if err != nil {
return nil, fmt.Errorf("failed to read file list: %w", err)
}

kv.Set(cacheKey, fileList, ArchiveListCacheTTL)
return fileList, nil
}

func (m *manager) CreateArchive(ctx context.Context, uris []*fs.URI, writer io.Writer, opts ...fs.Option) (int, error) {
o := newOption()
for _, opt := range opts {

@@ -256,62 +122,3 @@ func (m *manager) compressFileToArchive(ctx context.Context, parent string, file
return err

}

func getZipFileList(ctx context.Context, file io.ReaderAt, size int64, textEncoding encoding.Encoding) ([]ArchivedFile, error) {
zr, err := zip.NewReader(file, size)
if err != nil {
return nil, fmt.Errorf("failed to create zip reader: %w", err)
}

fileList := make([]ArchivedFile, 0, len(zr.File))
for _, f := range zr.File {
hdr := f.FileHeader
if hdr.NonUTF8 && textEncoding != nil {
dec := textEncoding.NewDecoder()
filename, err := dec.String(hdr.Name)
if err == nil {
hdr.Name = filename
}
if hdr.Comment != "" {
comment, err := dec.String(hdr.Comment)
if err == nil {
hdr.Comment = comment
}
}
}

info := f.FileInfo()
modTime := info.ModTime()
fileList = append(fileList, ArchivedFile{
Name: util.FormSlash(hdr.Name),
Size: info.Size(),
UpdatedAt: &modTime,
IsDirectory: info.IsDir(),
})
}
return fileList, nil
}

func get7zFileList(ctx context.Context, file io.ReaderAt, size int64, extEncoding encoding.Encoding) ([]ArchivedFile, error) {
zr, err := sevenzip.NewReader(file, size)
if err != nil {
return nil, fmt.Errorf("failed to create 7z reader: %w", err)
}

fileList := make([]ArchivedFile, 0, len(zr.File))
for _, f := range zr.File {
info := f.FileInfo()
modTime := info.ModTime()
fileList = append(fileList, ArchivedFile{
Name: util.FormSlash(f.Name),
Size: info.Size(),
UpdatedAt: &modTime,
IsDirectory: info.IsDir(),
})
}
return fileList, nil
}

func getArchiveListCacheKey(entity int, encoding string) string {
return fmt.Sprintf("archive_list_%d_%s", entity, encoding)
}
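
Note how ListArchiveFiles wraps the entity source in io.NewSectionReader: archive/zip and sevenzip only need an io.ReaderAt plus a size, so listing never buffers the whole blob. The same pattern against a local file, for illustration (the path is hypothetical):

    package main

    import (
        "archive/zip"
        "fmt"
        "os"
    )

    func main() {
        f, err := os.Open("example.zip") // illustrative path
        if err != nil {
            panic(err)
        }
        defer f.Close()

        st, err := f.Stat()
        if err != nil {
            panic(err)
        }
        // zip.NewReader only needs ReaderAt + size, exactly like the
        // io.NewSectionReader(es, 0, targetEntity.Size()) call above.
        zr, err := zip.NewReader(f, st.Size())
        if err != nil {
            panic(err)
        }
        for _, entry := range zr.File {
            info := entry.FileInfo()
            fmt.Println(entry.Name, info.Size(), info.IsDir())
        }
    }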
@@ -120,7 +120,7 @@ func (m *manager) GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectL
}

source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
m.l, m.config, m.dep.MimeDetector(ctx))
sourceUrl, err := source.Url(ctx,
entitysource.WithSpeedLimit(int64(m.user.Edges.Group.SpeedLimit)),
entitysource.WithDisplayName(file.Name()),

@@ -182,7 +182,7 @@ func (m *manager) GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.Dir
}

source := entitysource.NewEntitySource(primaryEntity, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
m.l, m.config, m.dep.MimeDetector(ctx))
downloadUrl, err := source.Url(ctx,
entitysource.WithExpire(o.Expire),
entitysource.WithDownload(o.IsDownload),

@@ -282,7 +282,7 @@ func (m *manager) GetEntityUrls(ctx context.Context, args []GetEntityUrlArgs, op

// Cache miss, Generate new url
source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx))
m.l, m.config, m.dep.MimeDetector(ctx))
downloadUrl, err := source.Url(ctx,
entitysource.WithExpire(o.Expire),
entitysource.WithDownload(o.IsDownload),

@@ -349,7 +349,7 @@ func (m *manager) GetEntitySource(ctx context.Context, entityID int, opts ...fs.
}

return entitysource.NewEntitySource(entity, handler, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(), m.l,
m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
m.config, m.dep.MimeDetector(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
}

func (l *manager) SetCurrentVersion(ctx context.Context, path *fs.URI, version int) error {

@@ -22,7 +22,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"

@@ -84,7 +83,6 @@ type EntitySourceOptions struct {
OneTimeDownloadKey string
Ctx context.Context
IsThumb bool
DisableCryptor bool
}

type EntityUrl struct {

@@ -145,31 +143,22 @@ func WithThumb(isThumb bool) EntitySourceOption {
})
}

// WithDisableCryptor disable cryptor for file source, file stream will be
// presented as is.
func WithDisableCryptor() EntitySourceOption {
return EntitySourceOptionFunc(func(option any) {
option.(*EntitySourceOptions).DisableCryptor = true
})
}

func (f EntitySourceOptionFunc) Apply(option any) {
f(option)
}

type (
entitySource struct {
e fs.Entity
handler driver.Handler
policy *ent.StoragePolicy
generalAuth auth.Auth
settings setting.Provider
hasher hashid.Encoder
c request.Client
l logging.Logger
config conf.ConfigProvider
mime mime.MimeDetector
encryptorFactory encrypt.CryptorFactory
e fs.Entity
handler driver.Handler
policy *ent.StoragePolicy
generalAuth auth.Auth
settings setting.Provider
hasher hashid.Encoder
c request.Client
l logging.Logger
config conf.ConfigProvider
mime mime.MimeDetector

rsc io.ReadCloser
pos int64

@@ -208,22 +197,20 @@ func NewEntitySource(
l logging.Logger,
config conf.ConfigProvider,
mime mime.MimeDetector,
encryptorFactory encrypt.CryptorFactory,
opts ...EntitySourceOption,
) EntitySource {
s := &entitySource{
e: e,
handler: handler,
policy: policy,
generalAuth: generalAuth,
settings: settings,
hasher: hasher,
c: c,
config: config,
l: l,
mime: mime,
encryptorFactory: encryptorFactory,
o: &EntitySourceOptions{},
e: e,
handler: handler,
policy: policy,
generalAuth: generalAuth,
settings: settings,
hasher: hasher,
c: c,
config: config,
l: l,
mime: mime,
o: &EntitySourceOptions{},
}
for _, opt := range opts {
opt.Apply(s.o)

@@ -250,7 +237,7 @@ func (f *entitySource) CloneToLocalSrc(t types.EntityType, src string) (EntitySo
policy := &ent.StoragePolicy{Type: types.PolicyTypeLocal}
handler := local.New(policy, f.l, f.config)

newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime, f.encryptorFactory).(*entitySource)
newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime).(*entitySource)
newSrc.o = f.o
return newSrc, nil
}

@@ -341,20 +328,6 @@ func (f *entitySource) Serve(w http.ResponseWriter, r *http.Request, opts ...Ent
response.Header.Del("ETag")
response.Header.Del("Content-Disposition")
response.Header.Del("Cache-Control")

// If the response is successful, decrypt the body if needed
if response.StatusCode >= 200 && response.StatusCode < 300 {
// Parse offset from Content-Range header if present
offset := parseContentRangeOffset(response.Header.Get("Content-Range"))

body, err := f.getDecryptedRsc(response.Body, offset)
if err != nil {
return fmt.Errorf("failed to get decrypted rsc: %w", err)
}

response.Body = body
}

logging.Request(f.l,
false,
response.StatusCode,

@@ -581,7 +554,7 @@ func (f *entitySource) ShouldInternalProxy(opts ...EntitySourceOption) bool {
}
handlerCapability := f.handler.Capabilities()
return f.e.ID() == 0 || handlerCapability.StaticFeatures.Enabled(int(driver.HandlerCapabilityProxyRequired)) ||
(f.policy.Settings.InternalProxy || f.e.Encrypted()) && !f.o.NoInternalProxy
f.policy.Settings.InternalProxy && !f.o.NoInternalProxy
}
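
The ShouldInternalProxy change turns on Go operator precedence: && binds tighter than ||, so the parentheses on the master side are what let an encrypted entity force proxying while still allowing the NoInternalProxy option to veto. A compact illustration:

    package main

    import "fmt"

    func main() {
        proxyEnabled, encrypted, noProxyOpt := true, false, true

        // Master-style grouping: the no-proxy option always wins.
        a := (proxyEnabled || encrypted) && !noProxyOpt // false

        // Without parentheses, && binds to `encrypted` only, so the
        // option can no longer veto a policy-enabled proxy.
        b := proxyEnabled || encrypted && !noProxyOpt // true

        fmt.Println(a, b)
    }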
func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*EntityUrl, error) {

@@ -609,7 +582,6 @@ func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*En
// 1. Internal proxy is required by driver's definition
// 2. Internal proxy is enabled in Policy setting and not disabled by option
// 3. It's an empty entity.
// 4. The entity is encrypted and internal proxy not disabled by option
handlerCapability := f.handler.Capabilities()
if f.ShouldInternalProxy() {
siteUrl := f.settings.SiteURL(ctx)

@@ -683,7 +655,6 @@ func (f *entitySource) resetRequest() error {

func (f *entitySource) getRsc(pos int64) (io.ReadCloser, error) {
// For inbound files, we can use the handler to open the file directly
var rsc io.ReadCloser
if f.IsLocal() {
file, err := f.handler.Open(f.o.Ctx, f.e.Source())
if err != nil {

@@ -699,75 +670,46 @@ func (f *entitySource) getRsc(pos int64) (io.ReadCloser, error) {

if f.o.SpeedLimit > 0 {
bucket := ratelimit.NewBucketWithRate(float64(f.o.SpeedLimit), f.o.SpeedLimit)
rsc = lrs{file, ratelimit.Reader(file, bucket)}
return lrs{f.rsc, ratelimit.Reader(f.rsc, bucket)}, nil
} else {
rsc = file
return file, nil
}

}

var urlStr string
now := time.Now()

// Check if we have a valid cached URL and expiry
if f.cachedUrl != "" && now.Before(f.cachedExpiry.Add(-time.Minute)) {
// Use cached URL if it's still valid (with 1 minute buffer before expiry)
urlStr = f.cachedUrl
} else {
var urlStr string
now := time.Now()

// Check if we have a valid cached URL and expiry
if f.cachedUrl != "" && now.Before(f.cachedExpiry.Add(-time.Minute)) {
// Use cached URL if it's still valid (with 1 minute buffer before expiry)
urlStr = f.cachedUrl
} else {
// Generate new URL and cache it
expire := now.Add(defaultUrlExpire)
u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
if err != nil {
return nil, fmt.Errorf("failed to generate download url: %w", err)
}

// Cache the URL and expiry
f.cachedUrl = u.Url
f.cachedExpiry = expire
urlStr = u.Url
}

h := http.Header{}
h.Set("Range", fmt.Sprintf("bytes=%d-", pos))
resp := f.c.Request(http.MethodGet, urlStr, nil,
request.WithContext(f.o.Ctx),
request.WithLogger(f.l),
request.WithHeader(h),
).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
if resp.Err != nil {
return nil, fmt.Errorf("failed to request download url: %w", resp.Err)
}

rsc = resp.Response.Body
}

var err error
rsc, err = f.getDecryptedRsc(rsc, pos)
if err != nil {
return nil, fmt.Errorf("failed to get decrypted rsc: %w", err)
}

return rsc, nil
}

func (f *entitySource) getDecryptedRsc(rsc io.ReadCloser, pos int64) (io.ReadCloser, error) {
props := f.e.Props()
if props != nil && props.EncryptMetadata != nil && !f.o.DisableCryptor {
cryptor, err := f.encryptorFactory(props.EncryptMetadata.Algorithm)
// Generate new URL and cache it
expire := now.Add(defaultUrlExpire)
u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
if err != nil {
return nil, fmt.Errorf("failed to create decryptor: %w", err)
}
err = cryptor.LoadMetadata(f.o.Ctx, props.EncryptMetadata)
if err != nil {
return nil, fmt.Errorf("failed to load metadata: %w", err)
return nil, fmt.Errorf("failed to generate download url: %w", err)
}

if err := cryptor.SetSource(rsc, nil, f.e.Size(), pos); err != nil {
return nil, fmt.Errorf("failed to set source: %w", err)
}

return cryptor, nil
// Cache the URL and expiry
f.cachedUrl = u.Url
f.cachedExpiry = expire
urlStr = u.Url
}

return rsc, nil
h := http.Header{}
h.Set("Range", fmt.Sprintf("bytes=%d-", pos))
resp := f.c.Request(http.MethodGet, urlStr, nil,
request.WithContext(f.o.Ctx),
request.WithLogger(f.l),
request.WithHeader(h),
).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
if resp.Err != nil {
return nil, fmt.Errorf("failed to request download url: %w", resp.Err)
}

return resp.Response.Body, nil
}
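
getDecryptedRsc hands the cryptor the stream plus a byte position, so a ranged download only has to fast-forward the keystream instead of re-reading from zero. A self-contained sketch of that seek for stdlib AES-CTR; the IV layout (low 64 bits as a big-endian block counter) and the helper name are assumptions, not Cloudreve's encrypt package:

    package sketch

    import (
        "crypto/aes"
        "crypto/cipher"
        "encoding/binary"
        "io"
    )

    // ctrAt returns a reader decrypting src, where src starts at byte
    // `pos` of the ciphertext. Carry into the upper IV half is ignored
    // for brevity.
    func ctrAt(key, iv []byte, src io.Reader, pos int64) (io.Reader, error) {
        block, err := aes.NewCipher(key)
        if err != nil {
            return nil, err
        }
        // Advance the counter by pos/16 whole blocks.
        ctr := make([]byte, aes.BlockSize)
        copy(ctr, iv)
        binary.BigEndian.PutUint64(ctr[8:],
            binary.BigEndian.Uint64(iv[8:])+uint64(pos/aes.BlockSize))
        stream := cipher.NewCTR(block, ctr)

        // Burn the keystream for the partial block before pos.
        if rem := int(pos % aes.BlockSize); rem > 0 {
            pad := make([]byte, rem)
            stream.XORKeyStream(pad, pad)
        }
        return &cipher.StreamReader{S: stream, R: src}, nil
    }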
// capExpireTime make sure expire time is not too long or too short (if min or max is set)

@@ -1060,33 +1002,6 @@ func sumRangesSize(ranges []httpRange) (size int64) {
return
}

// parseContentRangeOffset parses the start offset from a Content-Range header.
// Content-Range format: "bytes start-end/total" (e.g., "bytes 100-200/1000")
// Returns 0 if the header is empty, invalid, or cannot be parsed.
func parseContentRangeOffset(contentRange string) int64 {
if contentRange == "" {
return 0
}

// Content-Range format: "bytes start-end/total"
if !strings.HasPrefix(contentRange, "bytes ") {
return 0
}

rangeSpec := strings.TrimPrefix(contentRange, "bytes ")
dashPos := strings.Index(rangeSpec, "-")
if dashPos <= 0 {
return 0
}

start, err := strconv.ParseInt(rangeSpec[:dashPos], 10, 64)
if err != nil {
return 0
}

return start
}
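
parseContentRangeOffset deliberately returns 0 on anything it cannot parse, since offset 0 is always a safe decryption start. Its contract as a table-driven test; this assumes the function above is in scope, and the package name is a guess:

    // parse_test.go, alongside parseContentRangeOffset above.
    package entitysource

    import "testing"

    func TestParseContentRangeOffset(t *testing.T) {
        cases := map[string]int64{
            "bytes 100-200/1000": 100,
            "bytes 0-499/500":    0,
            "":                   0,
            "items 5-9/10":       0, // wrong unit prefix
            "bytes -5/10":        0, // no start value before the dash
        }
        for in, want := range cases {
            if got := parseContentRangeOffset(in); got != want {
                t.Errorf("parseContentRangeOffset(%q) = %d, want %d", in, got, want)
            }
        }
    }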
// countingWriter counts how many bytes have been written to it.
type countingWriter int64

@@ -85,10 +85,7 @@ type (
}

Archiver interface {
// CreateArchive creates an archive
CreateArchive(ctx context.Context, uris []*fs.URI, writer io.Writer, opts ...fs.Option) (int, error)
// ListArchiveFiles lists files in an archive
ListArchiveFiles(ctx context.Context, uri *fs.URI, entity, zipEncoding string) ([]ArchivedFile, error)
}

FileManager interface {

@@ -147,8 +144,7 @@ func NewFileManager(dep dependency.Dep, u *ent.User) FileManager {
user: u,
settings: dep.SettingProvider(),
fs: dbfs.NewDatabaseFS(u, dep.FileClient(), dep.ShareClient(), dep.Logger(), dep.LockSystem(),
dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV(),
dep.DirectLinkClient(), dep.EncryptorFactory(context.TODO()), dep.EventHub()),
dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV(), dep.DirectLinkClient()),
kv: dep.KV(),
config: config,
auth: dep.GeneralAuth(),

@@ -14,7 +14,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/samber/lo"

@@ -107,11 +106,6 @@ func (m *manager) ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, enti
return nil
}

language := ""
if file.Owner().Settings != nil {
language = file.Owner().Settings.Language
}

var (
metas []driver.MediaMeta
)

@@ -123,7 +117,7 @@ func (m *manager) ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, enti
driverCaps := d.Capabilities()
if util.IsInExtensionList(driverCaps.MediaMetaSupportedExts, file.Name()) {
m.l.Debug("Using native driver to generate media meta.")
metas, err = d.MediaMeta(ctx, targetVersion.Source(), file.Ext(), language)
metas, err = d.MediaMeta(ctx, targetVersion.Source(), file.Ext())
if err != nil {
return fmt.Errorf("failed to get media meta using native driver: %w", err)
}

@@ -136,7 +130,7 @@ func (m *manager) ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, enti
return fmt.Errorf("failed to get entity source: %w", err)
}

metas, err = extractor.Extract(ctx, file.Ext(), source, mediameta.WithLanguage(language))
metas, err = extractor.Extract(ctx, file.Ext(), source)
if err != nil {
return fmt.Errorf("failed to extract media meta using local extractor: %w", err)
}

@@ -97,15 +97,6 @@ var (
},
},
"dav": {},
// Allow manipulating thumbnail metadata via public PatchMetadata API
"thumb": {
// Only supported thumb metadata currently is thumb:disabled
dbfs.ThumbDisabledKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
// Presence of this key disables thumbnails; value is ignored.
// We allow both setting and removing this key.
return nil
},
},
customizeMetadataSuffix: {
iconColorMetadataKey: validateColor(false),
emojiIconMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {

@@ -222,7 +222,7 @@ func (m *manager) RecycleEntities(ctx context.Context, force bool, entityIDs ...

toBeDeletedSrc := lo.Map(lo.Filter(chunk, func(item fs.Entity, index int) bool {
// Only delete entities that are not marked as "unlink only"
return item.Model().Props == nil || !item.Model().Props.UnlinkOnly
return item.Model().RecycleOptions == nil || !item.Model().RecycleOptions.UnlinkOnly
}), func(entity fs.Entity, index int) string {
return entity.Source()
})
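
The RecycleEntities hunk composes samber/lo generics: lo.Filter keeps entities not marked unlink-only, and lo.Map projects them to their source paths. The same shape on plain structs, runnable as-is:

    package main

    import (
        "fmt"

        "github.com/samber/lo"
    )

    type entity struct {
        Source     string
        UnlinkOnly bool
    }

    func main() {
        entities := []entity{
            {Source: "blobs/a", UnlinkOnly: false},
            {Source: "blobs/b", UnlinkOnly: true}, // kept on disk, only unlinked
            {Source: "blobs/c", UnlinkOnly: false},
        }

        toDelete := lo.Map(lo.Filter(entities, func(e entity, _ int) bool {
            return !e.UnlinkOnly
        }), func(e entity, _ int) string {
            return e.Source
        })

        fmt.Println(toDelete) // [blobs/a blobs/c]
    }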
@@ -4,8 +4,8 @@ import (
"context"
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/thumb"
"os"
"path"
"runtime"
"time"

@@ -18,7 +18,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/thumb"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/samber/lo"
)

@@ -65,8 +64,7 @@ func (m *manager) Thumbnail(ctx context.Context, uri *fs.URI) (entitysource.Enti
capabilities := handler.Capabilities()
// Check if file extension and size is supported by native policy generator.
if capabilities.ThumbSupportAllExts || util.IsInExtensionList(capabilities.ThumbSupportedExts, file.DisplayName()) &&
(capabilities.ThumbMaxSize == 0 || latest.Size() <= capabilities.ThumbMaxSize) &&
!latest.Encrypted() {
(capabilities.ThumbMaxSize == 0 || latest.Size() <= capabilities.ThumbMaxSize) {
thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(latest), fs.WithUseThumb(true))
if err != nil {
return nil, fmt.Errorf("failed to get latest entity source: %w", err)

@@ -184,9 +182,14 @@ func (m *manager) generateThumb(ctx context.Context, uri *fs.URI, ext string, es
entityType := types.EntityTypeThumbnail
req := &fs.UploadRequest{
Props: &fs.UploadProps{
Uri: uri,
Size: fileInfo.Size(),
SavePath: path.Clean(util.ReplaceMagicVar(m.settings.ThumbEntitySuffix(ctx), fs.Separator, true, true, time.Now(), m.user.ID, uri.Name(), uri.Path(), es.Entity().Source())),
Uri: uri,
Size: fileInfo.Size(),
SavePath: fmt.Sprintf(
"%s.%s%s",
es.Entity().Source(),
util.RandStringRunes(16),
m.settings.ThumbEntitySuffix(ctx),
),
MimeType: m.dep.MimeDetector(ctx).TypeByName("thumb.jpg"),
EntityType: &entityType,
},

@@ -29,7 +29,7 @@ type (
// ConfirmUploadSession confirms whether upload session is valid for upload.
ConfirmUploadSession(ctx context.Context, session *fs.UploadSession, chunkIndex int) (fs.File, error)
// Upload uploads file data to storage
Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy, session *fs.UploadSession) error
Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error
// CompleteUpload completes upload session and returns file object
CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error)
// CancelUploadSession cancels upload session

@@ -93,8 +93,7 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
uploadSession.ChunkSize = uploadSession.Policy.Settings.ChunkSize
// Create upload credential for underlying storage driver
credential := &fs.UploadCredential{}
unrelayed := !uploadSession.Policy.Settings.Relay || m.stateless
if unrelayed {
if !uploadSession.Policy.Settings.Relay || m.stateless {
credential, err = d.Token(ctx, uploadSession, req)
if err != nil {
m.OnUploadFailed(ctx, uploadSession)

@@ -104,18 +103,12 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
// For relayed upload, we don't need to create credential
uploadSession.ChunkSize = 0
credential.ChunkSize = 0
credential.EncryptMetadata = nil
uploadSession.Props.ClientSideEncrypted = false
}
credential.SessionID = uploadSession.Props.UploadSessionID
credential.Expires = req.Props.ExpireAt.Unix()
credential.StoragePolicy = uploadSession.Policy
credential.CallbackSecret = uploadSession.CallbackSecret
credential.Uri = uploadSession.Props.Uri.String()
credential.EncryptMetadata = uploadSession.EncryptMetadata
if !unrelayed {
credential.EncryptMetadata = nil
}

// If upload sentinel check is required, queue a check task
if d.Capabilities().StaticFeatures.Enabled(int(driver.HandlerCapabilityUploadSentinelRequired)) {

@@ -185,34 +178,12 @@ func (m *manager) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts
return m.fs.PrepareUpload(ctx, req, opts...)
}

func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy, session *fs.UploadSession) error {
func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error {
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, policy))
if err != nil {
return err
}

if session != nil && session.EncryptMetadata != nil && !req.Props.ClientSideEncrypted {
cryptor, err := m.dep.EncryptorFactory(ctx)(session.EncryptMetadata.Algorithm)
if err != nil {
return fmt.Errorf("failed to create cryptor: %w", err)
}

err = cryptor.LoadMetadata(ctx, session.EncryptMetadata)
if err != nil {
return fmt.Errorf("failed to load encrypt metadata: %w", err)
}

if err := cryptor.SetSource(req.File, req.Seeker, req.Props.Size, 0); err != nil {
return fmt.Errorf("failed to set source: %w", err)
}

req.File = cryptor

if req.Seeker != nil {
req.Seeker = cryptor
}
}

if err := d.Put(ctx, req); err != nil {
return serializer.NewError(serializer.CodeIOFailed, "Failed to upload file", err)
}

@@ -330,8 +301,6 @@ func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.
}

req.Props.UploadSessionID = uuid.Must(uuid.NewV4()).String()
// Server side supported encryption algorithms
req.Props.EncryptionSupported = []types.Cipher{types.CipherAES256CTR}

if m.stateless {
return m.updateStateless(ctx, req, o)

@@ -343,7 +312,7 @@ func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.
return nil, fmt.Errorf("failed to prepare upload: %w", err)
}

if err := m.Upload(ctx, req, uploadSession.Policy, uploadSession); err != nil {
if err := m.Upload(ctx, req, uploadSession.Policy); err != nil {
m.OnUploadFailed(ctx, uploadSession)
return nil, fmt.Errorf("failed to upload new entity: %w", err)
}

@@ -399,7 +368,7 @@ func (m *manager) updateStateless(ctx context.Context, req *fs.UploadRequest, o
}

req.Props = res.Req.Props
if err := m.Upload(ctx, req, res.Session.Policy, res.Session); err != nil {
if err := m.Upload(ctx, req, res.Session.Policy); err != nil {
if err := o.Node.OnUploadFailed(ctx, &fs.StatelessOnUploadFailedService{
UploadSession: res.Session,
UserID: o.StatelessUserID,

@@ -18,7 +18,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"

@@ -218,18 +217,11 @@ func (m *CreateArchiveTask) listEntitiesAndSendToSlave(ctx context.Context, dep
user := inventory.UserFromContext(ctx)
fm := manager.NewFileManager(dep, user)
storagePolicyClient := dep.StoragePolicyClient()
masterKey, _ := dep.MasterEncryptKeyVault(ctx).GetMasterKey(ctx)

failed, err := fm.CreateArchive(ctx, uris, io.Discard,
fs.WithDryRun(func(name string, e fs.Entity) {
entityModel, err := decryptEntityKeyIfNeeded(masterKey, e.Model())
if err != nil {
m.l.Warning("Failed to decrypt entity key for %q: %s", name, err)
return
}

payload.Entities = append(payload.Entities, SlaveCreateArchiveEntity{
Entity: entityModel,
Entity: e.Model(),
Path: name,
})
if _, ok := payload.Policies[e.PolicyID()]; !ok {

@@ -688,18 +680,3 @@ func (m *SlaveCreateArchiveTask) Progress(ctx context.Context) queue.Progresses

return m.progress
}

func decryptEntityKeyIfNeeded(masterKey []byte, entity *ent.Entity) (*ent.Entity, error) {
if entity.Props == nil || entity.Props.EncryptMetadata == nil || entity.Props.EncryptMetadata.KeyPlainText != nil {
return entity, nil
}

decryptedKey, err := encrypt.DecryptWithMasterKey(masterKey, entity.Props.EncryptMetadata.Key)
if err != nil {
return nil, fmt.Errorf("failed to decrypt entity key: %w", err)
}

entity.Props.EncryptMetadata.KeyPlainText = decryptedKey
entity.Props.EncryptMetadata.Key = nil
return entity, nil
}

@@ -27,6 +27,13 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gofrs/uuid"
"github.com/mholt/archives"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/encoding/korean"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/encoding/traditionalchinese"
"golang.org/x/text/encoding/unicode"
)

type (

@@ -40,15 +47,14 @@ type (
}
ExtractArchiveTaskPhase string
ExtractArchiveTaskState struct {
Uri string `json:"uri,omitempty"`
Encoding string `json:"encoding,omitempty"`
Dst string `json:"dst,omitempty"`
TempPath string `json:"temp_path,omitempty"`
TempZipFilePath string `json:"temp_zip_file_path,omitempty"`
ProcessedCursor string `json:"processed_cursor,omitempty"`
SlaveTaskID int `json:"slave_task_id,omitempty"`
Password string `json:"password,omitempty"`
FileMask []string `json:"file_mask,omitempty"`
Uri string `json:"uri,omitempty"`
Encoding string `json:"encoding,omitempty"`
Dst string `json:"dst,omitempty"`
TempPath string `json:"temp_path,omitempty"`
TempZipFilePath string `json:"temp_zip_file_path,omitempty"`
ProcessedCursor string `json:"processed_cursor,omitempty"`
SlaveTaskID int `json:"slave_task_id,omitempty"`
Password string `json:"password,omitempty"`
NodeState `json:",inline"`
Phase ExtractArchiveTaskPhase `json:"phase,omitempty"`
}

@@ -72,15 +78,54 @@ func init() {
queue.RegisterResumableTaskFactory(queue.ExtractArchiveTaskType, NewExtractArchiveTaskFromModel)
}

var encodings = map[string]encoding.Encoding{
"ibm866": charmap.CodePage866,
"iso8859_2": charmap.ISO8859_2,
"iso8859_3": charmap.ISO8859_3,
"iso8859_4": charmap.ISO8859_4,
"iso8859_5": charmap.ISO8859_5,
"iso8859_6": charmap.ISO8859_6,
"iso8859_7": charmap.ISO8859_7,
"iso8859_8": charmap.ISO8859_8,
"iso8859_8I": charmap.ISO8859_8I,
"iso8859_10": charmap.ISO8859_10,
"iso8859_13": charmap.ISO8859_13,
"iso8859_14": charmap.ISO8859_14,
"iso8859_15": charmap.ISO8859_15,
"iso8859_16": charmap.ISO8859_16,
"koi8r": charmap.KOI8R,
"koi8u": charmap.KOI8U,
"macintosh": charmap.Macintosh,
"windows874": charmap.Windows874,
"windows1250": charmap.Windows1250,
"windows1251": charmap.Windows1251,
"windows1252": charmap.Windows1252,
"windows1253": charmap.Windows1253,
"windows1254": charmap.Windows1254,
"windows1255": charmap.Windows1255,
"windows1256": charmap.Windows1256,
"windows1257": charmap.Windows1257,
"windows1258": charmap.Windows1258,
"macintoshcyrillic": charmap.MacintoshCyrillic,
"gbk": simplifiedchinese.GBK,
"gb18030": simplifiedchinese.GB18030,
"big5": traditionalchinese.Big5,
"eucjp": japanese.EUCJP,
"iso2022jp": japanese.ISO2022JP,
"shiftjis": japanese.ShiftJIS,
"euckr": korean.EUCKR,
"utf16be": unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM),
"utf16le": unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM),
}

// NewExtractArchiveTask creates a new ExtractArchiveTask
func NewExtractArchiveTask(ctx context.Context, src, dst, encoding, password string, mask []string) (queue.Task, error) {
func NewExtractArchiveTask(ctx context.Context, src, dst, encoding, password string) (queue.Task, error) {
state := &ExtractArchiveTaskState{
Uri: src,
Dst: dst,
Encoding: encoding,
NodeState: NodeState{},
Password: password,
FileMask: mask,
}
stateBytes, err := json.Marshal(state)
if err != nil {

@@ -194,21 +239,14 @@ func (m *ExtractArchiveTask) createSlaveExtractTask(ctx context.Context, dep dep
return task.StatusError, fmt.Errorf("failed to get policy: %w", err)
}

masterKey, _ := dep.MasterEncryptKeyVault(ctx).GetMasterKey(ctx)
entityModel, err := decryptEntityKeyIfNeeded(masterKey, archiveFile.PrimaryEntity().Model())
if err != nil {
return task.StatusError, fmt.Errorf("failed to decrypt entity key for archive file %q: %s", archiveFile.DisplayName(), err)
}

payload := &SlaveExtractArchiveTaskState{
FileName: archiveFile.DisplayName(),
Entity: entityModel,
Entity: archiveFile.PrimaryEntity().Model(),
Policy: policy,
Encoding: m.state.Encoding,
Dst: m.state.Dst,
UserID: user.ID,
Password: m.state.Password,
FileMask: m.state.FileMask,
}

payloadStr, err := json.Marshal(payload)

@@ -298,7 +336,7 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen

extractor, ok := format.(archives.Extractor)
if !ok {
return task.StatusError, fmt.Errorf("format not an extractor %s", format.Extension())
return task.StatusError, fmt.Errorf("format not an extractor %s")
}

formatExt := format.Extension()

@@ -333,7 +371,7 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen
if zipExtractor, ok := extractor.(archives.Zip); ok {
if m.state.Encoding != "" {
m.l.Info("Using encoding %q for zip archive", m.state.Encoding)
encoding, ok := manager.ZipEncodings[strings.ToLower(m.state.Encoding)]
encoding, ok := encodings[strings.ToLower(m.state.Encoding)]
if !ok {
m.l.Warning("Unknown encoding %q, fallback to default encoding", m.state.Encoding)
} else {

@@ -378,14 +416,6 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen
rawPath := util.FormSlash(f.NameInArchive)
savePath := dst.JoinRaw(rawPath)

// If file mask is not empty, check if the path is in the mask
if len(m.state.FileMask) > 0 && !isFileInMask(rawPath, m.state.FileMask) {
m.l.Warning("File %q is not in the mask, skipping...", f.NameInArchive)
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
return nil
}

// Check if path is legit
if !strings.HasPrefix(savePath.Path(), util.FillSlash(path.Clean(dst.Path()))) {
m.l.Warning("Path %q is not legit, skipping...", f.NameInArchive)

@@ -415,10 +445,6 @@ func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep depen
Props: &fs.UploadProps{
Uri: savePath,
Size: f.Size(),
LastModified: func() *time.Time {
t := f.FileInfo.ModTime().Local()
return &t
}(),
},
ProgressFunc: func(current, diff int64, total int64) {
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, diff)

@@ -573,7 +599,6 @@ type (
TempZipFilePath string `json:"temp_zip_file_path,omitempty"`
ProcessedCursor string `json:"processed_cursor,omitempty"`
Password string `json:"password,omitempty"`
FileMask []string `json:"file_mask,omitempty"`
}
)

@@ -713,7 +738,7 @@ func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
if zipExtractor, ok := extractor.(archives.Zip); ok {
if m.state.Encoding != "" {
m.l.Info("Using encoding %q for zip archive", m.state.Encoding)
encoding, ok := manager.ZipEncodings[strings.ToLower(m.state.Encoding)]
encoding, ok := encodings[strings.ToLower(m.state.Encoding)]
if !ok {
m.l.Warning("Unknown encoding %q, fallback to default encoding", m.state.Encoding)
} else {

@@ -754,12 +779,6 @@ func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
rawPath := util.FormSlash(f.NameInArchive)
savePath := dst.JoinRaw(rawPath)

// If file mask is not empty, check if the path is in the mask
if len(m.state.FileMask) > 0 && !isFileInMask(rawPath, m.state.FileMask) {
m.l.Debug("File %q is not in the mask, skipping...", f.NameInArchive)
return nil
}

// Check if path is legit
if !strings.HasPrefix(savePath.Path(), util.FillSlash(path.Clean(dst.Path()))) {
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)

@@ -789,10 +808,6 @@ func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
Props: &fs.UploadProps{
Uri: savePath,
Size: f.Size(),
LastModified: func() *time.Time {
t := f.FileInfo.ModTime().Local()
return &t
}(),
},
ProgressFunc: func(current, diff int64, total int64) {
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, diff)

@@ -831,17 +846,3 @@ func (m *SlaveExtractArchiveTask) Progress(ctx context.Context) queue.Progresses
defer m.Unlock()
return m.progress
}

func isFileInMask(path string, mask []string) bool {
if len(mask) == 0 {
return true
}

for _, m := range mask {
if path == m || strings.HasPrefix(path, m+"/") {
return true
}
}

return false
}
|
||||
|
|
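isFileInMask treats each mask entry as either an exact path match or a directory prefix, so a mask entry "docs" selects everything under docs/ without also matching a sibling like docs-old, and an empty mask selects everything. A few illustrative calls against a copy of the helper (the sample paths are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// isFileInMask mirrors the helper above: exact match or directory prefix.
func isFileInMask(path string, mask []string) bool {
	if len(mask) == 0 {
		return true
	}
	for _, m := range mask {
		if path == m || strings.HasPrefix(path, m+"/") {
			return true
		}
	}
	return false
}

func main() {
	mask := []string{"docs", "src/main.go"}
	fmt.Println(isFileInMask("docs/guide.md", mask)) // true  (under docs/)
	fmt.Println(isFileInMask("docs-old/a.md", mask)) // false (prefix needs "/")
	fmt.Println(isFileInMask("src/main.go", mask))   // true  (exact match)
	fmt.Println(isFileInMask("src/util.go", mask))   // false
	fmt.Println(isFileInMask("anything", nil))       // true  (empty mask = all)
}
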
@ -145,12 +145,7 @@ func (e *exifExtractor) Exts() []string {
}

// Reference: https://github.com/photoprism/photoprism/blob/602097635f1c84d91f2d919f7aedaef7a07fc458/internal/meta/exif.go
func (e *exifExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error) {
	option := &option{}
	for _, opt := range opts {
		opt.apply(option)
	}

func (e *exifExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error) {
	localLimit, remoteLimit := e.settings.MediaMetaExifSizeLimit(ctx)
	if err := checkFileSize(localLimit, remoteLimit, source); err != nil {
		return nil, err

@ -4,14 +4,12 @@ import (
	"context"
	"encoding/gob"
	"errors"
	"io"

	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
	"github.com/cloudreve/Cloudreve/v4/pkg/logging"
	"github.com/cloudreve/Cloudreve/v4/pkg/request"
	"github.com/cloudreve/Cloudreve/v4/pkg/setting"
	"github.com/samber/lo"
	"io"
)

type (

@ -19,7 +17,7 @@ type (
	// Exts returns the supported file extensions.
	Exts() []string
	// Extract extracts the media meta from the given source.
	Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error)
	Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error)
}
)

@ -31,7 +29,7 @@ func init() {
	gob.Register([]driver.MediaMeta{})
}

func NewExtractorManager(ctx context.Context, settings setting.Provider, l logging.Logger, client request.Client) Extractor {
func NewExtractorManager(ctx context.Context, settings setting.Provider, l logging.Logger) Extractor {
	e := &extractorManager{
		settings: settings,
		extMap:   make(map[string][]Extractor),

@ -54,11 +52,6 @@ func NewExtractorManager(ctx context.Context, settings setting.Provider, l loggi
	extractors = append(extractors, ffprobeE)
}

if e.settings.MediaMetaGeocodingEnabled(ctx) {
	geocodingE := newGeocodingExtractor(settings, l, client)
	extractors = append(extractors, geocodingE)
}

for _, extractor := range extractors {
	for _, ext := range extractor.Exts() {
		if e.extMap[ext] == nil {

@ -80,12 +73,12 @@ func (e *extractorManager) Exts() []string {
	return lo.Keys(e.extMap)
}

func (e *extractorManager) Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error) {
func (e *extractorManager) Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error) {
	if extractor, ok := e.extMap[ext]; ok {
		res := []driver.MediaMeta{}
		for _, e := range extractor {
			_, _ = source.Seek(0, io.SeekStart)
			data, err := e.Extract(ctx, ext, source, append(opts, WithExtracted(res))...)
			data, err := e.Extract(ctx, ext, source)
			if err != nil {
				return nil, err
			}

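Note the loop semantics on the master side: every extractor registered for the extension runs in turn, the source is rewound with Seek(0, io.SeekStart) before each pass so later extractors see the whole stream, and metadata gathered so far is forwarded via WithExtracted — presumably so a later pass (such as geocoding) can build on fields produced earlier. A standalone sketch of that accumulate-and-chain loop; Meta and Extractor here are simplified stand-ins, not the real Cloudreve types.

package main

import (
	"fmt"
	"io"
	"strings"
)

// Meta is a simplified stand-in for driver.MediaMeta.
type Meta struct{ Key, Value string }

// Extractor is a simplified stand-in: it reads the stream and may
// inspect what earlier extractors produced.
type Extractor func(src io.ReadSeeker, prior []Meta) []Meta

func extractAll(src io.ReadSeeker, extractors []Extractor) []Meta {
	res := []Meta{}
	for _, e := range extractors {
		_, _ = src.Seek(0, io.SeekStart) // rewind before each pass
		res = append(res, e(src, res)...)
	}
	return res
}

func main() {
	exif := func(src io.ReadSeeker, _ []Meta) []Meta {
		b, _ := io.ReadAll(src)
		return []Meta{{"bytes", fmt.Sprint(len(b))}}
	}
	geo := func(_ io.ReadSeeker, prior []Meta) []Meta {
		// The second pass can see the first pass's output.
		return []Meta{{"prior_count", fmt.Sprint(len(prior))}}
	}
	src := strings.NewReader("fake image data")
	fmt.Println(extractAll(src, []Extractor{exif, geo}))
}
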
@ -99,29 +92,6 @@ func (e *extractorManager) Extract(ctx context.Context, ext string, source entit
	}
}

type option struct {
	extracted []driver.MediaMeta
	language  string
}

type optionFunc func(*option)

func (f optionFunc) apply(o *option) {
	f(o)
}

func WithExtracted(extracted []driver.MediaMeta) optionFunc {
	return optionFunc(func(o *option) {
		o.extracted = extracted
	})
}

func WithLanguage(language string) optionFunc {
	return optionFunc(func(o *option) {
		o.language = language
	})
}

// checkFileSize checks if the file size exceeds the limit.
func checkFileSize(localLimit, remoteLimit int64, source entitysource.EntitySource) error {
	if source.IsLocal() && localLimit > 0 && source.Entity().Size() > localLimit {

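The deleted block is the standard Go functional-options pattern: optionFunc closures mutate a private option struct, constructors like WithLanguage build them, and the callee folds a variadic ...optionFunc into its defaults. This is also why adding opts ...optionFunc to Extract is backward compatible — existing call sites simply pass no options. A minimal self-contained sketch of the pattern; the extract function and its default value are illustrative, only the option plumbing itself comes from the diff.

package main

import "fmt"

type option struct {
	language string
}

type optionFunc func(*option)

func (f optionFunc) apply(o *option) { f(o) }

// WithLanguage mirrors the constructor above.
func WithLanguage(language string) optionFunc {
	return optionFunc(func(o *option) { o.language = language })
}

// extract shows how a variadic ...optionFunc is folded into one struct.
func extract(opts ...optionFunc) {
	option := &option{language: "en"} // defaults first
	for _, opt := range opts {
		opt.apply(option)
	}
	fmt.Println("language:", option.language)
}

func main() {
	extract()                      // language: en
	extract(WithLanguage("zh-CN")) // language: zh-CN
}
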
@ -88,19 +88,14 @@ func (f *ffprobeExtractor) Exts() []string {
	return ffprobeExts
}

func (f *ffprobeExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource, opts ...optionFunc) ([]driver.MediaMeta, error) {
	option := &option{}
	for _, opt := range opts {
		opt.apply(option)
	}

func (f *ffprobeExtractor) Extract(ctx context.Context, ext string, source entitysource.EntitySource) ([]driver.MediaMeta, error) {
	localLimit, remoteLimit := f.settings.MediaMetaFFProbeSizeLimit(ctx)
	if err := checkFileSize(localLimit, remoteLimit, source); err != nil {
		return nil, err
	}

	var input string
	if source.IsLocal() && !source.Entity().Encrypted() {
	if source.IsLocal() {
		input = source.LocalPath(ctx)
	} else {
		expire := time.Now().Add(UrlExpire)

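On the master side, ffprobe is handed the on-disk path only when the entity is local and not encrypted — presumably because ffprobe cannot read the encrypted blob directly — and otherwise receives a short-lived URL (note UrlExpire), which works because ffprobe accepts URLs as input. A hedged sketch of invoking ffprobe either way; the flags are standard ffprobe options, while the sample path and URL are placeholders.

package main

import (
	"fmt"
	"os/exec"
)

// probe runs ffprobe against input, which may be a local path or a URL.
func probe(input string) ([]byte, error) {
	cmd := exec.Command("ffprobe",
		"-v", "quiet",
		"-print_format", "json",
		"-show_format",
		"-show_streams",
		input,
	)
	return cmd.Output()
}

func main() {
	// Local file vs. expiring presigned URL — same invocation either way.
	for _, input := range []string{
		"/tmp/sample.mp4",                           // placeholder local path
		"https://storage.example.com/v.mp4?sig=abc", // placeholder signed URL
	} {
		if out, err := probe(input); err == nil {
			fmt.Printf("%s: %d bytes of JSON\n", input, len(out))
		} else {
			fmt.Println(input, "->", err)
		}
	}
}
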
Some files were not shown because too many files have changed in this diff.