Compare commits

...

85 Commits

Author SHA1 Message Date
Aaron Liu 32632db36f feat(fs): change event debounce before emitting to subscriber 2025-12-13 14:48:01 +08:00
Aaron Liu c01b748dfc feat(fs): fs change event notification via SSE / show panic stack trace in task queue 2025-12-13 14:48:01 +08:00
Darren Yu 05c68b4062
fix(thumb blob path): separators be wrongly modified (#3062) (#3116)
* fix(thumb blob path): separators be wrongly modified

* Update common.go
2025-12-05 15:57:58 +08:00
Darren Yu a08c796e3f
fix(ks3): fix content disposition format for download filename (#3040) (#3057) 2025-12-05 15:33:18 +08:00
Aaron Liu fec4dec3ac feat(upload): etag check in client-side upload / support empty policy ID 2025-12-05 15:17:07 +08:00
Aaron Liu 67c6f937c9 fix(oss): disable RSA min key size check for OSS callback (#3038) 2025-11-15 11:59:09 +08:00
Aaron Liu 6ad72e07f4 update submodule 2025-11-14 11:18:39 +08:00
Aaron Liu 994ef7af81 fix(search): multiple metadata search does not work (#3027) 2025-11-12 13:57:38 +08:00
Darren Yu b507c1b893
docs: update feature description (#3023)
* docs: update feature description

* Apply suggestion from @HFO4

---------

Co-authored-by: AaronLiu <abslant.liu@gmail.com>
2025-11-12 13:55:38 +08:00
Darren Yu deecc5c20b
feat(thumb blob path): support magic variables in thumb blob path (#3030) 2025-11-12 13:49:32 +08:00
dependabot[bot] 6085f2090f
chore(deps): bump golang.org/x/image (#2093)
Bumps [golang.org/x/image](https://github.com/golang/image) from 0.0.0-20211028202545-6944b10bf410 to 0.18.0.
- [Commits](https://github.com/golang/image/commits/v0.18.0)

---
updated-dependencies:
- dependency-name: golang.org/x/image
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:56:01 +08:00
dependabot[bot] 670b79eef3
chore(deps): bump github.com/gin-contrib/cors from 1.3.0 to 1.6.0 (#2097)
Bumps [github.com/gin-contrib/cors](https://github.com/gin-contrib/cors) from 1.3.0 to 1.6.0.
- [Release notes](https://github.com/gin-contrib/cors/releases)
- [Changelog](https://github.com/gin-contrib/cors/blob/master/.goreleaser.yaml)
- [Commits](https://github.com/gin-contrib/cors/compare/v1.3.0...v1.6.0)

---
updated-dependencies:
- dependency-name: github.com/gin-contrib/cors
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:53:40 +08:00
dependabot[bot] 4785be81c2
chore(deps): bump github.com/wneessen/go-mail from 0.6.2 to 0.7.1 (#2939)
Bumps [github.com/wneessen/go-mail](https://github.com/wneessen/go-mail) from 0.6.2 to 0.7.1.
- [Release notes](https://github.com/wneessen/go-mail/releases)
- [Commits](https://github.com/wneessen/go-mail/compare/v0.6.2...v0.7.1)

---
updated-dependencies:
- dependency-name: github.com/wneessen/go-mail
  dependency-version: 0.7.1
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:50:54 +08:00
Aaron Liu f27969d74f chore: update required golang version and gzip middleware 2025-10-24 15:07:12 +08:00
Aaron Liu e3580d9351 feat(encryption): add UI and settings for file encryption 2025-10-24 15:04:54 +08:00
Aaron Liu 16b02b1fb3 feat: file blob encryption 2025-10-21 14:54:13 +08:00
Darren Yu 6bd30a8af7
fix(oss): change default expire ttl and sign param to adapt SDK v2 (#2979)
* fix(oss): change default expire ttl and sign param to adapt SDK v2

* fix(oss): add expire ttl limit
2025-10-16 11:49:21 +08:00
Aaron Liu 21cdafb2af fix(oss): traffic limit should be in query instead of headers (#2977) 2025-10-16 07:46:22 +08:00
Aaron Liu e29237d593 fix(webdav): error code for missing parent in mkcol should be `409` instead of `404` (#2953) 2025-10-15 10:28:31 +08:00
Aaron Liu 46897e2880 fix(oss): presigned multipart upload mismatch 2025-10-14 10:21:43 +08:00
Aaron Liu 213eaa54dd update submodule 2025-10-14 09:29:24 +08:00
Aaron Liu e7d6fb25e4 feat(oss): upgrade to SDK v2 (#2963) 2025-10-14 08:49:45 +08:00
Darren Yu e3e08a9b75
feat(share): adapt to keep specified path in V3 sharing link (#2958) 2025-10-12 10:28:40 +08:00
酸柠檬猹Char 78f7ec8b08
fix: Some containers won't auto restart in the current Docker Compose (#2932)
Add "restart: unless-stopped" to the database and redis container.
2025-09-27 22:04:38 +08:00
Aaron Liu 3d41e00384 feat(media meta): add Mapbox as a map provider option (#2922) 2025-09-27 10:19:22 +08:00
Aaron Liu 5e5dca40c4 feat(media meta): reverse geocoding from mapbox (#2922) 2025-09-26 11:27:46 +08:00
Mason Liu 668b542c59
feat: update reset thumbnail feature (#2854)
* update reset thumbnail feature

* consolidate supported thumbnail extensions into site config; remove dedicated API

* allow patching thumb ; remove Reset Thumbnail API

* fix code formatting

---------

Co-authored-by: Aaron Liu <abslant.liu@gmail.com>
2025-09-23 11:24:38 +08:00
Aaron Liu 440ab775b8 chore(compose): add aria2 port mapping 2025-09-23 09:53:31 +08:00
Darren Yu 678593f30d
fix(thumb blob path): remove extra randomkey in thumb blob path (#2893)
* fix(thumb blob path): remove extra randomkey in thumb blob path

* Update upload.go

Refactor SavePath assignment for clarity.

* Update thumbnail.go
2025-09-16 11:44:22 +08:00
Darren Yu 58ceae9708
fix(uploader): failed to generate upload token for some file types (#2847) (#2900)
* fix(mime): `mimeType` not assigned to new value when is empty

* fix(mime): add fallback mime type
2025-09-16 10:35:30 +08:00
Darren Yu 3b8110b648
fix(cos): traffic limit wrongly given in bytes, should be bits (#2899) 2025-09-16 10:33:41 +08:00
Darren Yu f0c5b08428
feat(extract): preserve last modified when extract archive file (#2897) 2025-09-16 10:31:09 +08:00
Darren Yu 9434c2f29b
fix(upgrade v3): validation on unique magic var in either blob name or path (#2890)
* fix(upgrade v3): validation on unique magic var in either blob name or path

* Update policy.go
2025-09-13 16:18:18 +08:00
Aaron Liu 7d97237593 feat(archive viewer): option to select text encoding for zip files (#2867) 2025-09-12 15:41:43 +08:00
Aaron Liu a581851f84 feat(webdav): option to disable system file uploads (#2871) 2025-09-12 14:04:51 +08:00
Darren Yu fe7cf5d0d8
feat(thumb): enhance native thumbnail generator with encoding format and quality (#2868)
* feat(thumb): enhance native thumbnail generator with encoding format and quality

* Update thumbnail.go

* Update obs.go
2025-09-05 11:40:30 +08:00
Aaron Liu cec2b55e1e update submodule 2025-09-02 13:06:56 +08:00
Darren Yu af43746ba2
feat(email): migrate magic variables to email templates title in patches (#2862) 2025-09-02 11:57:49 +08:00
Aaron Liu 9f1cb52cfb feat(explorer): preview archive file content and extract selected files (#2852) 2025-09-02 11:54:04 +08:00
Aaron Liu 4acf9401b8 feat(uploader): concurrent chunk uploads for local/remote storage policy 2025-08-30 10:37:08 +08:00
Aaron Liu c3ed4f5839 feat(uploader): concurrent chunk uploads 2025-08-30 10:36:20 +08:00
Aaron Liu 9b40e0146f fix(dbfs): remove recursive limit for deleting files 2025-08-28 11:26:55 +08:00
Aaron Liu a16b491f65 fix(entitysource): rate limiter applied to nil reader (#2834) 2025-08-26 11:30:55 +08:00
Darren Yu a095117061
feat(email): support magic variables in email title, add init email template for multiple languages (#2814)
* feat(email): add init email template for multiple languages

* Update setting.go

* Update setting.go

* feat(email): support magic variables in email title
2025-08-26 11:02:38 +08:00
Aaron Liu acc660f112 update submodule 2025-08-22 09:19:35 +08:00
Aaron Liu a677e23394 feat(dashboard): filter file by shared link, direct link, uploading status (#2782) 2025-08-21 14:12:30 +08:00
Aaron Liu 13e774f27d feat(dashboard): filter file by shared link, direct link, uploading status (#2667) 2025-08-21 13:14:11 +08:00
Aaron Liu 91717b7c49 feat(archive): add support for 7z and bz2 / extract rar and 7zip files protected with password (#2668) 2025-08-21 10:20:13 +08:00
Aaron Liu a1ce16bd5e fix(smtp): SMTP reset error should be ignored for non-standard SMTP server implementation (#2791) 2025-08-19 09:43:23 +08:00
Aaron Liu 872b08e5da fix(smtp): force enabling SSL does not work (#2777) 2025-08-13 18:54:56 +08:00
Aaron Liu f73583b370 update submodule 2025-08-12 13:27:33 +08:00
Aaron Liu c0132a10cb feat(dashboard): upgrade promotion 2025-08-12 13:27:07 +08:00
Aaron Liu 927c3bff00 fix(dep): remove undefined dependency 2025-08-12 13:12:54 +08:00
Aaron Liu bb9b42eb10 feat(audit): flush audit logs into DB in a standalone goroutine 2025-08-12 13:10:55 +08:00
Aaron Liu 5f18d277c8 fix(conf): ProxyHeader should be optional (#2760) 2025-08-12 09:53:15 +08:00
Aaron Liu b0057fe92f feat(profile): options to select which kind of share links to show in user's profile (#2453) 2025-08-12 09:52:47 +08:00
Darren Yu bb3db2e326
fix(middleware): leave default `ProxyHeader` config item blank to reduce risk of fake xff (#2760) 2025-08-12 09:35:36 +08:00
Aaron Liu 8deeadb1e5 fix(middleware): only select first client IP from X-Forwarded-For (#2748) 2025-08-10 10:47:29 +08:00
Aaron Liu 8688069fac refactor(mail): migrate to wneessen/go-mail (#2738) 2025-08-10 10:40:21 +08:00
Aaron Liu 4c08644b05 fix(dbfs): generate thumbnail blob should not update file modification date 2025-08-10 09:38:27 +08:00
Aaron Liu 4c976b8627 feat(blob path): disable `{path}` magic var for blob path 2025-08-07 11:35:28 +08:00
Aaron Liu b0375f5a24 fix(recycle): nil pointer if failed to found files in trash (#2750) 2025-08-07 11:03:02 +08:00
Aaron Liu 48e9719336 fix(dbfs): deadlock in SQLite while creating upload session 2025-08-07 10:30:44 +08:00
Darren Yu 7654ce889c
fix(blob path): Random variables in blob save path be wrongly fixed (#2741)
* fix(blob path): Random variables in blob save path be wrongly fixed

* feat(blob path): Use regex to match all magic variables
2025-08-05 20:29:14 +08:00
Aaron Liu 80b25e88ee fix(dbfs): file modified_at should not be updated by ent 2025-08-05 15:11:32 +08:00
Aaron Liu e31a6cbcb3 fix(workflow): concurrent read&write to progress map while transfer files in batch (#2737) 2025-08-05 12:02:17 +08:00
Curious 51d9e06f21
chore(docker compose): pin postgres to major version (#2723) 2025-08-04 14:52:21 +08:00
Git'Fellow 36be9b7a19
Fix typos on README (#2693) 2025-07-31 11:18:48 +08:00
Aaron Liu c8c2a60adb feat(storage policy): set deny/allow list for file extension and custom regexp (#2695) 2025-07-25 11:32:04 +08:00
Aaron Liu 60bf0e02b3 fix(qbittorrent): download task option not working (#2666) 2025-07-25 10:15:55 +08:00
omiku 488f32512d
Add Kingsoft Cloud object storage policy to solve the cross-domain and friendly file name incompatibility problem of s3 compatible storage policy. (#2665)
* 新增金山云对象存储策略,解决s3兼容存储策略的跨域及友好文件名不兼容问题

* fix bug&add download Expire time args

* Handling of expiration times when they may be empty
2025-07-21 16:08:22 +08:00
Aaron Liu 1cdccf5fc9 feat(thumb): adding option to define custom input argument for FFmpeg (#2657) 2025-07-15 14:11:42 +08:00
Aaron Liu 15762cb393 feat(thumb): support output webp thumbnails for vips generator (#2657) 2025-07-15 13:51:23 +08:00
Aaron Liu e96b595622 feat(direct link): add option to get direct link with download enforced (#2651) 2025-07-15 13:22:04 +08:00
Aaron Liu d19fc0e75c feat(remote download): sanitize file names with special characters (#2648) 2025-07-15 12:00:39 +08:00
Aaron Liu 195d68c535 chore(docker): add LibRAW into docker image (#2645) 2025-07-15 11:01:44 +08:00
Aaron Liu 000124f6c7 feat(ui): custom HTML content in predefined locations (#2621) 2025-07-15 10:45:32 +08:00
Aaron Liu ca57ca1ba0 feat(custom): custom sidebar items 2025-07-15 10:41:13 +08:00
Aaron Liu 3cda4d1ef7 feat(fs): custom properties for files (#2407) 2025-07-12 11:15:33 +08:00
Aaron Liu b13490357b feat(dashboard): cleanup tasks and events (#2368) 2025-07-05 11:52:15 +08:00
Aaron Liu 617d3a4262 feat(qiniu): use accelerated upload domain (#2497) 2025-07-05 10:50:51 +08:00
Aaron Liu 75a03aa708 fix(auth): unified empty path for sign content (#2616) 2025-07-05 10:05:09 +08:00
Aaron Liu fe2ccb4d4e feat(share): add option to automatically render and show README file (#2382) 2025-07-04 14:40:32 +08:00
Aaron Liu aada3aab02 feat(storage): load balance storage policy (#2436) 2025-07-04 10:05:15 +08:00
Samler a0aefef691
feat: platform self-adaptation for file viewer application (#2603) 2025-07-03 14:04:14 +08:00
159 changed files with 10255 additions and 1432 deletions

View File

@ -3,7 +3,7 @@ FROM alpine:latest
WORKDIR /cloudreve
RUN apk update \
&& apk add --no-cache tzdata vips-tools ffmpeg libreoffice aria2 supervisor font-noto font-noto-cjk libheif\
&& apk add --no-cache tzdata vips-tools ffmpeg libreoffice aria2 supervisor font-noto font-noto-cjk libheif libraw-tools\
&& cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone \
&& mkdir -p ./data/temp/aria2 \
@ -13,7 +13,8 @@ ENV CR_ENABLE_ARIA2=1 \
CR_SETTING_DEFAULT_thumb_ffmpeg_enabled=1 \
CR_SETTING_DEFAULT_thumb_vips_enabled=1 \
CR_SETTING_DEFAULT_thumb_libreoffice_enabled=1 \
CR_SETTING_DEFAULT_media_meta_ffprobe=1
CR_SETTING_DEFAULT_media_meta_ffprobe=1 \
CR_SETTING_DEFAULT_thumb_libraw_enabled=1
COPY .build/aria2.supervisor.conf .build/entrypoint.sh ./
COPY cloudreve ./cloudreve

View File

@ -7,7 +7,7 @@
Cloudreve
<br>
</h1>
<h4 align="center">Self-hosted file management system with muilt-cloud support.</h4>
<h4 align="center">Self-hosted file management system with multi-cloud support.</h4>
<p align="center">
<a href="https://dev.azure.com/abslantliu/Cloudreve/_build?definitionId=6">
@ -38,18 +38,18 @@
## :sparkles: Features
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu, Aliyun OSS, Tencent COS, Upyun.
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu Kodo, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Kingsoft Cloud KS3, Upyun.
- :outbox_tray: Upload/Download in directly transmission from client to storage providers.
- 💾 Integrate with Aria2/qBittorrent to download files in background, use multiple download nodes to share the load.
- 📚 Compress/Extract files, download files in batch.
- 📚 Compress/Extract/Preview archived files, download files in batch.
- 💻 WebDAV support covering all storage providers.
- :zap:Drag&Drop to upload files or folders, with resumeable upload support.
- :zap:Drag&Drop to upload files or folders, with parallel resumable upload support.
- :card_file_box: Extract media metadata from files, search files by metadata or tags.
- :family_woman_girl_boy: Multi-users with multi-groups.
- :link: Create share links for files and folders with expiration date.
- :eye_speech_bubble: Preview videos, images, audios, ePub files online; edit texts, diagrams, Markdown, images, Office documents online.
- :art: Customize theme colors, dark mode, PWA application, SPA, i18n.
- :rocket: All-In-One packing, with all features out-of-the-box.
- :rocket: All-in-one packaging, with all features out of the box.
- 🌈 ... ...
## :hammer_and_wrench: Deploy

View File

@ -39,12 +39,12 @@
## :sparkles: 特性
- :cloud: 支持本机、从机、七牛、阿里云 OSS、腾讯云 COS、华为云 OBS、又拍云、OneDrive (包括世纪互联版) 、S3 兼容协议 作为存储端
- :cloud: 支持本机、从机、七牛 Kodo、阿里云 OSS、腾讯云 COS、华为云 OBS、金山云 KS3、又拍云、OneDrive (包括世纪互联版) 、S3 兼容协议 作为存储端
- :outbox_tray: 上传/下载 支持客户端直传,支持下载限速
- 💾 可对接 Aria2 离线下载,可使用多个从机节点分担下载任务
- 📚 在线 压缩/解压缩、多文件打包下载
- 💾 可对接 Aria2/qBittorrent 离线下载,可使用多个从机节点分担下载任务
- 📚 在线 压缩/解压缩/压缩包预览、多文件打包下载
- 💻 覆盖全部存储策略的 WebDAV 协议支持
- :zap: 拖拽上传、目录上传、分片上传
- :zap: 拖拽上传、目录上传、并行分片上传
- :card_file_box: 提取媒体元数据,通过元数据或标签搜索文件
- :family_woman_girl_boy: 多用户、用户组、多存储策略
- :link: 创建文件、目录的分享链接,可设定自动过期

View File

@ -178,6 +178,8 @@ func (s *server) Close() {
defer cancel()
}
s.dep.EventHub().Close()
// Shutdown http server
if s.server != nil {
err := s.server.Shutdown(ctx)

View File

@ -3,7 +3,7 @@ package constants
// These values will be injected at build time, DO NOT EDIT.
// BackendVersion 当前后端版本号
var BackendVersion = "4.1.0"
var BackendVersion = "4.7.0"
// IsPro 是否为Pro版本
var IsPro = "false"

View File

@ -17,6 +17,8 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
"github.com/cloudreve/Cloudreve/v4/pkg/email"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@ -129,55 +131,63 @@ type Dep interface {
WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error)
// UAParser Get a singleton uaparser.Parser instance for user agent parsing.
UAParser() *uaparser.Parser
// MasterEncryptKeyVault Get a singleton encrypt.MasterEncryptKeyVault instance for master encrypt key vault.
MasterEncryptKeyVault(ctx context.Context) encrypt.MasterEncryptKeyVault
// EncryptorFactory Get a new encrypt.CryptorFactory instance.
EncryptorFactory(ctx context.Context) encrypt.CryptorFactory
// EventHub Get a singleton eventhub.EventHub instance for event publishing.
EventHub() eventhub.EventHub
}
type dependency struct {
configProvider conf.ConfigProvider
logger logging.Logger
statics iofs.FS
serverStaticFS static.ServeFileSystem
dbClient *ent.Client
rawEntClient *ent.Client
kv cache.Driver
navigatorStateKv cache.Driver
settingClient inventory.SettingClient
fileClient inventory.FileClient
shareClient inventory.ShareClient
settingProvider setting.Provider
userClient inventory.UserClient
groupClient inventory.GroupClient
storagePolicyClient inventory.StoragePolicyClient
taskClient inventory.TaskClient
nodeClient inventory.NodeClient
davAccountClient inventory.DavAccountClient
directLinkClient inventory.DirectLinkClient
emailClient email.Driver
generalAuth auth.Auth
hashidEncoder hashid.Encoder
tokenAuth auth.TokenAuth
lockSystem lock.LockSystem
requestClient request.Client
ioIntenseQueue queue.Queue
thumbQueue queue.Queue
mediaMetaQueue queue.Queue
entityRecycleQueue queue.Queue
slaveQueue queue.Queue
remoteDownloadQueue queue.Queue
ioIntenseQueueTask queue.Task
mediaMeta mediameta.Extractor
thumbPipeline thumb.Generator
mimeDetector mime.MimeDetector
credManager credmanager.CredManager
nodePool cluster.NodePool
taskRegistry queue.TaskRegistry
webauthn *webauthn.WebAuthn
parser *uaparser.Parser
cron *cron.Cron
configProvider conf.ConfigProvider
logger logging.Logger
statics iofs.FS
serverStaticFS static.ServeFileSystem
dbClient *ent.Client
rawEntClient *ent.Client
kv cache.Driver
navigatorStateKv cache.Driver
settingClient inventory.SettingClient
fileClient inventory.FileClient
shareClient inventory.ShareClient
settingProvider setting.Provider
userClient inventory.UserClient
groupClient inventory.GroupClient
storagePolicyClient inventory.StoragePolicyClient
taskClient inventory.TaskClient
nodeClient inventory.NodeClient
davAccountClient inventory.DavAccountClient
directLinkClient inventory.DirectLinkClient
fsEventClient inventory.FsEventClient
emailClient email.Driver
generalAuth auth.Auth
hashidEncoder hashid.Encoder
tokenAuth auth.TokenAuth
lockSystem lock.LockSystem
requestClient request.Client
ioIntenseQueue queue.Queue
thumbQueue queue.Queue
mediaMetaQueue queue.Queue
entityRecycleQueue queue.Queue
slaveQueue queue.Queue
remoteDownloadQueue queue.Queue
ioIntenseQueueTask queue.Task
mediaMeta mediameta.Extractor
thumbPipeline thumb.Generator
mimeDetector mime.MimeDetector
credManager credmanager.CredManager
nodePool cluster.NodePool
taskRegistry queue.TaskRegistry
webauthn *webauthn.WebAuthn
parser *uaparser.Parser
cron *cron.Cron
masterEncryptKeyVault encrypt.MasterEncryptKeyVault
eventHub eventhub.EventHub
configPath string
isPro bool
requiredDbVersion string
licenseKey string
// Protects inner deps that can be reloaded at runtime.
mu sync.Mutex
@ -206,6 +216,19 @@ func (d *dependency) RequestClient(opts ...request.Option) request.Client {
return request.NewClient(d.ConfigProvider(), opts...)
}
// MasterEncryptKeyVault lazily creates and caches the singleton
// encrypt.MasterEncryptKeyVault backed by the setting provider.
func (d *dependency) MasterEncryptKeyVault(ctx context.Context) encrypt.MasterEncryptKeyVault {
// Fast path: reuse the cached vault once it has been initialized.
if d.masterEncryptKeyVault != nil {
return d.masterEncryptKeyVault
}
// NOTE(review): no d.mu locking here — concurrent first calls could each
// construct a vault; confirm initialization is serialized by callers.
d.masterEncryptKeyVault = encrypt.NewMasterEncryptKeyVault(ctx, d.SettingProvider())
return d.masterEncryptKeyVault
}
// EncryptorFactory returns a new encrypt.CryptorFactory bound to the
// singleton master encrypt key vault. A fresh factory is built on every call.
func (d *dependency) EncryptorFactory(ctx context.Context) encrypt.CryptorFactory {
return encrypt.NewCryptorFactory(d.MasterEncryptKeyVault(ctx))
}
func (d *dependency) WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error) {
if d.webauthn != nil {
return d.webauthn, nil
@ -346,6 +369,21 @@ func (d *dependency) NavigatorStateKV() cache.Driver {
return d.navigatorStateKv
}
// EventHub lazily creates and caches the singleton eventhub.EventHub used to
// publish fs change events; it wires in the user and fs-event clients.
func (d *dependency) EventHub() eventhub.EventHub {
if d.eventHub != nil {
return d.eventHub
}
d.eventHub = eventhub.NewEventHub(d.UserClient(), d.FsEventClient())
return d.eventHub
}
// FsEventClient returns the singleton inventory.FsEventClient used to persist
// fs change events, creating it on first use.
func (d *dependency) FsEventClient() inventory.FsEventClient {
	if d.fsEventClient != nil {
		return d.fsEventClient
	}
	// Store the constructed client before returning it: the original returned
	// a brand-new client without assigning the cache field, so the nil check
	// above never succeeded and every call allocated a fresh client.
	d.fsEventClient = inventory.NewFsEventClient(d.DBClient(), d.ConfigProvider().Database().Type)
	return d.fsEventClient
}
func (d *dependency) SettingClient() inventory.SettingClient {
if d.settingClient != nil {
return d.settingClient
@ -467,7 +505,7 @@ func (d *dependency) MediaMetaExtractor(ctx context.Context) mediameta.Extractor
return d.mediaMeta
}
d.mediaMeta = mediameta.NewExtractorManager(ctx, d.SettingProvider(), d.Logger())
d.mediaMeta = mediameta.NewExtractorManager(ctx, d.SettingProvider(), d.Logger(), d.RequestClient())
return d.mediaMeta
}
@ -843,6 +881,14 @@ func (d *dependency) Shutdown(ctx context.Context) error {
}()
}
if d.eventHub != nil {
wg.Add(1)
go func() {
d.eventHub.Close()
defer wg.Done()
}()
}
d.mu.Unlock()
wg.Wait()

View File

@ -1,6 +1,8 @@
package dependency
import (
"io/fs"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
@ -11,7 +13,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/gin-contrib/static"
"io/fs"
)
// Option 发送请求的额外设置
@ -67,12 +68,6 @@ func WithProFlag(c bool) Option {
})
}
func WithLicenseKey(c string) Option {
return optionFunc(func(o *dependency) {
o.licenseKey = c
})
}
// WithRawEntClient Set the default raw ent client.
func WithRawEntClient(c *ent.Client) Option {
return optionFunc(func(o *dependency) {

View File

@ -27,8 +27,8 @@ type system struct {
Debug bool
SessionSecret string
HashIDSalt string
GracePeriod int `validate:"gte=0"`
ProxyHeader string `validate:"required_with=Listen"`
GracePeriod int `validate:"gte=0"`
ProxyHeader string
}
type ssl struct {

View File

@ -22,7 +22,7 @@ var SystemConfig = &system{
Debug: false,
Mode: "master",
Listen: ":5212",
ProxyHeader: "X-Forwarded-For",
ProxyHeader: "",
}
// CORSConfig 跨域配置

View File

@ -103,10 +103,6 @@ func (m *Migrator) migratePolicy() (map[int]bool, error) {
settings.ProxyServer = policy.OptionsSerialized.OdProxy
}
if policy.DirNameRule == "" {
policy.DirNameRule = "uploads/{uid}/{path}"
}
if policy.Type == types.PolicyTypeCos {
settings.ChunkSize = 1024 * 1024 * 25
}
@ -122,8 +118,16 @@ func (m *Migrator) migratePolicy() (map[int]bool, error) {
hasRandomElement = true
break
}
if strings.Contains(policy.DirNameRule, c) {
hasRandomElement = true
break
}
}
if !hasRandomElement {
if policy.DirNameRule == "" {
policy.DirNameRule = "uploads/{uid}/{path}"
}
policy.FileNameRule = "{uid}_{randomkey8}_{originname}"
m.l.Warning("Storage policy %q has no random element in file name rule, using default file name rule.", policy.Name)
}

2
assets

@ -1 +1 @@
Subproject commit 8e2c2bcff17d4728a01c2cabab8c3b639d72f428
Subproject commit 0b388cc50a6c8e67f645d1b7d569bd9e58ae2c30

230
cmd/masterkey.go Normal file
View File

@ -0,0 +1,230 @@
package cmd
import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"os"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/spf13/cobra"
)
var (
outputToFile string
newMasterKeyFile string
)
// init registers the master-key command tree on the root command and declares
// the flags used by the generate and rotate subcommands.
func init() {
rootCmd.AddCommand(masterKeyCmd)
masterKeyCmd.AddCommand(masterKeyGenerateCmd)
masterKeyCmd.AddCommand(masterKeyGetCmd)
masterKeyCmd.AddCommand(masterKeyRotateCmd)
masterKeyGenerateCmd.Flags().StringVarP(&outputToFile, "output", "o", "", "Output master key to file instead of stdout")
masterKeyRotateCmd.Flags().StringVarP(&newMasterKeyFile, "new-key", "n", "", "Path to file containing the new master key (base64 encoded).")
}
// masterKeyCmd is the parent "master-key" command. It performs no work of its
// own and only prints help, delegating to the generate/get/rotate subcommands.
var masterKeyCmd = &cobra.Command{
Use: "master-key",
Short: "Master encryption key management",
Long: "Manage master encryption keys for file encryption. Use subcommands to generate, get, or rotate keys.",
Run: func(cmd *cobra.Command, args []string) {
// Error from Help() is deliberately ignored; there is no useful recovery.
_ = cmd.Help()
},
}
// masterKeyGenerateCmd implements "master-key generate": it draws a random
// 256-bit key and emits it base64-encoded to stdout or, with --output, to a
// file created with owner-only permissions.
var masterKeyGenerateCmd = &cobra.Command{
	Use:   "generate",
	Short: "Generate a new master encryption key",
	Long:  "Generate a new random 32-byte (256-bit) master encryption key and output it in base64 format.",
	Run: func(cmd *cobra.Command, args []string) {
		// Draw 32 bytes of cryptographically secure randomness.
		raw := make([]byte, 32)
		if _, err := io.ReadFull(rand.Reader, raw); err != nil {
			fmt.Fprintf(os.Stderr, "Error: Failed to generate random key: %v\n", err)
			os.Exit(1)
		}
		encoded := base64.StdEncoding.EncodeToString(raw)

		if outputToFile == "" {
			// No --output flag: print the key itself to stdout.
			fmt.Println(encoded)
			return
		}

		// --output given: persist the key, readable by the owner only.
		if err := os.WriteFile(outputToFile, []byte(encoded), 0600); err != nil {
			fmt.Fprintf(os.Stderr, "Error: Failed to write key to file: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("Master key generated and saved to: %s\n", outputToFile)
	},
}
// masterKeyGetCmd implements "master-key get": it builds the configured key
// vault, fetches the current master key, and prints it in base64.
var masterKeyGetCmd = &cobra.Command{
	Use:   "get",
	Short: "Get the current master encryption key",
	Long:  "Retrieve and display the current master encryption key from the configured vault (setting, env, or file).",
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
		)
		logger := dep.Logger()

		// Construct the vault from settings and retrieve the raw key bytes.
		vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
		masterKey, err := vault.GetMasterKey(ctx)
		if err != nil {
			logger.Error("Failed to get master key: %s", err)
			os.Exit(1)
		}

		// Blank line separates the key from any log output printed above.
		fmt.Println("")
		fmt.Println(base64.StdEncoding.EncodeToString(masterKey))
	},
}
// masterKeyRotateCmd implements "master-key rotate": it re-encrypts every
// stored per-file encryption key with a new master key and, when the master
// key lives in the settings store, persists the new key as well. For env- or
// file-based vaults the operator is told to update the key manually.
var masterKeyRotateCmd = &cobra.Command{
	Use:   "rotate",
	Short: "Rotate the master encryption key",
	Long: `Rotate the master encryption key by re-encrypting all encrypted file keys with a new master key.
This operation:
1. Retrieves the current master key
2. Loads a new master key from file
3. Re-encrypts all file encryption keys with the new master key
4. Updates the master key in the settings database
Warning: This is a critical operation. Make sure to backup your database before proceeding.`,
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
		)
		logger := dep.Logger()

		// Fail fast with an actionable message when --new-key was omitted;
		// the original fell through to os.ReadFile("") and surfaced a
		// confusing "open : no such file or directory" error instead.
		if newMasterKeyFile == "" {
			logger.Error("Missing required flag: --new-key (path to file containing the base64 encoded new master key)")
			os.Exit(1)
		}

		logger.Info("Starting master key rotation...")

		// Step 1: retrieve the current (old) master key from the vault.
		vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
		oldMasterKey, err := vault.GetMasterKey(ctx)
		if err != nil {
			logger.Error("Failed to get current master key: %s", err)
			os.Exit(1)
		}
		logger.Info("Retrieved current master key")

		// Step 2: load and decode the new master key (base64) from the file.
		keyData, err := os.ReadFile(newMasterKeyFile)
		if err != nil {
			logger.Error("Failed to read new master key file: %s", err)
			os.Exit(1)
		}
		newMasterKey, err := base64.StdEncoding.DecodeString(string(keyData))
		if err != nil {
			logger.Error("Failed to decode new master key: %s", err)
			os.Exit(1)
		}
		if len(newMasterKey) != 32 {
			logger.Error("Invalid new master key: must be 32 bytes (256 bits), got %d bytes", len(newMasterKey))
			os.Exit(1)
		}
		logger.Info("Loaded new master key from file: %s", newMasterKeyFile)

		// Step 3: walk every entity that carries props and rewrap its file key.
		db := dep.DBClient()
		entities, err := db.Entity.Query().
			Where(entity.Not(entity.PropsIsNil())).
			All(ctx)
		if err != nil {
			logger.Error("Failed to query entities: %s", err)
			os.Exit(1)
		}
		logger.Info("Found %d entities to check for encryption", len(entities))

		encryptedCount := 0
		for _, e := range entities {
			if e.Props == nil || e.Props.EncryptMetadata == nil {
				// Entity is not encrypted; nothing to rotate.
				continue
			}
			encMeta := e.Props.EncryptMetadata

			// Unwrap the per-file key with the old master key...
			decryptedFileKey, err := encrypt.DecryptWithMasterKey(oldMasterKey, encMeta.Key)
			if err != nil {
				logger.Error("Failed to decrypt key for entity %d: %s", e.ID, err)
				os.Exit(1)
			}
			// ...and wrap it again under the new master key.
			newEncryptedKey, err := encrypt.EncryptWithMasterKey(newMasterKey, decryptedFileKey)
			if err != nil {
				logger.Error("Failed to re-encrypt key for entity %d: %s", e.ID, err)
				os.Exit(1)
			}

			// Copy props before mutating so shared state is never modified.
			newProps := *e.Props
			newProps.EncryptMetadata = &types.EncryptMetadata{
				Algorithm:    encMeta.Algorithm,
				Key:          newEncryptedKey,
				KeyPlainText: nil, // Don't store plaintext
				IV:           encMeta.IV,
			}
			if err := db.Entity.UpdateOne(e).
				SetProps(&newProps).
				Exec(ctx); err != nil {
				logger.Error("Failed to update entity %d: %s", e.ID, err)
				os.Exit(1)
			}
			encryptedCount++
		}
		logger.Info("Re-encrypted %d file keys", encryptedCount)

		// Step 4: persist the new master key when it is kept in settings;
		// otherwise tell the operator which external store to update by hand.
		keyStore := dep.SettingProvider().MasterEncryptKeyVault(ctx)
		if keyStore == setting.MasterEncryptKeyVaultTypeSetting {
			encodedNewKey := base64.StdEncoding.EncodeToString(newMasterKey)
			err = dep.SettingClient().Set(ctx, map[string]string{
				"encrypt_master_key": encodedNewKey,
			})
			if err != nil {
				logger.Error("Failed to update master key in settings: %s", err)
				logger.Error("WARNING: File keys have been re-encrypted but master key update failed!")
				logger.Error("Please manually update the encrypt_master_key setting.")
				os.Exit(1)
			}
		} else {
			logger.Info("Current master key is stored in %q", keyStore)
			if keyStore == setting.MasterEncryptKeyVaultTypeEnv {
				logger.Info("Please update the new master encryption key in your \"CR_ENCRYPT_MASTER_KEY\" environment variable.")
			} else if keyStore == setting.MasterEncryptKeyVaultTypeFile {
				logger.Info("Please update the new master encryption key in your key file: %q", dep.SettingProvider().MasterEncryptKeyFile(ctx))
			}
			logger.Info("Last step: Please manually update the new master encryption key in your ENV or key file.")
		}

		logger.Info("Master key rotation completed successfully")
	},
}

View File

@ -2,14 +2,16 @@ package cmd
import (
"fmt"
"os"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"os"
)
var (
confPath string
confPath string
licenseKey string
)
func init() {

View File

@ -12,10 +12,6 @@ import (
"github.com/spf13/cobra"
)
var (
licenseKey string
)
func init() {
rootCmd.AddCommand(serverCmd)
serverCmd.PersistentFlags().StringVarP(&licenseKey, "license-key", "l", "", "License key of your Cloudreve Pro")
@ -29,7 +25,6 @@ var serverCmd = &cobra.Command{
dependency.WithConfigPath(confPath),
dependency.WithProFlag(constants.IsProBool),
dependency.WithRequiredDbVersion(constants.BackendVersion),
dependency.WithLicenseKey(licenseKey),
)
server := application.NewServer(dep)
logger := dep.Logger()

View File

@ -1,13 +1,15 @@
services:
pro:
cloudreve:
image: cloudreve/cloudreve:latest
container_name: cloudreve-backend
depends_on:
- postgresql
- redis
restart: always
restart: unless-stopped
ports:
- 5212:5212
- 6888:6888
- 6888:6888/udp
environment:
- CR_CONF_Database.Type=postgres
- CR_CONF_Database.Host=postgresql
@ -19,8 +21,12 @@ services:
- backend_data:/cloudreve/data
postgresql:
image: postgres:latest
# Best practice: Pin to major version.
# NOTE: For major version jumps:
# backup & consult https://www.postgresql.org/docs/current/pgupgrade.html
image: postgres:17
container_name: postgresql
restart: unless-stopped
environment:
- POSTGRES_USER=cloudreve
- POSTGRES_DB=cloudreve
@ -31,6 +37,7 @@ services:
redis:
image: redis:latest
container_name: redis
restart: unless-stopped
volumes:
- redis_data:/data

View File

@ -19,6 +19,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
@ -45,6 +46,8 @@ type Client struct {
Entity *EntityClient
// File is the client for interacting with the File builders.
File *FileClient
// FsEvent is the client for interacting with the FsEvent builders.
FsEvent *FsEventClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// Metadata is the client for interacting with the Metadata builders.
@ -78,6 +81,7 @@ func (c *Client) init() {
c.DirectLink = NewDirectLinkClient(c.config)
c.Entity = NewEntityClient(c.config)
c.File = NewFileClient(c.config)
c.FsEvent = NewFsEventClient(c.config)
c.Group = NewGroupClient(c.config)
c.Metadata = NewMetadataClient(c.config)
c.Node = NewNodeClient(c.config)
@ -183,6 +187,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
DirectLink: NewDirectLinkClient(cfg),
Entity: NewEntityClient(cfg),
File: NewFileClient(cfg),
FsEvent: NewFsEventClient(cfg),
Group: NewGroupClient(cfg),
Metadata: NewMetadataClient(cfg),
Node: NewNodeClient(cfg),
@ -215,6 +220,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
DirectLink: NewDirectLinkClient(cfg),
Entity: NewEntityClient(cfg),
File: NewFileClient(cfg),
FsEvent: NewFsEventClient(cfg),
Group: NewGroupClient(cfg),
Metadata: NewMetadataClient(cfg),
Node: NewNodeClient(cfg),
@ -253,8 +259,8 @@ func (c *Client) Close() error {
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
for _, n := range []interface{ Use(...Hook) }{
c.DavAccount, c.DirectLink, c.Entity, c.File, c.Group, c.Metadata, c.Node,
c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
c.DavAccount, c.DirectLink, c.Entity, c.File, c.FsEvent, c.Group, c.Metadata,
c.Node, c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
} {
n.Use(hooks...)
}
@ -264,8 +270,8 @@ func (c *Client) Use(hooks ...Hook) {
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
func (c *Client) Intercept(interceptors ...Interceptor) {
for _, n := range []interface{ Intercept(...Interceptor) }{
c.DavAccount, c.DirectLink, c.Entity, c.File, c.Group, c.Metadata, c.Node,
c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
c.DavAccount, c.DirectLink, c.Entity, c.File, c.FsEvent, c.Group, c.Metadata,
c.Node, c.Passkey, c.Setting, c.Share, c.StoragePolicy, c.Task, c.User,
} {
n.Intercept(interceptors...)
}
@ -282,6 +288,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
return c.Entity.mutate(ctx, m)
case *FileMutation:
return c.File.mutate(ctx, m)
case *FsEventMutation:
return c.FsEvent.mutate(ctx, m)
case *GroupMutation:
return c.Group.mutate(ctx, m)
case *MetadataMutation:
@ -1034,8 +1042,7 @@ func (c *FileClient) Hooks() []Hook {
// Interceptors returns the client interceptors.
func (c *FileClient) Interceptors() []Interceptor {
inters := c.inters.File
return append(inters[:len(inters):len(inters)], file.Interceptors[:]...)
return c.inters.File
}
func (c *FileClient) mutate(ctx context.Context, m *FileMutation) (Value, error) {
@ -1053,6 +1060,157 @@ func (c *FileClient) mutate(ctx context.Context, m *FileMutation) (Value, error)
}
}
// FsEventClient is a client for the FsEvent schema.
type FsEventClient struct {
config
}
// NewFsEventClient returns a client for the FsEvent from the given config.
func NewFsEventClient(c config) *FsEventClient {
return &FsEventClient{config: c}
}
// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `fsevent.Hooks(f(g(h())))`.
func (c *FsEventClient) Use(hooks ...Hook) {
c.hooks.FsEvent = append(c.hooks.FsEvent, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` equals to `fsevent.Intercept(f(g(h())))`.
func (c *FsEventClient) Intercept(interceptors ...Interceptor) {
c.inters.FsEvent = append(c.inters.FsEvent, interceptors...)
}
// Create returns a builder for creating a FsEvent entity.
func (c *FsEventClient) Create() *FsEventCreate {
mutation := newFsEventMutation(c.config, OpCreate)
return &FsEventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// CreateBulk returns a builder for creating a bulk of FsEvent entities.
func (c *FsEventClient) CreateBulk(builders ...*FsEventCreate) *FsEventCreateBulk {
return &FsEventCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *FsEventClient) MapCreateBulk(slice any, setFunc func(*FsEventCreate, int)) *FsEventCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &FsEventCreateBulk{err: fmt.Errorf("calling to FsEventClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*FsEventCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &FsEventCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for FsEvent.
func (c *FsEventClient) Update() *FsEventUpdate {
mutation := newFsEventMutation(c.config, OpUpdate)
return &FsEventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// UpdateOne returns an update builder for the given entity.
func (c *FsEventClient) UpdateOne(fe *FsEvent) *FsEventUpdateOne {
mutation := newFsEventMutation(c.config, OpUpdateOne, withFsEvent(fe))
return &FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// UpdateOneID returns an update builder for the given id.
func (c *FsEventClient) UpdateOneID(id int) *FsEventUpdateOne {
mutation := newFsEventMutation(c.config, OpUpdateOne, withFsEventID(id))
return &FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// Delete returns a delete builder for FsEvent.
func (c *FsEventClient) Delete() *FsEventDelete {
mutation := newFsEventMutation(c.config, OpDelete)
return &FsEventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// DeleteOne returns a builder for deleting the given entity.
func (c *FsEventClient) DeleteOne(fe *FsEvent) *FsEventDeleteOne {
return c.DeleteOneID(fe.ID)
}
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *FsEventClient) DeleteOneID(id int) *FsEventDeleteOne {
builder := c.Delete().Where(fsevent.ID(id))
builder.mutation.id = &id
builder.mutation.op = OpDeleteOne
return &FsEventDeleteOne{builder}
}
// Query returns a query builder for FsEvent.
func (c *FsEventClient) Query() *FsEventQuery {
return &FsEventQuery{
config: c.config,
ctx: &QueryContext{Type: TypeFsEvent},
inters: c.Interceptors(),
}
}
// Get returns a FsEvent entity by its id.
func (c *FsEventClient) Get(ctx context.Context, id int) (*FsEvent, error) {
return c.Query().Where(fsevent.ID(id)).Only(ctx)
}
// GetX is like Get, but panics if an error occurs.
func (c *FsEventClient) GetX(ctx context.Context, id int) *FsEvent {
obj, err := c.Get(ctx, id)
if err != nil {
panic(err)
}
return obj
}
// QueryUser queries the user edge of a FsEvent.
func (c *FsEventClient) QueryUser(fe *FsEvent) *UserQuery {
query := (&UserClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := fe.ID
step := sqlgraph.NewStep(
sqlgraph.From(fsevent.Table, fsevent.FieldID, id),
sqlgraph.To(user.Table, user.FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, fsevent.UserTable, fsevent.UserColumn),
)
fromV = sqlgraph.Neighbors(fe.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks.
func (c *FsEventClient) Hooks() []Hook {
hooks := c.hooks.FsEvent
return append(hooks[:len(hooks):len(hooks)], fsevent.Hooks[:]...)
}
// Interceptors returns the client interceptors.
func (c *FsEventClient) Interceptors() []Interceptor {
inters := c.inters.FsEvent
return append(inters[:len(inters):len(inters)], fsevent.Interceptors[:]...)
}
func (c *FsEventClient) mutate(ctx context.Context, m *FsEventMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&FsEventCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&FsEventUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&FsEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&FsEventDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown FsEvent mutation op: %q", m.Op())
}
}
// GroupClient is a client for the Group schema.
type GroupClient struct {
config
@ -2529,6 +2687,22 @@ func (c *UserClient) QueryTasks(u *User) *TaskQuery {
return query
}
// QueryFsevents queries the fsevents edge of a User.
func (c *UserClient) QueryFsevents(u *User) *FsEventQuery {
query := (&FsEventClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := u.ID
step := sqlgraph.NewStep(
sqlgraph.From(user.Table, user.FieldID, id),
sqlgraph.To(fsevent.Table, fsevent.FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, user.FseventsTable, user.FseventsColumn),
)
fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
return fromV, nil
}
return query
}
// QueryEntities queries the entities edge of a User.
func (c *UserClient) QueryEntities(u *User) *EntityQuery {
query := (&EntityClient{config: c.config}).Query()
@ -2575,12 +2749,12 @@ func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error)
// hooks and interceptors per client, for fast access.
type (
hooks struct {
DavAccount, DirectLink, Entity, File, Group, Metadata, Node, Passkey, Setting,
Share, StoragePolicy, Task, User []ent.Hook
DavAccount, DirectLink, Entity, File, FsEvent, Group, Metadata, Node, Passkey,
Setting, Share, StoragePolicy, Task, User []ent.Hook
}
inters struct {
DavAccount, DirectLink, Entity, File, Group, Metadata, Node, Passkey, Setting,
Share, StoragePolicy, Task, User []ent.Interceptor
DavAccount, DirectLink, Entity, File, FsEvent, Group, Metadata, Node, Passkey,
Setting, Share, StoragePolicy, Task, User []ent.Interceptor
}
)

View File

@ -16,6 +16,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
@ -89,6 +90,7 @@ func checkColumn(table, column string) error {
directlink.Table: directlink.ValidColumn,
entity.Table: entity.ValidColumn,
file.Table: file.ValidColumn,
fsevent.Table: fsevent.ValidColumn,
group.Table: group.ValidColumn,
metadata.Table: metadata.ValidColumn,
node.Table: node.ValidColumn,

View File

@ -42,8 +42,8 @@ type Entity struct {
CreatedBy int `json:"created_by,omitempty"`
// UploadSessionID holds the value of the "upload_session_id" field.
UploadSessionID *uuid.UUID `json:"upload_session_id,omitempty"`
// RecycleOptions holds the value of the "recycle_options" field.
RecycleOptions *types.EntityRecycleOption `json:"recycle_options,omitempty"`
// Props holds the value of the "props" field.
Props *types.EntityProps `json:"props,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the EntityQuery when eager-loading is set.
Edges EntityEdges `json:"edges"`
@ -105,7 +105,7 @@ func (*Entity) scanValues(columns []string) ([]any, error) {
switch columns[i] {
case entity.FieldUploadSessionID:
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
case entity.FieldRecycleOptions:
case entity.FieldProps:
values[i] = new([]byte)
case entity.FieldID, entity.FieldType, entity.FieldSize, entity.FieldReferenceCount, entity.FieldStoragePolicyEntities, entity.FieldCreatedBy:
values[i] = new(sql.NullInt64)
@ -196,12 +196,12 @@ func (e *Entity) assignValues(columns []string, values []any) error {
e.UploadSessionID = new(uuid.UUID)
*e.UploadSessionID = *value.S.(*uuid.UUID)
}
case entity.FieldRecycleOptions:
case entity.FieldProps:
if value, ok := values[i].(*[]byte); !ok {
return fmt.Errorf("unexpected type %T for field recycle_options", values[i])
return fmt.Errorf("unexpected type %T for field props", values[i])
} else if value != nil && len(*value) > 0 {
if err := json.Unmarshal(*value, &e.RecycleOptions); err != nil {
return fmt.Errorf("unmarshal field recycle_options: %w", err)
if err := json.Unmarshal(*value, &e.Props); err != nil {
return fmt.Errorf("unmarshal field props: %w", err)
}
}
default:
@ -289,8 +289,8 @@ func (e *Entity) String() string {
builder.WriteString(fmt.Sprintf("%v", *v))
}
builder.WriteString(", ")
builder.WriteString("recycle_options=")
builder.WriteString(fmt.Sprintf("%v", e.RecycleOptions))
builder.WriteString("props=")
builder.WriteString(fmt.Sprintf("%v", e.Props))
builder.WriteByte(')')
return builder.String()
}

View File

@ -35,8 +35,8 @@ const (
FieldCreatedBy = "created_by"
// FieldUploadSessionID holds the string denoting the upload_session_id field in the database.
FieldUploadSessionID = "upload_session_id"
// FieldRecycleOptions holds the string denoting the recycle_options field in the database.
FieldRecycleOptions = "recycle_options"
// FieldProps holds the string denoting the props field in the database.
FieldProps = "recycle_options"
// EdgeFile holds the string denoting the file edge name in mutations.
EdgeFile = "file"
// EdgeUser holds the string denoting the user edge name in mutations.
@ -79,7 +79,7 @@ var Columns = []string{
FieldStoragePolicyEntities,
FieldCreatedBy,
FieldUploadSessionID,
FieldRecycleOptions,
FieldProps,
}
var (

View File

@ -521,14 +521,14 @@ func UploadSessionIDNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldUploadSessionID))
}
// RecycleOptionsIsNil applies the IsNil predicate on the "recycle_options" field.
func RecycleOptionsIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldRecycleOptions))
// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldProps))
}
// RecycleOptionsNotNil applies the NotNil predicate on the "recycle_options" field.
func RecycleOptionsNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldRecycleOptions))
// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldProps))
}
// HasFile applies the HasEdge predicate on the "file" edge.

View File

@ -135,9 +135,9 @@ func (ec *EntityCreate) SetNillableUploadSessionID(u *uuid.UUID) *EntityCreate {
return ec
}
// SetRecycleOptions sets the "recycle_options" field.
func (ec *EntityCreate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityCreate {
ec.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (ec *EntityCreate) SetProps(tp *types.EntityProps) *EntityCreate {
ec.mutation.SetProps(tp)
return ec
}
@ -336,9 +336,9 @@ func (ec *EntityCreate) createSpec() (*Entity, *sqlgraph.CreateSpec) {
_spec.SetField(entity.FieldUploadSessionID, field.TypeUUID, value)
_node.UploadSessionID = &value
}
if value, ok := ec.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
_node.RecycleOptions = value
if value, ok := ec.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
_node.Props = value
}
if nodes := ec.mutation.FileIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
@ -586,21 +586,21 @@ func (u *EntityUpsert) ClearUploadSessionID() *EntityUpsert {
return u
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsert) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsert {
u.Set(entity.FieldRecycleOptions, v)
// SetProps sets the "props" field.
func (u *EntityUpsert) SetProps(v *types.EntityProps) *EntityUpsert {
u.Set(entity.FieldProps, v)
return u
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsert) UpdateRecycleOptions() *EntityUpsert {
u.SetExcluded(entity.FieldRecycleOptions)
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsert) UpdateProps() *EntityUpsert {
u.SetExcluded(entity.FieldProps)
return u
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsert) ClearRecycleOptions() *EntityUpsert {
u.SetNull(entity.FieldRecycleOptions)
// ClearProps clears the value of the "props" field.
func (u *EntityUpsert) ClearProps() *EntityUpsert {
u.SetNull(entity.FieldProps)
return u
}
@ -817,24 +817,24 @@ func (u *EntityUpsertOne) ClearUploadSessionID() *EntityUpsertOne {
})
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsertOne) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertOne {
// SetProps sets the "props" field.
func (u *EntityUpsertOne) SetProps(v *types.EntityProps) *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.SetRecycleOptions(v)
s.SetProps(v)
})
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsertOne) UpdateRecycleOptions() *EntityUpsertOne {
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsertOne) UpdateProps() *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.UpdateRecycleOptions()
s.UpdateProps()
})
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsertOne) ClearRecycleOptions() *EntityUpsertOne {
// ClearProps clears the value of the "props" field.
func (u *EntityUpsertOne) ClearProps() *EntityUpsertOne {
return u.Update(func(s *EntityUpsert) {
s.ClearRecycleOptions()
s.ClearProps()
})
}
@ -1222,24 +1222,24 @@ func (u *EntityUpsertBulk) ClearUploadSessionID() *EntityUpsertBulk {
})
}
// SetRecycleOptions sets the "recycle_options" field.
func (u *EntityUpsertBulk) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertBulk {
// SetProps sets the "props" field.
func (u *EntityUpsertBulk) SetProps(v *types.EntityProps) *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.SetRecycleOptions(v)
s.SetProps(v)
})
}
// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
func (u *EntityUpsertBulk) UpdateRecycleOptions() *EntityUpsertBulk {
// UpdateProps sets the "props" field to the value that was provided on create.
func (u *EntityUpsertBulk) UpdateProps() *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.UpdateRecycleOptions()
s.UpdateProps()
})
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (u *EntityUpsertBulk) ClearRecycleOptions() *EntityUpsertBulk {
// ClearProps clears the value of the "props" field.
func (u *EntityUpsertBulk) ClearProps() *EntityUpsertBulk {
return u.Update(func(s *EntityUpsert) {
s.ClearRecycleOptions()
s.ClearProps()
})
}

View File

@ -190,15 +190,15 @@ func (eu *EntityUpdate) ClearUploadSessionID() *EntityUpdate {
return eu
}
// SetRecycleOptions sets the "recycle_options" field.
func (eu *EntityUpdate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdate {
eu.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (eu *EntityUpdate) SetProps(tp *types.EntityProps) *EntityUpdate {
eu.mutation.SetProps(tp)
return eu
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (eu *EntityUpdate) ClearRecycleOptions() *EntityUpdate {
eu.mutation.ClearRecycleOptions()
// ClearProps clears the value of the "props" field.
func (eu *EntityUpdate) ClearProps() *EntityUpdate {
eu.mutation.ClearProps()
return eu
}
@ -383,11 +383,11 @@ func (eu *EntityUpdate) sqlSave(ctx context.Context) (n int, err error) {
if eu.mutation.UploadSessionIDCleared() {
_spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
}
if value, ok := eu.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
if value, ok := eu.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
}
if eu.mutation.RecycleOptionsCleared() {
_spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
if eu.mutation.PropsCleared() {
_spec.ClearField(entity.FieldProps, field.TypeJSON)
}
if eu.mutation.FileCleared() {
edge := &sqlgraph.EdgeSpec{
@ -669,15 +669,15 @@ func (euo *EntityUpdateOne) ClearUploadSessionID() *EntityUpdateOne {
return euo
}
// SetRecycleOptions sets the "recycle_options" field.
func (euo *EntityUpdateOne) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdateOne {
euo.mutation.SetRecycleOptions(tro)
// SetProps sets the "props" field.
func (euo *EntityUpdateOne) SetProps(tp *types.EntityProps) *EntityUpdateOne {
euo.mutation.SetProps(tp)
return euo
}
// ClearRecycleOptions clears the value of the "recycle_options" field.
func (euo *EntityUpdateOne) ClearRecycleOptions() *EntityUpdateOne {
euo.mutation.ClearRecycleOptions()
// ClearProps clears the value of the "props" field.
func (euo *EntityUpdateOne) ClearProps() *EntityUpdateOne {
euo.mutation.ClearProps()
return euo
}
@ -892,11 +892,11 @@ func (euo *EntityUpdateOne) sqlSave(ctx context.Context) (_node *Entity, err err
if euo.mutation.UploadSessionIDCleared() {
_spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
}
if value, ok := euo.mutation.RecycleOptions(); ok {
_spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
if value, ok := euo.mutation.Props(); ok {
_spec.SetField(entity.FieldProps, field.TypeJSON, value)
}
if euo.mutation.RecycleOptionsCleared() {
_spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
if euo.mutation.PropsCleared() {
_spec.ClearField(entity.FieldProps, field.TypeJSON)
}
if euo.mutation.FileCleared() {
edge := &sqlgraph.EdgeSpec{

View File

@ -25,8 +25,6 @@ type File struct {
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// DeletedAt holds the value of the "deleted_at" field.
DeletedAt *time.Time `json:"deleted_at,omitempty"`
// Type holds the value of the "type" field.
Type int `json:"type,omitempty"`
// Name holds the value of the "name" field.
@ -171,7 +169,7 @@ func (*File) scanValues(columns []string) ([]any, error) {
values[i] = new(sql.NullInt64)
case file.FieldName:
values[i] = new(sql.NullString)
case file.FieldCreatedAt, file.FieldUpdatedAt, file.FieldDeletedAt:
case file.FieldCreatedAt, file.FieldUpdatedAt:
values[i] = new(sql.NullTime)
default:
values[i] = new(sql.UnknownType)
@ -206,13 +204,6 @@ func (f *File) assignValues(columns []string, values []any) error {
} else if value.Valid {
f.UpdatedAt = value.Time
}
case file.FieldDeletedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
} else if value.Valid {
f.DeletedAt = new(time.Time)
*f.DeletedAt = value.Time
}
case file.FieldType:
if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field type", values[i])
@ -351,11 +342,6 @@ func (f *File) String() string {
builder.WriteString("updated_at=")
builder.WriteString(f.UpdatedAt.Format(time.ANSIC))
builder.WriteString(", ")
if v := f.DeletedAt; v != nil {
builder.WriteString("deleted_at=")
builder.WriteString(v.Format(time.ANSIC))
}
builder.WriteString(", ")
builder.WriteString("type=")
builder.WriteString(fmt.Sprintf("%v", f.Type))
builder.WriteString(", ")

View File

@ -19,8 +19,6 @@ const (
FieldCreatedAt = "created_at"
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
FieldUpdatedAt = "updated_at"
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
FieldDeletedAt = "deleted_at"
// FieldType holds the string denoting the type field in the database.
FieldType = "type"
// FieldName holds the string denoting the name field in the database.
@ -112,7 +110,6 @@ var Columns = []string{
FieldID,
FieldCreatedAt,
FieldUpdatedAt,
FieldDeletedAt,
FieldType,
FieldName,
FieldOwnerID,
@ -146,14 +143,11 @@ func ValidColumn(column string) bool {
//
// import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
Hooks [1]ent.Hook
Interceptors [1]ent.Interceptor
Hooks [1]ent.Hook
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
DefaultCreatedAt func() time.Time
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
DefaultUpdatedAt func() time.Time
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
UpdateDefaultUpdatedAt func() time.Time
// DefaultSize holds the default value on creation for the "size" field.
DefaultSize int64
// DefaultIsSymbolic holds the default value on creation for the "is_symbolic" field.
@ -178,11 +172,6 @@ func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}
// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldType, opts...).ToFunc()

View File

@ -65,11 +65,6 @@ func UpdatedAt(v time.Time) predicate.File {
return predicate.File(sql.FieldEQ(FieldUpdatedAt, v))
}
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.File {
return predicate.File(sql.FieldEQ(FieldDeletedAt, v))
}
// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
func Type(v int) predicate.File {
return predicate.File(sql.FieldEQ(FieldType, v))
@ -190,56 +185,6 @@ func UpdatedAtLTE(v time.Time) predicate.File {
return predicate.File(sql.FieldLTE(FieldUpdatedAt, v))
}
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.File {
return predicate.File(sql.FieldEQ(FieldDeletedAt, v))
}
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.File {
return predicate.File(sql.FieldNEQ(FieldDeletedAt, v))
}
// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.File {
return predicate.File(sql.FieldIn(FieldDeletedAt, vs...))
}
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.File {
return predicate.File(sql.FieldNotIn(FieldDeletedAt, vs...))
}
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.File {
return predicate.File(sql.FieldGT(FieldDeletedAt, v))
}
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.File {
return predicate.File(sql.FieldGTE(FieldDeletedAt, v))
}
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.File {
return predicate.File(sql.FieldLT(FieldDeletedAt, v))
}
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.File {
return predicate.File(sql.FieldLTE(FieldDeletedAt, v))
}
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.File {
return predicate.File(sql.FieldIsNull(FieldDeletedAt))
}
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.File {
return predicate.File(sql.FieldNotNull(FieldDeletedAt))
}
// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v int) predicate.File {
return predicate.File(sql.FieldEQ(FieldType, v))

View File

@ -57,20 +57,6 @@ func (fc *FileCreate) SetNillableUpdatedAt(t *time.Time) *FileCreate {
return fc
}
// SetDeletedAt sets the "deleted_at" field.
func (fc *FileCreate) SetDeletedAt(t time.Time) *FileCreate {
fc.mutation.SetDeletedAt(t)
return fc
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fc *FileCreate) SetNillableDeletedAt(t *time.Time) *FileCreate {
if t != nil {
fc.SetDeletedAt(*t)
}
return fc
}
// SetType sets the "type" field.
func (fc *FileCreate) SetType(i int) *FileCreate {
fc.mutation.SetType(i)
@ -413,10 +399,6 @@ func (fc *FileCreate) createSpec() (*File, *sqlgraph.CreateSpec) {
_spec.SetField(file.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := fc.mutation.DeletedAt(); ok {
_spec.SetField(file.FieldDeletedAt, field.TypeTime, value)
_node.DeletedAt = &value
}
if value, ok := fc.mutation.GetType(); ok {
_spec.SetField(file.FieldType, field.TypeInt, value)
_node.Type = value
@ -636,24 +618,6 @@ func (u *FileUpsert) UpdateUpdatedAt() *FileUpsert {
return u
}
// SetDeletedAt sets the "deleted_at" field.
func (u *FileUpsert) SetDeletedAt(v time.Time) *FileUpsert {
u.Set(file.FieldDeletedAt, v)
return u
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FileUpsert) UpdateDeletedAt() *FileUpsert {
u.SetExcluded(file.FieldDeletedAt)
return u
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FileUpsert) ClearDeletedAt() *FileUpsert {
u.SetNull(file.FieldDeletedAt)
return u
}
// SetType sets the "type" field.
func (u *FileUpsert) SetType(v int) *FileUpsert {
u.Set(file.FieldType, v)
@ -863,27 +827,6 @@ func (u *FileUpsertOne) UpdateUpdatedAt() *FileUpsertOne {
})
}
// SetDeletedAt sets the "deleted_at" field.
func (u *FileUpsertOne) SetDeletedAt(v time.Time) *FileUpsertOne {
return u.Update(func(s *FileUpsert) {
s.SetDeletedAt(v)
})
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FileUpsertOne) UpdateDeletedAt() *FileUpsertOne {
return u.Update(func(s *FileUpsert) {
s.UpdateDeletedAt()
})
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FileUpsertOne) ClearDeletedAt() *FileUpsertOne {
return u.Update(func(s *FileUpsert) {
s.ClearDeletedAt()
})
}
// SetType sets the "type" field.
func (u *FileUpsertOne) SetType(v int) *FileUpsertOne {
return u.Update(func(s *FileUpsert) {
@ -1289,27 +1232,6 @@ func (u *FileUpsertBulk) UpdateUpdatedAt() *FileUpsertBulk {
})
}
// SetDeletedAt sets the "deleted_at" field.
func (u *FileUpsertBulk) SetDeletedAt(v time.Time) *FileUpsertBulk {
return u.Update(func(s *FileUpsert) {
s.SetDeletedAt(v)
})
}
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FileUpsertBulk) UpdateDeletedAt() *FileUpsertBulk {
return u.Update(func(s *FileUpsert) {
s.UpdateDeletedAt()
})
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FileUpsertBulk) ClearDeletedAt() *FileUpsertBulk {
return u.Update(func(s *FileUpsert) {
s.ClearDeletedAt()
})
}
// SetType sets the "type" field.
func (u *FileUpsertBulk) SetType(v int) *FileUpsertBulk {
return u.Update(func(s *FileUpsert) {

View File

@ -41,26 +41,14 @@ func (fu *FileUpdate) SetUpdatedAt(t time.Time) *FileUpdate {
return fu
}
// SetDeletedAt sets the "deleted_at" field.
func (fu *FileUpdate) SetDeletedAt(t time.Time) *FileUpdate {
fu.mutation.SetDeletedAt(t)
return fu
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fu *FileUpdate) SetNillableDeletedAt(t *time.Time) *FileUpdate {
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (fu *FileUpdate) SetNillableUpdatedAt(t *time.Time) *FileUpdate {
if t != nil {
fu.SetDeletedAt(*t)
fu.SetUpdatedAt(*t)
}
return fu
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (fu *FileUpdate) ClearDeletedAt() *FileUpdate {
fu.mutation.ClearDeletedAt()
return fu
}
// SetType sets the "type" field.
func (fu *FileUpdate) SetType(i int) *FileUpdate {
fu.mutation.ResetType()
@ -472,9 +460,6 @@ func (fu *FileUpdate) RemoveDirectLinks(d ...*DirectLink) *FileUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (fu *FileUpdate) Save(ctx context.Context) (int, error) {
if err := fu.defaults(); err != nil {
return 0, err
}
return withHooks(ctx, fu.sqlSave, fu.mutation, fu.hooks)
}
@ -500,18 +485,6 @@ func (fu *FileUpdate) ExecX(ctx context.Context) {
}
}
// defaults sets the default values of the builder before save.
func (fu *FileUpdate) defaults() error {
if _, ok := fu.mutation.UpdatedAt(); !ok {
if file.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized file.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := file.UpdateDefaultUpdatedAt()
fu.mutation.SetUpdatedAt(v)
}
return nil
}
// check runs all checks and user-defined validators on the builder.
func (fu *FileUpdate) check() error {
if _, ok := fu.mutation.OwnerID(); fu.mutation.OwnerCleared() && !ok {
@ -535,12 +508,6 @@ func (fu *FileUpdate) sqlSave(ctx context.Context) (n int, err error) {
if value, ok := fu.mutation.UpdatedAt(); ok {
_spec.SetField(file.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := fu.mutation.DeletedAt(); ok {
_spec.SetField(file.FieldDeletedAt, field.TypeTime, value)
}
if fu.mutation.DeletedAtCleared() {
_spec.ClearField(file.FieldDeletedAt, field.TypeTime)
}
if value, ok := fu.mutation.GetType(); ok {
_spec.SetField(file.FieldType, field.TypeInt, value)
}
@ -912,26 +879,14 @@ func (fuo *FileUpdateOne) SetUpdatedAt(t time.Time) *FileUpdateOne {
return fuo
}
// SetDeletedAt sets the "deleted_at" field.
func (fuo *FileUpdateOne) SetDeletedAt(t time.Time) *FileUpdateOne {
fuo.mutation.SetDeletedAt(t)
return fuo
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fuo *FileUpdateOne) SetNillableDeletedAt(t *time.Time) *FileUpdateOne {
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (fuo *FileUpdateOne) SetNillableUpdatedAt(t *time.Time) *FileUpdateOne {
if t != nil {
fuo.SetDeletedAt(*t)
fuo.SetUpdatedAt(*t)
}
return fuo
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (fuo *FileUpdateOne) ClearDeletedAt() *FileUpdateOne {
fuo.mutation.ClearDeletedAt()
return fuo
}
// SetType sets the "type" field.
func (fuo *FileUpdateOne) SetType(i int) *FileUpdateOne {
fuo.mutation.ResetType()
@ -1356,9 +1311,6 @@ func (fuo *FileUpdateOne) Select(field string, fields ...string) *FileUpdateOne
// Save executes the query and returns the updated File entity.
func (fuo *FileUpdateOne) Save(ctx context.Context) (*File, error) {
if err := fuo.defaults(); err != nil {
return nil, err
}
return withHooks(ctx, fuo.sqlSave, fuo.mutation, fuo.hooks)
}
@ -1384,18 +1336,6 @@ func (fuo *FileUpdateOne) ExecX(ctx context.Context) {
}
}
// defaults sets the default values of the builder before save.
func (fuo *FileUpdateOne) defaults() error {
if _, ok := fuo.mutation.UpdatedAt(); !ok {
if file.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized file.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := file.UpdateDefaultUpdatedAt()
fuo.mutation.SetUpdatedAt(v)
}
return nil
}
// check runs all checks and user-defined validators on the builder.
func (fuo *FileUpdateOne) check() error {
if _, ok := fuo.mutation.OwnerID(); fuo.mutation.OwnerCleared() && !ok {
@ -1436,12 +1376,6 @@ func (fuo *FileUpdateOne) sqlSave(ctx context.Context) (_node *File, err error)
if value, ok := fuo.mutation.UpdatedAt(); ok {
_spec.SetField(file.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := fuo.mutation.DeletedAt(); ok {
_spec.SetField(file.FieldDeletedAt, field.TypeTime, value)
}
if fuo.mutation.DeletedAtCleared() {
_spec.ClearField(file.FieldDeletedAt, field.TypeTime)
}
if value, ok := fuo.mutation.GetType(); ok {
_spec.SetField(file.FieldType, field.TypeInt, value)
}

204
ent/fsevent.go Normal file
View File

@ -0,0 +1,204 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/gofrs/uuid"
)
// FsEvent is the model entity for the FsEvent schema.
type FsEvent struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	// Pointer so a missing (NULL) value is distinguishable from the zero time.
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Event holds the value of the "event" field.
	Event string `json:"event,omitempty"`
	// Subscriber holds the value of the "subscriber" field.
	Subscriber uuid.UUID `json:"subscriber,omitempty"`
	// UserFsevent holds the value of the "user_fsevent" field.
	// Foreign-key column backing the "user" edge (see FsEventEdges.User).
	UserFsevent int `json:"user_fsevent,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the FsEventQuery when eager-loading is set.
	Edges FsEventEdges `json:"edges"`
	// selectValues stores columns selected through modifiers that have no
	// dedicated struct field; they are exposed via Value().
	selectValues sql.SelectValues
}

// FsEventEdges holds the relations/edges for other nodes in the graph.
type FsEventEdges struct {
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}
// UserOrErr returns the User value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FsEventEdges) UserOrErr() (*User, error) {
	// The edge was never requested during eager-loading.
	if !e.loadedTypes[0] {
		return nil, &NotLoadedError{edge: "user"}
	}
	// The edge was loaded, but no related row exists.
	if e.User == nil {
		return nil, &NotFoundError{label: user.Label}
	}
	return e.User, nil
}
// scanValues returns the types for scanning values from sql.Rows.
// Each column is mapped to a sql.Null* scanner (or *uuid.UUID for the
// subscriber column) so NULLs can be detected in assignValues.
func (*FsEvent) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case fsevent.FieldID, fsevent.FieldUserFsevent:
			values[i] = new(sql.NullInt64)
		case fsevent.FieldEvent:
			values[i] = new(sql.NullString)
		case fsevent.FieldCreatedAt, fsevent.FieldUpdatedAt, fsevent.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		case fsevent.FieldSubscriber:
			values[i] = new(uuid.UUID)
		default:
			// Columns selected via modifiers; scanned as opaque values.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the FsEvent fields. The values slice must be the one produced by scanValues
// for the same columns, in the same order.
func (fe *FsEvent) assignValues(columns []string, values []any) error {
	// More values than columns is tolerated (extras are ignored); fewer is an error.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case fsevent.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			fe.ID = int(value.Int64)
		case fsevent.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				fe.CreatedAt = value.Time
			}
		case fsevent.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				fe.UpdatedAt = value.Time
			}
		case fsevent.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// NULL leaves DeletedAt nil; a valid time is copied to a fresh pointer.
				fe.DeletedAt = new(time.Time)
				*fe.DeletedAt = value.Time
			}
		case fsevent.FieldEvent:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field event", values[i])
			} else if value.Valid {
				fe.Event = value.String
			}
		case fsevent.FieldSubscriber:
			if value, ok := values[i].(*uuid.UUID); !ok {
				return fmt.Errorf("unexpected type %T for field subscriber", values[i])
			} else if value != nil {
				fe.Subscriber = *value
			}
		case fsevent.FieldUserFsevent:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field user_fsevent", values[i])
			} else if value.Valid {
				fe.UserFsevent = int(value.Int64)
			}
		default:
			// Non-schema columns (selected via modifiers) are kept in
			// selectValues and exposed through Value().
			fe.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the FsEvent.
// This includes values selected through modifiers, order, etc.
func (fe *FsEvent) Value(name string) (ent.Value, error) {
	return fe.selectValues.Get(name)
}

// QueryUser queries the "user" edge of the FsEvent entity.
func (fe *FsEvent) QueryUser() *UserQuery {
	return NewFsEventClient(fe.config).QueryUser(fe)
}

// Update returns a builder for updating this FsEvent.
// Note that you need to call FsEvent.Unwrap() before calling this method if this FsEvent
// was returned from a transaction, and the transaction was committed or rolled back.
func (fe *FsEvent) Update() *FsEventUpdateOne {
	return NewFsEventClient(fe.config).UpdateOne(fe)
}

// Unwrap unwraps the FsEvent entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity was not obtained from a transaction.
func (fe *FsEvent) Unwrap() *FsEvent {
	_tx, ok := fe.config.driver.(*txDriver)
	if !ok {
		panic("ent: FsEvent is not a transactional entity")
	}
	fe.config.driver = _tx.drv
	return fe
}
// String implements the fmt.Stringer.
// Output shape: FsEvent(id=…, created_at=…, updated_at=…, [deleted_at=…], event=…, subscriber=…, user_fsevent=…)
// deleted_at is only rendered when the field is non-nil.
func (fe *FsEvent) String() string {
	var b strings.Builder
	b.WriteString("FsEvent(")
	fmt.Fprintf(&b, "id=%v, ", fe.ID)
	b.WriteString("created_at=" + fe.CreatedAt.Format(time.ANSIC) + ", ")
	b.WriteString("updated_at=" + fe.UpdatedAt.Format(time.ANSIC) + ", ")
	if v := fe.DeletedAt; v != nil {
		b.WriteString("deleted_at=" + v.Format(time.ANSIC))
	}
	b.WriteString(", event=" + fe.Event)
	fmt.Fprintf(&b, ", subscriber=%v, user_fsevent=%v)", fe.Subscriber, fe.UserFsevent)
	return b.String()
}
// SetUser manually set the edge as loaded state.
// Consistency fix: every other *FsEvent method in this file uses the
// receiver name fe; the generated SetUser used e.
func (fe *FsEvent) SetUser(v *User) {
	fe.Edges.User = v
	// Mark the user edge (index 0, see FsEventEdges.loadedTypes) as loaded
	// so UserOrErr does not report NotLoadedError.
	fe.Edges.loadedTypes[0] = true
}

// FsEvents is a parsable slice of FsEvent.
type FsEvents []*FsEvent

130
ent/fsevent/fsevent.go Normal file
View File

@ -0,0 +1,130 @@
// Code generated by ent, DO NOT EDIT.
package fsevent
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
	// Label holds the string label denoting the fsevent type in the database.
	Label = "fs_event"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldEvent holds the string denoting the event field in the database.
	FieldEvent = "event"
	// FieldSubscriber holds the string denoting the subscriber field in the database.
	FieldSubscriber = "subscriber"
	// FieldUserFsevent holds the string denoting the user_fsevent field in the database.
	FieldUserFsevent = "user_fsevent"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the fsevent in the database.
	Table = "fs_events"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "fs_events"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_fsevent"
)

// Columns holds all SQL columns for fsevent fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldEvent,
	FieldSubscriber,
	FieldUserFsevent,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks and Interceptors are populated by ent/runtime with the
	// schema-level hooks/interceptors for this type.
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
// OrderOption defines the ordering options for the FsEvent queries.
type OrderOption func(*sql.Selector)

// By* helpers below build ORDER BY terms for each schema field.

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByEvent orders the results by the event field.
func ByEvent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEvent, opts...).ToFunc()
}

// BySubscriber orders the results by the subscriber field.
func BySubscriber(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSubscriber, opts...).ToFunc()
}

// ByUserFsevent orders the results by the user_fsevent field.
func ByUserFsevent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserFsevent, opts...).ToFunc()
}

// ByUserField orders the results by user field.
// Ordering happens over the joined neighbor (users) table.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}

// newUserStep describes the M2O traversal from fs_events to users
// over the user_fsevent column.
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}

390
ent/fsevent/where.go Normal file
View File

@ -0,0 +1,390 @@
// Code generated by ent, DO NOT EDIT.
package fsevent
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/gofrs/uuid"
)
// ID predicates: comparisons on the primary-key column.

// ID filters vertices based on their ID field.
func ID(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldID, id))
}

// Shorthand equality predicates: each is identical to the corresponding <Field>EQ helper.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUpdatedAt, v))
}

// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldDeletedAt, v))
}

// Event applies equality check predicate on the "event" field. It's identical to EventEQ.
func Event(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldEvent, v))
}

// Subscriber applies equality check predicate on the "subscriber" field. It's identical to SubscriberEQ.
func Subscriber(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldSubscriber, v))
}

// UserFsevent applies equality check predicate on the "user_fsevent" field. It's identical to UserFseventEQ.
func UserFsevent(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUserFsevent, v))
}
// Time-field predicates for created_at, updated_at and deleted_at.
// deleted_at is nullable and additionally gets IsNil/NotNil helpers.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldUpdatedAt, v))
}

// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldDeletedAt, v))
}

// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldDeletedAt, v))
}

// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldDeletedAt, vs...))
}

// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldDeletedAt, vs...))
}

// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldDeletedAt, v))
}

// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldDeletedAt, v))
}

// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldDeletedAt, v))
}

// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldDeletedAt, v))
}

// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
// NOTE(review): presumably used to select non-deleted rows (soft delete) — confirm against schema.
func DeletedAtIsNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIsNull(FieldDeletedAt))
}

// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotNull(FieldDeletedAt))
}
// String predicates for the "event" field, ordered/equality predicates for the
// "subscriber" UUID, and nullable-int predicates for the "user_fsevent" FK column.

// EventEQ applies the EQ predicate on the "event" field.
func EventEQ(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldEvent, v))
}

// EventNEQ applies the NEQ predicate on the "event" field.
func EventNEQ(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldEvent, v))
}

// EventIn applies the In predicate on the "event" field.
func EventIn(vs ...string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldEvent, vs...))
}

// EventNotIn applies the NotIn predicate on the "event" field.
func EventNotIn(vs ...string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldEvent, vs...))
}

// EventGT applies the GT predicate on the "event" field.
func EventGT(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldEvent, v))
}

// EventGTE applies the GTE predicate on the "event" field.
func EventGTE(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldEvent, v))
}

// EventLT applies the LT predicate on the "event" field.
func EventLT(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldEvent, v))
}

// EventLTE applies the LTE predicate on the "event" field.
func EventLTE(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldEvent, v))
}

// EventContains applies the Contains predicate on the "event" field.
func EventContains(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldContains(FieldEvent, v))
}

// EventHasPrefix applies the HasPrefix predicate on the "event" field.
func EventHasPrefix(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldHasPrefix(FieldEvent, v))
}

// EventHasSuffix applies the HasSuffix predicate on the "event" field.
func EventHasSuffix(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldHasSuffix(FieldEvent, v))
}

// EventEqualFold applies the EqualFold predicate on the "event" field.
func EventEqualFold(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEqualFold(FieldEvent, v))
}

// EventContainsFold applies the ContainsFold predicate on the "event" field.
func EventContainsFold(v string) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldContainsFold(FieldEvent, v))
}

// SubscriberEQ applies the EQ predicate on the "subscriber" field.
func SubscriberEQ(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldSubscriber, v))
}

// SubscriberNEQ applies the NEQ predicate on the "subscriber" field.
func SubscriberNEQ(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldSubscriber, v))
}

// SubscriberIn applies the In predicate on the "subscriber" field.
func SubscriberIn(vs ...uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldSubscriber, vs...))
}

// SubscriberNotIn applies the NotIn predicate on the "subscriber" field.
func SubscriberNotIn(vs ...uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldSubscriber, vs...))
}

// SubscriberGT applies the GT predicate on the "subscriber" field.
func SubscriberGT(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGT(FieldSubscriber, v))
}

// SubscriberGTE applies the GTE predicate on the "subscriber" field.
func SubscriberGTE(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldGTE(FieldSubscriber, v))
}

// SubscriberLT applies the LT predicate on the "subscriber" field.
func SubscriberLT(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLT(FieldSubscriber, v))
}

// SubscriberLTE applies the LTE predicate on the "subscriber" field.
func SubscriberLTE(v uuid.UUID) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldLTE(FieldSubscriber, v))
}

// UserFseventEQ applies the EQ predicate on the "user_fsevent" field.
func UserFseventEQ(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldEQ(FieldUserFsevent, v))
}

// UserFseventNEQ applies the NEQ predicate on the "user_fsevent" field.
func UserFseventNEQ(v int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNEQ(FieldUserFsevent, v))
}

// UserFseventIn applies the In predicate on the "user_fsevent" field.
func UserFseventIn(vs ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIn(FieldUserFsevent, vs...))
}

// UserFseventNotIn applies the NotIn predicate on the "user_fsevent" field.
func UserFseventNotIn(vs ...int) predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotIn(FieldUserFsevent, vs...))
}

// UserFseventIsNil applies the IsNil predicate on the "user_fsevent" field.
func UserFseventIsNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldIsNull(FieldUserFsevent))
}

// UserFseventNotNil applies the NotNil predicate on the "user_fsevent" field.
func UserFseventNotNil() predicate.FsEvent {
	return predicate.FsEvent(sql.FieldNotNull(FieldUserFsevent))
}
// HasUser applies the HasEdge predicate on the "user" edge.
// The step is built inline without a To() vertex: only the existence of a
// neighbor is checked, no join into the users table is needed.
func HasUser() predicate.FsEvent {
	return predicate.FsEvent(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
// All given predicates are ANDed together on the joined neighbor.
func HasUserWith(preds ...predicate.User) predicate.FsEvent {
	return predicate.FsEvent(func(s *sql.Selector) {
		step := newUserStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.FsEvent) predicate.FsEvent {
	return predicate.FsEvent(sql.NotPredicates(p))
}

827
ent/fsevent_create.go Normal file
View File

@ -0,0 +1,827 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/gofrs/uuid"
)
// FsEventCreate is the builder for creating a FsEvent entity.
type FsEventCreate struct {
	config
	// mutation accumulates field/edge changes to apply on Save.
	mutation *FsEventMutation
	// hooks run around the save operation.
	hooks []Hook
	// conflict holds ON CONFLICT (upsert) options.
	conflict []sql.ConflictOption
}
// Field/edge setters. The Nillable variants are no-ops when given nil, which
// lets callers pass through optional values without branching.

// SetCreatedAt sets the "created_at" field.
func (fec *FsEventCreate) SetCreatedAt(t time.Time) *FsEventCreate {
	fec.mutation.SetCreatedAt(t)
	return fec
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableCreatedAt(t *time.Time) *FsEventCreate {
	if t != nil {
		fec.SetCreatedAt(*t)
	}
	return fec
}

// SetUpdatedAt sets the "updated_at" field.
func (fec *FsEventCreate) SetUpdatedAt(t time.Time) *FsEventCreate {
	fec.mutation.SetUpdatedAt(t)
	return fec
}

// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableUpdatedAt(t *time.Time) *FsEventCreate {
	if t != nil {
		fec.SetUpdatedAt(*t)
	}
	return fec
}

// SetDeletedAt sets the "deleted_at" field.
func (fec *FsEventCreate) SetDeletedAt(t time.Time) *FsEventCreate {
	fec.mutation.SetDeletedAt(t)
	return fec
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableDeletedAt(t *time.Time) *FsEventCreate {
	if t != nil {
		fec.SetDeletedAt(*t)
	}
	return fec
}

// SetEvent sets the "event" field.
func (fec *FsEventCreate) SetEvent(s string) *FsEventCreate {
	fec.mutation.SetEvent(s)
	return fec
}

// SetSubscriber sets the "subscriber" field.
func (fec *FsEventCreate) SetSubscriber(u uuid.UUID) *FsEventCreate {
	fec.mutation.SetSubscriber(u)
	return fec
}

// SetUserFsevent sets the "user_fsevent" field.
// This is the raw FK column behind the "user" edge; see also SetUserID.
func (fec *FsEventCreate) SetUserFsevent(i int) *FsEventCreate {
	fec.mutation.SetUserFsevent(i)
	return fec
}

// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (fec *FsEventCreate) SetNillableUserFsevent(i *int) *FsEventCreate {
	if i != nil {
		fec.SetUserFsevent(*i)
	}
	return fec
}

// SetUserID sets the "user" edge to the User entity by ID.
func (fec *FsEventCreate) SetUserID(id int) *FsEventCreate {
	fec.mutation.SetUserID(id)
	return fec
}

// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (fec *FsEventCreate) SetNillableUserID(id *int) *FsEventCreate {
	if id != nil {
		fec = fec.SetUserID(*id)
	}
	return fec
}

// SetUser sets the "user" edge to the User entity.
func (fec *FsEventCreate) SetUser(u *User) *FsEventCreate {
	return fec.SetUserID(u.ID)
}

// Mutation returns the FsEventMutation object of the builder.
func (fec *FsEventCreate) Mutation() *FsEventMutation {
	return fec.mutation
}
// Save creates the FsEvent in the database.
func (fec *FsEventCreate) Save(ctx context.Context) (*FsEvent, error) {
	// Populate generated defaults (created_at/updated_at) before hooks run,
	// so hooks observe a fully-initialized mutation.
	if err := fec.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, fec.sqlSave, fec.mutation, fec.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (fec *FsEventCreate) SaveX(ctx context.Context) *FsEvent {
	v, err := fec.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (fec *FsEventCreate) Exec(ctx context.Context) error {
	_, err := fec.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (fec *FsEventCreate) ExecX(ctx context.Context) {
	if err := fec.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
// It errors if the schema-level default functions were never registered,
// which happens when the ent/runtime package was not imported.
func (fec *FsEventCreate) defaults() error {
	if _, ok := fec.mutation.CreatedAt(); !ok {
		if fsevent.DefaultCreatedAt == nil {
			return fmt.Errorf("ent: uninitialized fsevent.DefaultCreatedAt (forgotten import ent/runtime?)")
		}
		v := fsevent.DefaultCreatedAt()
		fec.mutation.SetCreatedAt(v)
	}
	if _, ok := fec.mutation.UpdatedAt(); !ok {
		if fsevent.DefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized fsevent.DefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := fsevent.DefaultUpdatedAt()
		fec.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
func (fec *FsEventCreate) check() error {
	if _, ok := fec.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "FsEvent.created_at"`)}
	}
	if _, ok := fec.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "FsEvent.updated_at"`)}
	}
	if _, ok := fec.mutation.Event(); !ok {
		return &ValidationError{Name: "event", err: errors.New(`ent: missing required field "FsEvent.event"`)}
	}
	if _, ok := fec.mutation.Subscriber(); !ok {
		return &ValidationError{Name: "subscriber", err: errors.New(`ent: missing required field "FsEvent.subscriber"`)}
	}
	return nil
}
// sqlSave validates the builder and performs the actual INSERT, returning the
// created node with its database-assigned ID filled in.
func (fec *FsEventCreate) sqlSave(ctx context.Context) (*FsEvent, error) {
	if err := fec.check(); err != nil {
		return nil, err
	}
	_node, _spec := fec.createSpec()
	if err := sqlgraph.CreateNode(ctx, fec.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Drivers report the auto-generated key as int64; narrow to the schema's
	// int ID type.
	id := _spec.ID.Value.(int64)
	_node.ID = int(id)
	fec.mutation.id = &_node.ID
	fec.mutation.done = true
	return _node, nil
}

// createSpec translates the mutation's fields and edges into a sqlgraph
// CreateSpec alongside the in-memory node it will populate.
func (fec *FsEventCreate) createSpec() (*FsEvent, *sqlgraph.CreateSpec) {
	var (
		_node = &FsEvent{config: fec.config}
		_spec = sqlgraph.NewCreateSpec(fsevent.Table, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
	)
	if id, ok := fec.mutation.ID(); ok {
		_node.ID = id
		id64 := int64(id)
		_spec.ID.Value = id64
	}
	_spec.OnConflict = fec.conflict
	if value, ok := fec.mutation.CreatedAt(); ok {
		_spec.SetField(fsevent.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := fec.mutation.UpdatedAt(); ok {
		_spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := fec.mutation.DeletedAt(); ok {
		_spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
		_node.DeletedAt = &value
	}
	if value, ok := fec.mutation.Event(); ok {
		_spec.SetField(fsevent.FieldEvent, field.TypeString, value)
		_node.Event = value
	}
	if value, ok := fec.mutation.Subscriber(); ok {
		_spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
		_node.Subscriber = value
	}
	if nodes := fec.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   fsevent.UserTable,
			Columns: []string{fsevent.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O edge: exactly one target; mirror its ID into the FK field.
		_node.UserFsevent = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.FsEvent.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.FsEventUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (fec *FsEventCreate) OnConflict(opts ...sql.ConflictOption) *FsEventUpsertOne {
	fec.conflict = opts
	return &FsEventUpsertOne{
		create: fec,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (fec *FsEventCreate) OnConflictColumns(columns ...string) *FsEventUpsertOne {
	fec.conflict = append(fec.conflict, sql.ConflictColumns(columns...))
	return &FsEventUpsertOne{
		create: fec,
	}
}

type (
	// FsEventUpsertOne is the builder for "upsert"-ing
	// one FsEvent node.
	FsEventUpsertOne struct {
		create *FsEventCreate
	}

	// FsEventUpsert is the "OnConflict" setter.
	// It embeds sql.UpdateSet, which holds the SET clauses applied on conflict.
	FsEventUpsert struct {
		*sql.UpdateSet
	}
)
// The FsEventUpsert methods below configure the conflict-resolution SET
// clauses: Set* assigns an explicit value, Update* reuses the value proposed
// for insertion (SQL "excluded" row), and Clear* sets the column to NULL.

// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsert) SetUpdatedAt(v time.Time) *FsEventUpsert {
	u.Set(fsevent.FieldUpdatedAt, v)
	return u
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateUpdatedAt() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldUpdatedAt)
	return u
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsert) SetDeletedAt(v time.Time) *FsEventUpsert {
	u.Set(fsevent.FieldDeletedAt, v)
	return u
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateDeletedAt() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldDeletedAt)
	return u
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsert) ClearDeletedAt() *FsEventUpsert {
	u.SetNull(fsevent.FieldDeletedAt)
	return u
}

// SetEvent sets the "event" field.
func (u *FsEventUpsert) SetEvent(v string) *FsEventUpsert {
	u.Set(fsevent.FieldEvent, v)
	return u
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateEvent() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldEvent)
	return u
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsert) SetSubscriber(v uuid.UUID) *FsEventUpsert {
	u.Set(fsevent.FieldSubscriber, v)
	return u
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateSubscriber() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldSubscriber)
	return u
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsert) SetUserFsevent(v int) *FsEventUpsert {
	u.Set(fsevent.FieldUserFsevent, v)
	return u
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsert) UpdateUserFsevent() *FsEventUpsert {
	u.SetExcluded(fsevent.FieldUserFsevent)
	return u
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsert) ClearUserFsevent() *FsEventUpsert {
	u.SetNull(fsevent.FieldUserFsevent)
	return u
}
// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *FsEventUpsertOne) UpdateNewValues() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: keep the stored value on conflict even
		// though a new one was proposed on create.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(fsevent.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *FsEventUpsertOne) Ignore() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *FsEventUpsertOne) DoNothing() *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the FsEventCreate.OnConflict
// documentation for more info.
func (u *FsEventUpsertOne) Update(set func(*FsEventUpsert)) *FsEventUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&FsEventUpsert{UpdateSet: update})
	}))
	return u
}
// The helpers below are thin conveniences that forward to the corresponding
// FsEventUpsert method through Update.

// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsertOne) SetUpdatedAt(v time.Time) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateUpdatedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsertOne) SetDeletedAt(v time.Time) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateDeletedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsertOne) ClearDeletedAt() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearDeletedAt()
	})
}

// SetEvent sets the "event" field.
func (u *FsEventUpsertOne) SetEvent(v string) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetEvent(v)
	})
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateEvent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateEvent()
	})
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsertOne) SetSubscriber(v uuid.UUID) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetSubscriber(v)
	})
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateSubscriber() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateSubscriber()
	})
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsertOne) SetUserFsevent(v int) *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUserFsevent(v)
	})
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsertOne) UpdateUserFsevent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUserFsevent()
	})
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsertOne) ClearUserFsevent() *FsEventUpsertOne {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearUserFsevent()
	})
}
// Exec executes the query.
func (u *FsEventUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for FsEventCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
// NOTE(review): this calls u.create.Exec directly and therefore skips the
// missing-OnConflict-options check performed by Exec above.
func (u *FsEventUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *FsEventUpsertOne) ID(ctx context.Context) (id int, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *FsEventUpsertOne) IDX(ctx context.Context) int {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// SetRawID forces the entity's primary key instead of relying on the
// database-generated value.
func (m *FsEventCreate) SetRawID(t int) *FsEventCreate {
	m.mutation.SetRawID(t)
	return m
}
// FsEventCreateBulk is the builder for creating many FsEvent entities in bulk.
type FsEventCreateBulk struct {
	config
	// err holds a deferred construction error, reported when Save/Exec runs.
	err      error
	builders []*FsEventCreate
	conflict []sql.ConflictOption
}
// Save creates the FsEvent entities in the database.
// It builds one CreateSpec per builder, chains the per-builder mutators so
// each builder's hooks run, and issues a single batch INSERT at the tail of
// the chain.
func (fecb *FsEventCreateBulk) Save(ctx context.Context) ([]*FsEvent, error) {
	if fecb.err != nil {
		return nil, fecb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(fecb.builders))
	nodes := make([]*FsEvent, len(fecb.builders))
	mutators := make([]Mutator, len(fecb.builders))
	for i := range fecb.builders {
		func(i int, root context.Context) {
			builder := fecb.builders[i]
			// Fix: the error returned by defaults() (e.g. an uninitialized
			// fsevent.DefaultCreatedAt when ent/runtime was not imported) was
			// previously discarded. Capture it here — at the same point in
			// the lifecycle, before hooks run — and surface it when the
			// mutator executes, matching the single-create Save behavior.
			derr := builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				if derr != nil {
					return nil, derr
				}
				mutation, ok := m.(*FsEventMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					_, err = mutators[i+1].Mutate(root, fecb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = fecb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, fecb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap with the builder's hooks, innermost-first.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		if _, err := mutators[0].Mutate(ctx, fecb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (fecb *FsEventCreateBulk) SaveX(ctx context.Context) []*FsEvent {
	v, err := fecb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (fecb *FsEventCreateBulk) Exec(ctx context.Context) error {
	_, err := fecb.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (fecb *FsEventCreateBulk) ExecX(ctx context.Context) {
	if err := fecb.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.FsEvent.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.FsEventUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (fecb *FsEventCreateBulk) OnConflict(opts ...sql.ConflictOption) *FsEventUpsertBulk {
	fecb.conflict = opts
	return &FsEventUpsertBulk{
		create: fecb,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (fecb *FsEventCreateBulk) OnConflictColumns(columns ...string) *FsEventUpsertBulk {
	fecb.conflict = append(fecb.conflict, sql.ConflictColumns(columns...))
	return &FsEventUpsertBulk{
		create: fecb,
	}
}
// FsEventUpsertBulk is the builder for "upsert"-ing
// a bulk of FsEvent nodes.
type FsEventUpsertBulk struct {
	create *FsEventCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *FsEventUpsertBulk) UpdateNewValues() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: if any builder proposed a value, keep the
		// stored one on conflict.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(fsevent.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.FsEvent.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *FsEventUpsertBulk) Ignore() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *FsEventUpsertBulk) DoNothing() *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the FsEventCreateBulk.OnConflict
// documentation for more info.
func (u *FsEventUpsertBulk) Update(set func(*FsEventUpsert)) *FsEventUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&FsEventUpsert{UpdateSet: update})
	}))
	return u
}
// The helpers below mirror the FsEventUpsertOne conveniences for the bulk
// builder, each forwarding to the corresponding FsEventUpsert method.

// SetUpdatedAt sets the "updated_at" field.
func (u *FsEventUpsertBulk) SetUpdatedAt(v time.Time) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateUpdatedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *FsEventUpsertBulk) SetDeletedAt(v time.Time) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateDeletedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *FsEventUpsertBulk) ClearDeletedAt() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearDeletedAt()
	})
}

// SetEvent sets the "event" field.
func (u *FsEventUpsertBulk) SetEvent(v string) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetEvent(v)
	})
}

// UpdateEvent sets the "event" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateEvent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateEvent()
	})
}

// SetSubscriber sets the "subscriber" field.
func (u *FsEventUpsertBulk) SetSubscriber(v uuid.UUID) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetSubscriber(v)
	})
}

// UpdateSubscriber sets the "subscriber" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateSubscriber() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateSubscriber()
	})
}

// SetUserFsevent sets the "user_fsevent" field.
func (u *FsEventUpsertBulk) SetUserFsevent(v int) *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.SetUserFsevent(v)
	})
}

// UpdateUserFsevent sets the "user_fsevent" field to the value that was provided on create.
func (u *FsEventUpsertBulk) UpdateUserFsevent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.UpdateUserFsevent()
	})
}

// ClearUserFsevent clears the value of the "user_fsevent" field.
func (u *FsEventUpsertBulk) ClearUserFsevent() *FsEventUpsertBulk {
	return u.Update(func(s *FsEventUpsert) {
		s.ClearUserFsevent()
	})
}
// Exec executes the query.
func (u *FsEventUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	// OnConflict must be configured on the bulk builder, not on the
	// individual create builders it wraps.
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the FsEventCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for FsEventCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
// NOTE(review): this calls u.create.Exec directly and therefore skips the
// validation performed by Exec above.
func (u *FsEventUpsertBulk) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

88
ent/fsevent_delete.go Normal file
View File

@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// FsEventDelete is the builder for deleting a FsEvent entity.
type FsEventDelete struct {
	config
	hooks    []Hook
	mutation *FsEventMutation
}

// Where appends a list predicates to the FsEventDelete builder.
func (fed *FsEventDelete) Where(ps ...predicate.FsEvent) *FsEventDelete {
	fed.mutation.Where(ps...)
	return fed
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (fed *FsEventDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, fed.sqlExec, fed.mutation, fed.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (fed *FsEventDelete) ExecX(ctx context.Context) int {
	n, err := fed.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec translates the accumulated predicates into a DELETE statement and
// runs it, returning the number of affected rows.
func (fed *FsEventDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(fsevent.Table, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
	if ps := fed.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, fed.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	fed.mutation.done = true
	return affected, err
}
// FsEventDeleteOne is the builder for deleting a single FsEvent entity.
type FsEventDeleteOne struct {
	fed *FsEventDelete
}

// Where appends a list predicates to the FsEventDelete builder.
func (fedo *FsEventDeleteOne) Where(ps ...predicate.FsEvent) *FsEventDeleteOne {
	fedo.fed.mutation.Where(ps...)
	return fedo
}

// Exec executes the deletion query.
// It returns a *NotFoundError when no row matched the predicates.
func (fedo *FsEventDeleteOne) Exec(ctx context.Context) error {
	n, err := fedo.fed.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{fsevent.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (fedo *FsEventDeleteOne) ExecX(ctx context.Context) {
	if err := fedo.Exec(ctx); err != nil {
		panic(err)
	}
}

605
ent/fsevent_query.go Normal file
View File

@ -0,0 +1,605 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// FsEventQuery is the builder for querying FsEvent entities.
type FsEventQuery struct {
	config
	ctx        *QueryContext
	order      []fsevent.OrderOption
	inters     []Interceptor
	predicates []predicate.FsEvent
	// withUser, when non-nil, requests eager-loading of the "user" edge.
	withUser *UserQuery
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the FsEventQuery builder.
func (feq *FsEventQuery) Where(ps ...predicate.FsEvent) *FsEventQuery {
	feq.predicates = append(feq.predicates, ps...)
	return feq
}

// Limit the number of records to be returned by this query.
func (feq *FsEventQuery) Limit(limit int) *FsEventQuery {
	feq.ctx.Limit = &limit
	return feq
}

// Offset to start from.
func (feq *FsEventQuery) Offset(offset int) *FsEventQuery {
	feq.ctx.Offset = &offset
	return feq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (feq *FsEventQuery) Unique(unique bool) *FsEventQuery {
	feq.ctx.Unique = &unique
	return feq
}

// Order specifies how the records should be ordered.
func (feq *FsEventQuery) Order(o ...fsevent.OrderOption) *FsEventQuery {
	feq.order = append(feq.order, o...)
	return feq
}

// QueryUser chains the current query on the "user" edge.
func (feq *FsEventQuery) QueryUser() *UserQuery {
	query := (&UserClient{config: feq.config}).Query()
	// The traversal path is evaluated lazily, when the returned query runs.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := feq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := feq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(fsevent.Table, fsevent.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, fsevent.UserTable, fsevent.UserColumn),
		)
		fromU = sqlgraph.SetNeighbors(feq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// First returns the first FsEvent entity from the query.
// Returns a *NotFoundError when no FsEvent was found.
func (feq *FsEventQuery) First(ctx context.Context) (*FsEvent, error) {
	nodes, err := feq.Limit(1).All(setContextOp(ctx, feq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{fsevent.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil instead of panicking.
func (feq *FsEventQuery) FirstX(ctx context.Context) *FsEvent {
	node, err := feq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first FsEvent ID from the query.
// Returns a *NotFoundError when no FsEvent ID was found.
func (feq *FsEventQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = feq.Limit(1).IDs(setContextOp(ctx, feq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{fsevent.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
func (feq *FsEventQuery) FirstIDX(ctx context.Context) int {
	id, err := feq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single FsEvent entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one FsEvent entity is found.
// Returns a *NotFoundError when no FsEvent entities are found.
func (feq *FsEventQuery) Only(ctx context.Context) (*FsEvent, error) {
	// Limit(2) is enough to distinguish zero, one, and many results.
	nodes, err := feq.Limit(2).All(setContextOp(ctx, feq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{fsevent.Label}
	default:
		return nil, &NotSingularError{fsevent.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (feq *FsEventQuery) OnlyX(ctx context.Context) *FsEvent {
	node, err := feq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only FsEvent ID in the query.
// Returns a *NotSingularError when more than one FsEvent ID is found.
// Returns a *NotFoundError when no entities are found.
func (feq *FsEventQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = feq.Limit(2).IDs(setContextOp(ctx, feq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{fsevent.Label}
	default:
		err = &NotSingularError{fsevent.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (feq *FsEventQuery) OnlyIDX(ctx context.Context) int {
	id, err := feq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of FsEvents.
func (feq *FsEventQuery) All(ctx context.Context) ([]*FsEvent, error) {
	ctx = setContextOp(ctx, feq.ctx, "All")
	if err := feq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*FsEvent, *FsEventQuery]()
	return withInterceptors[[]*FsEvent](ctx, feq, qr, feq.inters)
}

// AllX is like All, but panics if an error occurs.
func (feq *FsEventQuery) AllX(ctx context.Context) []*FsEvent {
	nodes, err := feq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of FsEvent IDs.
func (feq *FsEventQuery) IDs(ctx context.Context) (ids []int, err error) {
	// Traversal paths may produce duplicate rows; default to unique IDs
	// unless the caller chose otherwise.
	if feq.ctx.Unique == nil && feq.path != nil {
		feq.Unique(true)
	}
	ctx = setContextOp(ctx, feq.ctx, "IDs")
	if err = feq.Select(fsevent.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (feq *FsEventQuery) IDsX(ctx context.Context) []int {
	ids, err := feq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}

// Count returns the count of the given query.
func (feq *FsEventQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, feq.ctx, "Count")
	if err := feq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, feq, querierCount[*FsEventQuery](), feq.inters)
}

// CountX is like Count, but panics if an error occurs.
func (feq *FsEventQuery) CountX(ctx context.Context) int {
	count, err := feq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (feq *FsEventQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, feq.ctx, "Exist")
	switch _, err := feq.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (feq *FsEventQuery) ExistX(ctx context.Context) bool {
	exist, err := feq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
// Clone returns a duplicate of the FsEventQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (feq *FsEventQuery) Clone() *FsEventQuery {
	if feq == nil {
		return nil
	}
	return &FsEventQuery{
		config:     feq.config,
		ctx:        feq.ctx.Clone(),
		order:      append([]fsevent.OrderOption{}, feq.order...),
		inters:     append([]Interceptor{}, feq.inters...),
		predicates: append([]predicate.FsEvent{}, feq.predicates...),
		withUser:   feq.withUser.Clone(),
		// clone intermediate query.
		sql:  feq.sql.Clone(),
		path: feq.path,
	}
}

// WithUser tells the query-builder to eager-load the nodes that are connected to
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
func (feq *FsEventQuery) WithUser(opts ...func(*UserQuery)) *FsEventQuery {
	query := (&UserClient{config: feq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	feq.withUser = query
	return feq
}

// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.FsEvent.Query().
//		GroupBy(fsevent.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (feq *FsEventQuery) GroupBy(field string, fields ...string) *FsEventGroupBy {
	feq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &FsEventGroupBy{build: feq}
	grbuild.flds = &feq.ctx.Fields
	grbuild.label = fsevent.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.FsEvent.Query().
//		Select(fsevent.FieldCreatedAt).
//		Scan(ctx, &v)
func (feq *FsEventQuery) Select(fields ...string) *FsEventSelect {
	feq.ctx.Fields = append(feq.ctx.Fields, fields...)
	sbuild := &FsEventSelect{FsEventQuery: feq}
	sbuild.label = fsevent.Label
	sbuild.flds, sbuild.scan = &feq.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a FsEventSelect configured with the given aggregations.
func (feq *FsEventQuery) Aggregate(fns ...AggregateFunc) *FsEventSelect {
	return feq.Select().Aggregate(fns...)
}
// prepareQuery validates the query state before execution: it runs all
// registered traverser interceptors, checks that every requested field is a
// valid fsevent column, and resolves a deferred path function into the
// intermediate SQL selector if one was set.
func (feq *FsEventQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range feq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		trv, ok := inter.(Traverser)
		if !ok {
			continue
		}
		if err := trv.Traverse(ctx, feq); err != nil {
			return err
		}
	}
	for _, f := range feq.ctx.Fields {
		if !fsevent.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if feq.path == nil {
		return nil
	}
	prev, err := feq.path(ctx)
	if err != nil {
		return err
	}
	feq.sql = prev
	return nil
}
// sqlAll executes the query and scans all matching rows into FsEvent entities.
// Query hooks are applied to the spec before execution, and the "user" edge is
// eager-loaded afterwards when it was requested via WithUser.
func (feq *FsEventQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*FsEvent, error) {
var (
nodes = []*FsEvent{}
_spec = feq.querySpec()
// loadedTypes marks which edges were eager-loaded; index 0 is the "user" edge.
loadedTypes = [1]bool{
feq.withUser != nil,
}
)
// ScanValues provides the destination values for scanning one row's columns.
_spec.ScanValues = func(columns []string) ([]any, error) {
return (*FsEvent).scanValues(nil, columns)
}
// Assign creates a new FsEvent per row and populates it from the scanned values.
_spec.Assign = func(columns []string, values []any) error {
node := &FsEvent{config: feq.config}
nodes = append(nodes, node)
node.Edges.loadedTypes = loadedTypes
return node.assignValues(columns, values)
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, feq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
// Eager-load the "user" edge for all fetched nodes with a single extra query.
if query := feq.withUser; query != nil {
if err := feq.loadUser(ctx, query, nodes, nil,
func(n *FsEvent, e *User) { n.Edges.User = e }); err != nil {
return nil, err
}
}
return nodes, nil
}
// loadUser eager-loads the "user" edge for the given FsEvent nodes. It groups
// nodes by their user_fsevent foreign key, fetches all referenced users in one
// query, and invokes assign to attach each user to its owning nodes.
func (feq *FsEventQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*FsEvent, init func(*FsEvent), assign func(*FsEvent, *User)) error {
ids := make([]int, 0, len(nodes))
// nodeids maps a user id to every FsEvent node referencing it, so one
// fetched user can be assigned to multiple nodes.
nodeids := make(map[int][]*FsEvent)
for i := range nodes {
fk := nodes[i].UserFsevent
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
if len(ids) == 0 {
return nil
}
query.Where(user.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
return err
}
for _, n := range neighbors {
nodes, ok := nodeids[n.ID]
if !ok {
// A returned user that no node referenced indicates inconsistent data.
return fmt.Errorf(`unexpected foreign-key "user_fsevent" returned %v`, n.ID)
}
for i := range nodes {
assign(nodes[i], n)
}
}
return nil
}
// sqlCount executes a COUNT over the rows matched by the query. When explicit
// fields were selected, the count honors the query's Unique setting.
func (feq *FsEventQuery) sqlCount(ctx context.Context) (int, error) {
	spec := feq.querySpec()
	spec.Node.Columns = feq.ctx.Fields
	if len(feq.ctx.Fields) > 0 {
		spec.Unique = feq.ctx.Unique != nil && *feq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, feq.driver, spec)
}
// querySpec translates the builder's state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec for execution.
func (feq *FsEventQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
_spec.From = feq.sql
if unique := feq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if feq.path != nil {
// Graph traversals default to unique results to avoid duplicates from joins.
_spec.Unique = true
}
if fields := feq.ctx.Fields; len(fields) > 0 {
// The ID column is always selected first; requested fields follow.
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, fsevent.FieldID)
for i := range fields {
if fields[i] != fsevent.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
if feq.withUser != nil {
// Eager-loading "user" requires the foreign-key column to be present.
_spec.Node.AddColumnOnce(fsevent.FieldUserFsevent)
}
}
if ps := feq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := feq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := feq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := feq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
// sqlQuery builds the raw SQL selector for the query, applying column
// selection, predicates, ordering, DISTINCT, and limit/offset clauses.
func (feq *FsEventQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(feq.driver.Dialect())
t1 := builder.Table(fsevent.Table)
columns := feq.ctx.Fields
if len(columns) == 0 {
columns = fsevent.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if feq.sql != nil {
// Reuse the intermediate selector produced by a prior traversal step.
selector = feq.sql
selector.Select(selector.Columns(columns...)...)
}
if feq.ctx.Unique != nil && *feq.ctx.Unique {
selector.Distinct()
}
for _, p := range feq.predicates {
p(selector)
}
for _, p := range feq.order {
p(selector)
}
if offset := feq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := feq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// FsEventGroupBy is the group-by builder for FsEvent entities.
type FsEventGroupBy struct {
selector
// build is the underlying query whose results are grouped.
build *FsEventQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (fegb *FsEventGroupBy) Aggregate(fns ...AggregateFunc) *FsEventGroupBy {
fegb.fns = append(fegb.fns, fns...)
return fegb
}
// Scan applies the selector query and scans the result into the given value.
// v is typically a pointer to a slice of structs matching the grouped columns.
func (fegb *FsEventGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, fegb.build.ctx, "GroupBy")
if err := fegb.build.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*FsEventQuery, *FsEventGroupBy](ctx, fegb.build, fegb, fegb.build.inters, v)
}
// sqlScan builds the GROUP BY SQL statement (grouped columns plus any
// aggregations), executes it, and scans the rows into v.
func (fegb *FsEventGroupBy) sqlScan(ctx context.Context, root *FsEventQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(fegb.fns))
for _, fn := range fegb.fns {
aggregation = append(aggregation, fn(selector))
}
if len(selector.SelectedColumns()) == 0 {
// No explicit selection yet: select the grouped fields followed by the aggregations.
columns := make([]string, 0, len(*fegb.flds)+len(fegb.fns))
for _, f := range *fegb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
selector.GroupBy(selector.Columns(*fegb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := fegb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// FsEventSelect is the builder for selecting fields of FsEvent entities.
type FsEventSelect struct {
*FsEventQuery
selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (fes *FsEventSelect) Aggregate(fns ...AggregateFunc) *FsEventSelect {
fes.fns = append(fes.fns, fns...)
return fes
}
// Scan applies the selector query and scans the result into the given value.
// v is typically a pointer to a slice of structs matching the selected columns.
func (fes *FsEventSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, fes.ctx, "Select")
if err := fes.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*FsEventQuery, *FsEventSelect](ctx, fes.FsEventQuery, fes, fes.inters, v)
}
// sqlScan builds the SELECT statement (selected fields plus any aggregations),
// executes it, and scans the rows into v.
func (fes *FsEventSelect) sqlScan(ctx context.Context, root *FsEventQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(fes.fns))
for _, fn := range fes.fns {
aggregation = append(aggregation, fn(selector))
}
// With no selected fields, aggregations replace the selection entirely;
// otherwise they are appended after the selected fields.
switch n := len(*fes.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := fes.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}

494
ent/fsevent_update.go Normal file
View File

@ -0,0 +1,494 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/gofrs/uuid"
)
// FsEventUpdate is the builder for updating FsEvent entities.
type FsEventUpdate struct {
config
// hooks are the mutation hooks executed around Save.
hooks []Hook
// mutation accumulates the field/edge changes to apply.
mutation *FsEventMutation
}
// Where appends a list of predicates to the FsEventUpdate builder, restricting
// which rows the update applies to.
func (feu *FsEventUpdate) Where(ps ...predicate.FsEvent) *FsEventUpdate {
feu.mutation.Where(ps...)
return feu
}
// SetUpdatedAt sets the "updated_at" field.
func (feu *FsEventUpdate) SetUpdatedAt(t time.Time) *FsEventUpdate {
feu.mutation.SetUpdatedAt(t)
return feu
}
// SetDeletedAt sets the "deleted_at" field.
func (feu *FsEventUpdate) SetDeletedAt(t time.Time) *FsEventUpdate {
feu.mutation.SetDeletedAt(t)
return feu
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableDeletedAt(t *time.Time) *FsEventUpdate {
if t != nil {
feu.SetDeletedAt(*t)
}
return feu
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (feu *FsEventUpdate) ClearDeletedAt() *FsEventUpdate {
feu.mutation.ClearDeletedAt()
return feu
}
// SetEvent sets the "event" field.
func (feu *FsEventUpdate) SetEvent(s string) *FsEventUpdate {
feu.mutation.SetEvent(s)
return feu
}
// SetNillableEvent sets the "event" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableEvent(s *string) *FsEventUpdate {
if s != nil {
feu.SetEvent(*s)
}
return feu
}
// SetSubscriber sets the "subscriber" field.
func (feu *FsEventUpdate) SetSubscriber(u uuid.UUID) *FsEventUpdate {
feu.mutation.SetSubscriber(u)
return feu
}
// SetNillableSubscriber sets the "subscriber" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableSubscriber(u *uuid.UUID) *FsEventUpdate {
if u != nil {
feu.SetSubscriber(*u)
}
return feu
}
// SetUserFsevent sets the "user_fsevent" field.
func (feu *FsEventUpdate) SetUserFsevent(i int) *FsEventUpdate {
feu.mutation.SetUserFsevent(i)
return feu
}
// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (feu *FsEventUpdate) SetNillableUserFsevent(i *int) *FsEventUpdate {
if i != nil {
feu.SetUserFsevent(*i)
}
return feu
}
// ClearUserFsevent clears the value of the "user_fsevent" field.
func (feu *FsEventUpdate) ClearUserFsevent() *FsEventUpdate {
feu.mutation.ClearUserFsevent()
return feu
}
// SetUserID sets the "user" edge to the User entity by ID.
func (feu *FsEventUpdate) SetUserID(id int) *FsEventUpdate {
feu.mutation.SetUserID(id)
return feu
}
// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (feu *FsEventUpdate) SetNillableUserID(id *int) *FsEventUpdate {
if id != nil {
feu = feu.SetUserID(*id)
}
return feu
}
// SetUser sets the "user" edge to the User entity.
func (feu *FsEventUpdate) SetUser(u *User) *FsEventUpdate {
return feu.SetUserID(u.ID)
}
// Mutation returns the FsEventMutation object of the builder.
func (feu *FsEventUpdate) Mutation() *FsEventMutation {
return feu.mutation
}
// ClearUser clears the "user" edge to the User entity.
func (feu *FsEventUpdate) ClearUser() *FsEventUpdate {
feu.mutation.ClearUser()
return feu
}
// Save executes the query and returns the number of nodes affected by the update operation.
// Defaults (e.g. updated_at) are applied before the mutation hooks run.
func (feu *FsEventUpdate) Save(ctx context.Context) (int, error) {
if err := feu.defaults(); err != nil {
return 0, err
}
return withHooks(ctx, feu.sqlSave, feu.mutation, feu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (feu *FsEventUpdate) SaveX(ctx context.Context) int {
affected, err := feu.Save(ctx)
if err != nil {
panic(err)
}
return affected
}
// Exec executes the query.
func (feu *FsEventUpdate) Exec(ctx context.Context) error {
_, err := feu.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (feu *FsEventUpdate) ExecX(ctx context.Context) {
if err := feu.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
// It fills updated_at from the schema's update-default when not set explicitly.
func (feu *FsEventUpdate) defaults() error {
if _, ok := feu.mutation.UpdatedAt(); !ok {
if fsevent.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized fsevent.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := fsevent.UpdateDefaultUpdatedAt()
feu.mutation.SetUpdatedAt(v)
}
return nil
}
// sqlSave translates the mutation into an UPDATE spec (predicates, changed
// fields, and "user" edge clear/add operations) and executes it, returning
// the number of affected rows.
func (feu *FsEventUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := sqlgraph.NewUpdateSpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
if ps := feu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if value, ok := feu.mutation.UpdatedAt(); ok {
_spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := feu.mutation.DeletedAt(); ok {
_spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
}
if feu.mutation.DeletedAtCleared() {
_spec.ClearField(fsevent.FieldDeletedAt, field.TypeTime)
}
if value, ok := feu.mutation.Event(); ok {
_spec.SetField(fsevent.FieldEvent, field.TypeString, value)
}
if value, ok := feu.mutation.Subscriber(); ok {
_spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
}
// Clearing the "user" edge removes the M2O foreign-key reference.
if feu.mutation.UserCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: fsevent.UserTable,
Columns: []string{fsevent.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
// Setting the "user" edge writes the new foreign-key reference.
if nodes := feu.mutation.UserIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: fsevent.UserTable,
Columns: []string{fsevent.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if n, err = sqlgraph.UpdateNodes(ctx, feu.driver, _spec); err != nil {
// Map low-level sqlgraph errors onto the package's typed errors.
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{fsevent.Label}
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return 0, err
}
feu.mutation.done = true
return n, nil
}
// FsEventUpdateOne is the builder for updating a single FsEvent entity.
type FsEventUpdateOne struct {
config
// fields restricts the columns returned by the update (see Select).
fields []string
// hooks are the mutation hooks executed around Save.
hooks []Hook
// mutation accumulates the field/edge changes to apply.
mutation *FsEventMutation
}
// SetUpdatedAt sets the "updated_at" field.
func (feuo *FsEventUpdateOne) SetUpdatedAt(t time.Time) *FsEventUpdateOne {
feuo.mutation.SetUpdatedAt(t)
return feuo
}
// SetDeletedAt sets the "deleted_at" field.
func (feuo *FsEventUpdateOne) SetDeletedAt(t time.Time) *FsEventUpdateOne {
feuo.mutation.SetDeletedAt(t)
return feuo
}
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableDeletedAt(t *time.Time) *FsEventUpdateOne {
if t != nil {
feuo.SetDeletedAt(*t)
}
return feuo
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (feuo *FsEventUpdateOne) ClearDeletedAt() *FsEventUpdateOne {
feuo.mutation.ClearDeletedAt()
return feuo
}
// SetEvent sets the "event" field.
func (feuo *FsEventUpdateOne) SetEvent(s string) *FsEventUpdateOne {
feuo.mutation.SetEvent(s)
return feuo
}
// SetNillableEvent sets the "event" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableEvent(s *string) *FsEventUpdateOne {
if s != nil {
feuo.SetEvent(*s)
}
return feuo
}
// SetSubscriber sets the "subscriber" field.
func (feuo *FsEventUpdateOne) SetSubscriber(u uuid.UUID) *FsEventUpdateOne {
feuo.mutation.SetSubscriber(u)
return feuo
}
// SetNillableSubscriber sets the "subscriber" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableSubscriber(u *uuid.UUID) *FsEventUpdateOne {
if u != nil {
feuo.SetSubscriber(*u)
}
return feuo
}
// SetUserFsevent sets the "user_fsevent" field.
func (feuo *FsEventUpdateOne) SetUserFsevent(i int) *FsEventUpdateOne {
feuo.mutation.SetUserFsevent(i)
return feuo
}
// SetNillableUserFsevent sets the "user_fsevent" field if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableUserFsevent(i *int) *FsEventUpdateOne {
if i != nil {
feuo.SetUserFsevent(*i)
}
return feuo
}
// ClearUserFsevent clears the value of the "user_fsevent" field.
func (feuo *FsEventUpdateOne) ClearUserFsevent() *FsEventUpdateOne {
feuo.mutation.ClearUserFsevent()
return feuo
}
// SetUserID sets the "user" edge to the User entity by ID.
func (feuo *FsEventUpdateOne) SetUserID(id int) *FsEventUpdateOne {
feuo.mutation.SetUserID(id)
return feuo
}
// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
func (feuo *FsEventUpdateOne) SetNillableUserID(id *int) *FsEventUpdateOne {
if id != nil {
feuo = feuo.SetUserID(*id)
}
return feuo
}
// SetUser sets the "user" edge to the User entity.
func (feuo *FsEventUpdateOne) SetUser(u *User) *FsEventUpdateOne {
return feuo.SetUserID(u.ID)
}
// Mutation returns the FsEventMutation object of the builder.
func (feuo *FsEventUpdateOne) Mutation() *FsEventMutation {
return feuo.mutation
}
// ClearUser clears the "user" edge to the User entity.
func (feuo *FsEventUpdateOne) ClearUser() *FsEventUpdateOne {
feuo.mutation.ClearUser()
return feuo
}
// Where appends a list of predicates to the FsEventUpdateOne builder.
func (feuo *FsEventUpdateOne) Where(ps ...predicate.FsEvent) *FsEventUpdateOne {
feuo.mutation.Where(ps...)
return feuo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (feuo *FsEventUpdateOne) Select(field string, fields ...string) *FsEventUpdateOne {
feuo.fields = append([]string{field}, fields...)
return feuo
}
// Save executes the query and returns the updated FsEvent entity.
// Defaults (e.g. updated_at) are applied before the mutation hooks run.
func (feuo *FsEventUpdateOne) Save(ctx context.Context) (*FsEvent, error) {
if err := feuo.defaults(); err != nil {
return nil, err
}
return withHooks(ctx, feuo.sqlSave, feuo.mutation, feuo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
func (feuo *FsEventUpdateOne) SaveX(ctx context.Context) *FsEvent {
node, err := feuo.Save(ctx)
if err != nil {
panic(err)
}
return node
}
// Exec executes the query on the entity.
func (feuo *FsEventUpdateOne) Exec(ctx context.Context) error {
_, err := feuo.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (feuo *FsEventUpdateOne) ExecX(ctx context.Context) {
if err := feuo.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
// It fills updated_at from the schema's update-default when not set explicitly.
func (feuo *FsEventUpdateOne) defaults() error {
if _, ok := feuo.mutation.UpdatedAt(); !ok {
if fsevent.UpdateDefaultUpdatedAt == nil {
return fmt.Errorf("ent: uninitialized fsevent.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
}
v := fsevent.UpdateDefaultUpdatedAt()
feuo.mutation.SetUpdatedAt(v)
}
return nil
}
// sqlSave translates the mutation into an UPDATE spec for a single entity
// (identified by its mutation id), executes it, and returns the updated
// FsEvent scanned from the database.
func (feuo *FsEventUpdateOne) sqlSave(ctx context.Context) (_node *FsEvent, err error) {
_spec := sqlgraph.NewUpdateSpec(fsevent.Table, fsevent.Columns, sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt))
id, ok := feuo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "FsEvent.id" for update`)}
}
_spec.Node.ID.Value = id
// Honor a partial field selection (see Select); the ID column is always included.
if fields := feuo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, fsevent.FieldID)
for _, f := range fields {
if !fsevent.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != fsevent.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := feuo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if value, ok := feuo.mutation.UpdatedAt(); ok {
_spec.SetField(fsevent.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := feuo.mutation.DeletedAt(); ok {
_spec.SetField(fsevent.FieldDeletedAt, field.TypeTime, value)
}
if feuo.mutation.DeletedAtCleared() {
_spec.ClearField(fsevent.FieldDeletedAt, field.TypeTime)
}
if value, ok := feuo.mutation.Event(); ok {
_spec.SetField(fsevent.FieldEvent, field.TypeString, value)
}
if value, ok := feuo.mutation.Subscriber(); ok {
_spec.SetField(fsevent.FieldSubscriber, field.TypeUUID, value)
}
// Clearing the "user" edge removes the M2O foreign-key reference.
if feuo.mutation.UserCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: fsevent.UserTable,
Columns: []string{fsevent.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
// Setting the "user" edge writes the new foreign-key reference.
if nodes := feuo.mutation.UserIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: fsevent.UserTable,
Columns: []string{fsevent.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
// The updated row is scanned back into _node via the Assign/ScanValues hooks.
_node = &FsEvent{config: feuo.config}
_spec.Assign = _node.assignValues
_spec.ScanValues = _node.scanValues
if err = sqlgraph.UpdateNode(ctx, feuo.driver, _spec); err != nil {
// Map low-level sqlgraph errors onto the package's typed errors.
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{fsevent.Label}
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return nil, err
}
feuo.mutation.done = true
return _node, nil
}

View File

@ -57,6 +57,18 @@ func (f FileFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileMutation", m)
}
// The FsEventFunc type adapts an ordinary function so it can be used as an
// FsEvent mutator.
type FsEventFunc func(context.Context, *ent.FsEventMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f FsEventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	mv, ok := m.(*ent.FsEventMutation)
	if !ok {
		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FsEventMutation", m)
	}
	return f(ctx, mv)
}
// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)

View File

@ -12,6 +12,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
@ -188,6 +189,33 @@ func (f TraverseFile) Traverse(ctx context.Context, q ent.Query) error {
return fmt.Errorf("unexpected query type %T. expect *ent.FileQuery", q)
}
// The FsEventFunc type adapts an ordinary function so it can be used as a Querier.
type FsEventFunc func(context.Context, *ent.FsEventQuery) (ent.Value, error)

// Query calls f(ctx, q).
func (f FsEventFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
	fq, ok := q.(*ent.FsEventQuery)
	if !ok {
		return nil, fmt.Errorf("unexpected query type %T. expect *ent.FsEventQuery", q)
	}
	return f(ctx, fq)
}
// The TraverseFsEvent type adapts an ordinary function so it can be used as a Traverser.
type TraverseFsEvent func(context.Context, *ent.FsEventQuery) error

// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseFsEvent) Intercept(next ent.Querier) ent.Querier {
	return next
}

// Traverse calls f(ctx, q).
func (f TraverseFsEvent) Traverse(ctx context.Context, q ent.Query) error {
	fq, ok := q.(*ent.FsEventQuery)
	if !ok {
		return fmt.Errorf("unexpected query type %T. expect *ent.FsEventQuery", q)
	}
	return f(ctx, fq)
}
// The GroupFunc type is an adapter to allow the use of ordinary function as a Querier.
type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error)
@ -442,6 +470,8 @@ func NewQuery(q ent.Query) (Query, error) {
return &query[*ent.EntityQuery, predicate.Entity, entity.OrderOption]{typ: ent.TypeEntity, tq: q}, nil
case *ent.FileQuery:
return &query[*ent.FileQuery, predicate.File, file.OrderOption]{typ: ent.TypeFile, tq: q}, nil
case *ent.FsEventQuery:
return &query[*ent.FsEventQuery, predicate.FsEvent, fsevent.OrderOption]{typ: ent.TypeFsEvent, tq: q}, nil
case *ent.GroupQuery:
return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
case *ent.MetadataQuery:

File diff suppressed because one or more lines are too long

View File

@ -107,7 +107,6 @@ var (
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "type", Type: field.TypeInt},
{Name: "name", Type: field.TypeString},
{Name: "size", Type: field.TypeInt64, Default: 0},
@ -126,19 +125,19 @@ var (
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "files_files_children",
Columns: []*schema.Column{FilesColumns[10]},
Columns: []*schema.Column{FilesColumns[9]},
RefColumns: []*schema.Column{FilesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_storage_policies_files",
Columns: []*schema.Column{FilesColumns[11]},
Columns: []*schema.Column{FilesColumns[10]},
RefColumns: []*schema.Column{StoragePoliciesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_users_files",
Columns: []*schema.Column{FilesColumns[12]},
Columns: []*schema.Column{FilesColumns[11]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
@ -147,17 +146,41 @@ var (
{
Name: "file_file_children_name",
Unique: true,
Columns: []*schema.Column{FilesColumns[10], FilesColumns[5]},
Columns: []*schema.Column{FilesColumns[9], FilesColumns[4]},
},
{
Name: "file_file_children_type_updated_at",
Unique: false,
Columns: []*schema.Column{FilesColumns[10], FilesColumns[4], FilesColumns[2]},
Columns: []*schema.Column{FilesColumns[9], FilesColumns[3], FilesColumns[2]},
},
{
Name: "file_file_children_type_size",
Unique: false,
Columns: []*schema.Column{FilesColumns[10], FilesColumns[4], FilesColumns[6]},
Columns: []*schema.Column{FilesColumns[9], FilesColumns[3], FilesColumns[5]},
},
},
}
// FsEventsColumns holds the columns for the "fs_events" table.
FsEventsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}},
{Name: "event", Type: field.TypeString, Size: 2147483647},
{Name: "subscriber", Type: field.TypeUUID},
{Name: "user_fsevent", Type: field.TypeInt, Nullable: true},
}
// FsEventsTable holds the schema information for the "fs_events" table.
FsEventsTable = &schema.Table{
Name: "fs_events",
Columns: FsEventsColumns,
PrimaryKey: []*schema.Column{FsEventsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "fs_events_users_fsevents",
Columns: []*schema.Column{FsEventsColumns[6]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
},
}
@ -445,6 +468,7 @@ var (
DirectLinksTable,
EntitiesTable,
FilesTable,
FsEventsTable,
GroupsTable,
MetadataTable,
NodesTable,
@ -466,6 +490,7 @@ func init() {
FilesTable.ForeignKeys[0].RefTable = FilesTable
FilesTable.ForeignKeys[1].RefTable = StoragePoliciesTable
FilesTable.ForeignKeys[2].RefTable = UsersTable
FsEventsTable.ForeignKeys[0].RefTable = UsersTable
GroupsTable.ForeignKeys[0].RefTable = StoragePoliciesTable
MetadataTable.ForeignKeys[0].RefTable = FilesTable
PasskeysTable.ForeignKeys[0].RefTable = UsersTable

File diff suppressed because it is too large Load Diff

View File

@ -28,6 +28,12 @@ func (m *FileMutation) SetRawID(t int) {
// SetRawID sets the mutation's id field directly to the given raw value.
func (m *FsEventMutation) SetRawID(t int) {
m.id = &t
}
// SetRawID sets the mutation's id field directly to the given raw value.
func (m *GroupMutation) SetRawID(t int) {
m.id = &t
}

View File

@ -18,6 +18,9 @@ type Entity func(*sql.Selector)
// File is the predicate function for file builders.
type File func(*sql.Selector)
// FsEvent is the predicate function for fsevent builders.
type FsEvent func(*sql.Selector)
// Group is the predicate function for group builders.
type Group func(*sql.Selector)

View File

@ -9,6 +9,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
@ -87,33 +88,45 @@ func init() {
entityDescReferenceCount := entityFields[3].Descriptor()
// entity.DefaultReferenceCount holds the default value on creation for the reference_count field.
entity.DefaultReferenceCount = entityDescReferenceCount.Default.(int)
fileMixin := schema.File{}.Mixin()
fileMixinHooks0 := fileMixin[0].Hooks()
file.Hooks[0] = fileMixinHooks0[0]
fileMixinInters0 := fileMixin[0].Interceptors()
file.Interceptors[0] = fileMixinInters0[0]
fileMixinFields0 := fileMixin[0].Fields()
_ = fileMixinFields0
fileHooks := schema.File{}.Hooks()
file.Hooks[0] = fileHooks[0]
fileFields := schema.File{}.Fields()
_ = fileFields
// fileDescCreatedAt is the schema descriptor for created_at field.
fileDescCreatedAt := fileMixinFields0[0].Descriptor()
fileDescCreatedAt := fileFields[0].Descriptor()
// file.DefaultCreatedAt holds the default value on creation for the created_at field.
file.DefaultCreatedAt = fileDescCreatedAt.Default.(func() time.Time)
// fileDescUpdatedAt is the schema descriptor for updated_at field.
fileDescUpdatedAt := fileMixinFields0[1].Descriptor()
fileDescUpdatedAt := fileFields[1].Descriptor()
// file.DefaultUpdatedAt holds the default value on creation for the updated_at field.
file.DefaultUpdatedAt = fileDescUpdatedAt.Default.(func() time.Time)
// file.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
file.UpdateDefaultUpdatedAt = fileDescUpdatedAt.UpdateDefault.(func() time.Time)
// fileDescSize is the schema descriptor for size field.
fileDescSize := fileFields[3].Descriptor()
fileDescSize := fileFields[5].Descriptor()
// file.DefaultSize holds the default value on creation for the size field.
file.DefaultSize = fileDescSize.Default.(int64)
// fileDescIsSymbolic is the schema descriptor for is_symbolic field.
fileDescIsSymbolic := fileFields[6].Descriptor()
fileDescIsSymbolic := fileFields[8].Descriptor()
// file.DefaultIsSymbolic holds the default value on creation for the is_symbolic field.
file.DefaultIsSymbolic = fileDescIsSymbolic.Default.(bool)
fseventMixin := schema.FsEvent{}.Mixin()
fseventMixinHooks0 := fseventMixin[0].Hooks()
fsevent.Hooks[0] = fseventMixinHooks0[0]
fseventMixinInters0 := fseventMixin[0].Interceptors()
fsevent.Interceptors[0] = fseventMixinInters0[0]
fseventMixinFields0 := fseventMixin[0].Fields()
_ = fseventMixinFields0
fseventFields := schema.FsEvent{}.Fields()
_ = fseventFields
// fseventDescCreatedAt is the schema descriptor for created_at field.
fseventDescCreatedAt := fseventMixinFields0[0].Descriptor()
// fsevent.DefaultCreatedAt holds the default value on creation for the created_at field.
fsevent.DefaultCreatedAt = fseventDescCreatedAt.Default.(func() time.Time)
// fseventDescUpdatedAt is the schema descriptor for updated_at field.
fseventDescUpdatedAt := fseventMixinFields0[1].Descriptor()
// fsevent.DefaultUpdatedAt holds the default value on creation for the updated_at field.
fsevent.DefaultUpdatedAt = fseventDescUpdatedAt.Default.(func() time.Time)
// fsevent.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
fsevent.UpdateDefaultUpdatedAt = fseventDescUpdatedAt.UpdateDefault.(func() time.Time)
groupMixin := schema.Group{}.Mixin()
groupMixinHooks0 := groupMixin[0].Hooks()
group.Hooks[0] = groupMixinHooks0[0]

View File

@ -25,8 +25,9 @@ func (Entity) Fields() []ent.Field {
field.UUID("upload_session_id", uuid.Must(uuid.NewV4())).
Optional().
Nillable(),
field.JSON("recycle_options", &types.EntityRecycleOption{}).
Optional(),
field.JSON("props", &types.EntityProps{}).
Optional().
StorageKey("recycle_options"),
}
}

View File

@ -1,10 +1,15 @@
package schema
import (
"context"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"entgo.io/ent/schema/index"
"github.com/cloudreve/Cloudreve/v4/ent/hook"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
@ -16,6 +21,17 @@ type File struct {
// Fields of the File.
func (File) Fields() []ent.Field {
return []ent.Field{
field.Time("created_at").
Immutable().
Default(time.Now).
SchemaType(map[string]string{
dialect.MySQL: "datetime",
}),
field.Time("updated_at").
Default(time.Now).
SchemaType(map[string]string{
dialect.MySQL: "datetime",
}),
field.Int("type"),
field.String("name"),
field.Int("owner_id"),
@ -66,8 +82,19 @@ func (File) Indexes() []ent.Index {
}
}
func (File) Mixin() []ent.Mixin {
return []ent.Mixin{
CommonMixin{},
// Hooks of the File. On update mutations it sets "updated_at" to the
// current time unless the caller has already set the field explicitly,
// preserving the auto-touch behavior previously supplied by the mixin.
func (f File) Hooks() []ent.Hook {
	return []ent.Hook{
		hook.On(func(next ent.Mutator) ent.Mutator {
			return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
				// Only touch updated_at when the mutation supports the
				// setter and the field was not provided by the caller.
				if s, ok := m.(interface{ SetUpdatedAt(time.Time) }); ok {
					if _, set := m.Field("updated_at"); !set {
						s.SetUpdatedAt(time.Now())
					}
				}
				return next.Mutate(ctx, m)
			})
		}, ent.OpUpdate|ent.OpUpdateOne),
	}
}

38
ent/schema/fsevent.go Normal file
View File

@ -0,0 +1,38 @@
package schema
import (
"entgo.io/ent"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"github.com/gofrs/uuid"
)
// FsEvent holds the schema definition for the FsEvent entity.
// Each row carries a textual event payload addressed to a subscriber
// UUID, optionally owned by a user — presumably a queued filesystem
// change notification (see the SSE feature); TODO confirm with callers.
type FsEvent struct {
	ent.Schema
}
// Fields of the FsEvent.
func (FsEvent) Fields() []ent.Field {
	return []ent.Field{
		// Serialized event payload; encoding is not visible here — TODO confirm.
		field.Text("event"),
		// UUID identifying the subscriber this event is addressed to.
		field.UUID("subscriber", uuid.Must(uuid.NewV4())),
		// FK column backing the user -> fsevents edge; optional, so an
		// event row may exist without an owning user.
		field.Int("user_fsevent").Optional(),
	}
}
// Edges of the FsEvent.
func (FsEvent) Edges() []ent.Edge {
	return []ent.Edge{
		// Inverse of User.fsevents (O2M); stored in the user_fsevent FK field.
		edge.From("user", User.Type).
			Ref("fsevents").
			Field("user_fsevent").
			Unique(),
	}
}
// Mixin of the FsEvent. CommonMixin contributes the shared schema parts
// (the created_at/updated_at descriptors wired up in the runtime init).
func (FsEvent) Mixin() []ent.Mixin {
	return []ent.Mixin{
		CommonMixin{},
	}
}

View File

@ -51,6 +51,7 @@ func (User) Edges() []ent.Edge {
edge.To("shares", Share.Type),
edge.To("passkey", Passkey.Type),
edge.To("tasks", Task.Type),
edge.To("fsevents", FsEvent.Type),
edge.To("entities", Entity.Type),
}
}

View File

@ -22,6 +22,8 @@ type Tx struct {
Entity *EntityClient
// File is the client for interacting with the File builders.
File *FileClient
// FsEvent is the client for interacting with the FsEvent builders.
FsEvent *FsEventClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// Metadata is the client for interacting with the Metadata builders.
@ -175,6 +177,7 @@ func (tx *Tx) init() {
tx.DirectLink = NewDirectLinkClient(tx.config)
tx.Entity = NewEntityClient(tx.config)
tx.File = NewFileClient(tx.config)
tx.FsEvent = NewFsEventClient(tx.config)
tx.Group = NewGroupClient(tx.config)
tx.Metadata = NewMetadataClient(tx.config)
tx.Node = NewNodeClient(tx.config)

View File

@ -64,11 +64,13 @@ type UserEdges struct {
Passkey []*Passkey `json:"passkey,omitempty"`
// Tasks holds the value of the tasks edge.
Tasks []*Task `json:"tasks,omitempty"`
// Fsevents holds the value of the fsevents edge.
Fsevents []*FsEvent `json:"fsevents,omitempty"`
// Entities holds the value of the entities edge.
Entities []*Entity `json:"entities,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [7]bool
loadedTypes [8]bool
}
// GroupOrErr returns the Group value or an error if the edge
@ -129,10 +131,19 @@ func (e UserEdges) TasksOrErr() ([]*Task, error) {
return nil, &NotLoadedError{edge: "tasks"}
}
// FseventsOrErr returns the Fsevents value or an error if the edge
// was not loaded in eager-loading.
func (e UserEdges) FseventsOrErr() ([]*FsEvent, error) {
	// Guard: slot 6 of loadedTypes tracks the fsevents edge.
	if !e.loadedTypes[6] {
		return nil, &NotLoadedError{edge: "fsevents"}
	}
	return e.Fsevents, nil
}
// EntitiesOrErr returns the Entities value or an error if the edge
// was not loaded in eager-loading.
func (e UserEdges) EntitiesOrErr() ([]*Entity, error) {
if e.loadedTypes[6] {
if e.loadedTypes[7] {
return e.Entities, nil
}
return nil, &NotLoadedError{edge: "entities"}
@ -290,6 +301,11 @@ func (u *User) QueryTasks() *TaskQuery {
return NewUserClient(u.config).QueryTasks(u)
}
// QueryFsevents queries the "fsevents" edge of the User entity.
func (u *User) QueryFsevents() *FsEventQuery {
	client := NewUserClient(u.config)
	return client.QueryFsevents(u)
}
// QueryEntities queries the "entities" edge of the User entity.
func (u *User) QueryEntities() *EntityQuery {
return NewUserClient(u.config).QueryEntities(u)
@ -393,10 +409,16 @@ func (e *User) SetTasks(v []*Task) {
e.Edges.loadedTypes[5] = true
}
// SetFsevents manually set the edge as loaded state.
// Slot 6 of UserEdges.loadedTypes corresponds to the fsevents edge.
func (e *User) SetFsevents(v []*FsEvent) {
	e.Edges.Fsevents = v
	e.Edges.loadedTypes[6] = true
}
// SetEntities manually set the edge as loaded state.
// Entities occupies slot 7 of loadedTypes (slot 6 now belongs to the
// fsevents edge); the stale slot-6 assignment is removed so marking
// entities loaded no longer falsely marks fsevents as loaded.
func (e *User) SetEntities(v []*Entity) {
	e.Edges.Entities = v
	e.Edges.loadedTypes[7] = true
}
// Users is a parsable slice of User.

View File

@ -53,6 +53,8 @@ const (
EdgePasskey = "passkey"
// EdgeTasks holds the string denoting the tasks edge name in mutations.
EdgeTasks = "tasks"
// EdgeFsevents holds the string denoting the fsevents edge name in mutations.
EdgeFsevents = "fsevents"
// EdgeEntities holds the string denoting the entities edge name in mutations.
EdgeEntities = "entities"
// Table holds the table name of the user in the database.
@ -99,6 +101,13 @@ const (
TasksInverseTable = "tasks"
// TasksColumn is the table column denoting the tasks relation/edge.
TasksColumn = "user_tasks"
// FseventsTable is the table that holds the fsevents relation/edge.
FseventsTable = "fs_events"
// FseventsInverseTable is the table name for the FsEvent entity.
// It exists in this package in order to avoid circular dependency with the "fsevent" package.
FseventsInverseTable = "fs_events"
// FseventsColumn is the table column denoting the fsevents relation/edge.
FseventsColumn = "user_fsevent"
// EntitiesTable is the table that holds the entities relation/edge.
EntitiesTable = "entities"
// EntitiesInverseTable is the table name for the Entity entity.
@ -327,6 +336,20 @@ func ByTasks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
}
}
// ByFseventsCount orders the results by fsevents count.
func ByFseventsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		step := newFseventsStep()
		sqlgraph.OrderByNeighborsCount(s, step, opts...)
	}
}
// ByFsevents orders the results by fsevents terms.
func ByFsevents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		// Flatten the mandatory first term with the optional rest.
		allTerms := append([]sql.OrderTerm{term}, terms...)
		sqlgraph.OrderByNeighborTerms(s, newFseventsStep(), allTerms...)
	}
}
// ByEntitiesCount orders the results by entities count.
func ByEntitiesCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
@ -382,6 +405,13 @@ func newTasksStep() *sqlgraph.Step {
sqlgraph.Edge(sqlgraph.O2M, false, TasksTable, TasksColumn),
)
}
// newFseventsStep builds the sqlgraph traversal step for the O2M
// "fsevents" edge: users -> fs_events joined on the user_fsevent column.
func newFseventsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(FseventsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, FseventsTable, FseventsColumn),
	)
}
func newEntitiesStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),

View File

@ -818,6 +818,29 @@ func HasTasksWith(preds ...predicate.Task) predicate.User {
})
}
// HasFsevents applies the HasEdge predicate on the "fsevents" edge.
func HasFsevents() predicate.User {
	return predicate.User(func(s *sql.Selector) {
		// Existence-only check: no To() vertex is required, the edge
		// join on the FK column is enough.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, FseventsTable, FseventsColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}
// HasFseventsWith applies the HasEdge predicate on the "fsevents" edge with a given conditions (other predicates).
func HasFseventsWith(preds ...predicate.FsEvent) predicate.User {
	return predicate.User(func(s *sql.Selector) {
		step := newFseventsStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// Apply every FsEvent predicate to the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}
// HasEntities applies the HasEdge predicate on the "entities" edge.
func HasEntities() predicate.User {
return predicate.User(func(s *sql.Selector) {

View File

@ -14,6 +14,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
"github.com/cloudreve/Cloudreve/v4/ent/share"
@ -252,6 +253,21 @@ func (uc *UserCreate) AddTasks(t ...*Task) *UserCreate {
return uc.AddTaskIDs(ids...)
}
// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
// The IDs are staged on the mutation and linked when the user is created.
func (uc *UserCreate) AddFseventIDs(ids ...int) *UserCreate {
	uc.mutation.AddFseventIDs(ids...)
	return uc
}
// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uc *UserCreate) AddFsevents(f ...*FsEvent) *UserCreate {
	// Collect the entity IDs and delegate to the ID-based variant.
	ids := make([]int, 0, len(f))
	for _, ev := range f {
		ids = append(ids, ev.ID)
	}
	return uc.AddFseventIDs(ids...)
}
// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uc *UserCreate) AddEntityIDs(ids ...int) *UserCreate {
uc.mutation.AddEntityIDs(ids...)
@ -549,6 +565,22 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
}
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := uc.mutation.FseventsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := uc.mutation.EntitiesIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,

View File

@ -14,6 +14,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
@ -35,6 +36,7 @@ type UserQuery struct {
withShares *ShareQuery
withPasskey *PasskeyQuery
withTasks *TaskQuery
withFsevents *FsEventQuery
withEntities *EntityQuery
// intermediate query (i.e. traversal path).
sql *sql.Selector
@ -204,6 +206,28 @@ func (uq *UserQuery) QueryTasks() *TaskQuery {
return query
}
// QueryFsevents chains the current query on the "fsevents" edge.
func (uq *UserQuery) QueryFsevents() *FsEventQuery {
	query := (&FsEventClient{config: uq.config}).Query()
	// The traversal path is resolved lazily, at execution time.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		// Validate the base user query before deriving the neighbor selector.
		if err := uq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := uq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// O2M step from users to fs_events via the user_fsevent column.
		step := sqlgraph.NewStep(
			sqlgraph.From(user.Table, user.FieldID, selector),
			sqlgraph.To(fsevent.Table, fsevent.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, user.FseventsTable, user.FseventsColumn),
		)
		fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// QueryEntities chains the current query on the "entities" edge.
func (uq *UserQuery) QueryEntities() *EntityQuery {
query := (&EntityClient{config: uq.config}).Query()
@ -424,6 +448,7 @@ func (uq *UserQuery) Clone() *UserQuery {
withShares: uq.withShares.Clone(),
withPasskey: uq.withPasskey.Clone(),
withTasks: uq.withTasks.Clone(),
withFsevents: uq.withFsevents.Clone(),
withEntities: uq.withEntities.Clone(),
// clone intermediate query.
sql: uq.sql.Clone(),
@ -497,6 +522,17 @@ func (uq *UserQuery) WithTasks(opts ...func(*TaskQuery)) *UserQuery {
return uq
}
// WithFsevents tells the query-builder to eager-load the nodes that are connected to
// the "fsevents" edge. The optional arguments are used to configure the query builder of the edge.
func (uq *UserQuery) WithFsevents(opts ...func(*FsEventQuery)) *UserQuery {
	edgeQuery := (&FsEventClient{config: uq.config}).Query()
	// Let each option customize the edge query before it is attached.
	for _, configure := range opts {
		configure(edgeQuery)
	}
	uq.withFsevents = edgeQuery
	return uq
}
// WithEntities tells the query-builder to eager-load the nodes that are connected to
// the "entities" edge. The optional arguments are used to configure the query builder of the edge.
func (uq *UserQuery) WithEntities(opts ...func(*EntityQuery)) *UserQuery {
@ -586,13 +622,14 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
var (
nodes = []*User{}
_spec = uq.querySpec()
loadedTypes = [7]bool{
loadedTypes = [8]bool{
uq.withGroup != nil,
uq.withFiles != nil,
uq.withDavAccounts != nil,
uq.withShares != nil,
uq.withPasskey != nil,
uq.withTasks != nil,
uq.withFsevents != nil,
uq.withEntities != nil,
}
)
@ -655,6 +692,13 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
return nil, err
}
}
if query := uq.withFsevents; query != nil {
if err := uq.loadFsevents(ctx, query, nodes,
func(n *User) { n.Edges.Fsevents = []*FsEvent{} },
func(n *User, e *FsEvent) { n.Edges.Fsevents = append(n.Edges.Fsevents, e) }); err != nil {
return nil, err
}
}
if query := uq.withEntities; query != nil {
if err := uq.loadEntities(ctx, query, nodes,
func(n *User) { n.Edges.Entities = []*Entity{} },
@ -845,6 +889,36 @@ func (uq *UserQuery) loadTasks(ctx context.Context, query *TaskQuery, nodes []*U
}
return nil
}
// loadFsevents eager-loads the "fsevents" edge for the given users with a
// single query, then hands each FsEvent to assign for attachment to its
// owning user. init (if non-nil) prepares each node's edge container.
func (uq *UserQuery) loadFsevents(ctx context.Context, query *FsEventQuery, nodes []*User, init func(*User), assign func(*User, *FsEvent)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int]*User)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// If the caller restricted the selected columns, force-include the FK
	// column so results can still be mapped back to their users.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(fsevent.FieldUserFsevent)
	}
	// Fetch all events whose FK is one of the collected user IDs.
	query.Where(predicate.FsEvent(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(user.FseventsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.UserFsevent
		node, ok := nodeids[fk]
		if !ok {
			// An FK outside the requested set indicates inconsistent data.
			return fmt.Errorf(`unexpected referenced foreign-key "user_fsevent" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
func (uq *UserQuery) loadEntities(ctx context.Context, query *EntityQuery, nodes []*User, init func(*User), assign func(*User, *Entity)) error {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[int]*User)

View File

@ -14,6 +14,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
@ -297,6 +298,21 @@ func (uu *UserUpdate) AddTasks(t ...*Task) *UserUpdate {
return uu.AddTaskIDs(ids...)
}
// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
// The IDs are staged on the mutation and linked in sqlSave.
func (uu *UserUpdate) AddFseventIDs(ids ...int) *UserUpdate {
	uu.mutation.AddFseventIDs(ids...)
	return uu
}
// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uu *UserUpdate) AddFsevents(f ...*FsEvent) *UserUpdate {
	// Collect the entity IDs and delegate to the ID-based variant.
	ids := make([]int, 0, len(f))
	for _, ev := range f {
		ids = append(ids, ev.ID)
	}
	return uu.AddFseventIDs(ids...)
}
// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uu *UserUpdate) AddEntityIDs(ids ...int) *UserUpdate {
uu.mutation.AddEntityIDs(ids...)
@ -428,6 +444,27 @@ func (uu *UserUpdate) RemoveTasks(t ...*Task) *UserUpdate {
return uu.RemoveTaskIDs(ids...)
}
// ClearFsevents clears all "fsevents" edges to the FsEvent entity.
// Takes precedence over RemoveFsevents when both are recorded.
func (uu *UserUpdate) ClearFsevents() *UserUpdate {
	uu.mutation.ClearFsevents()
	return uu
}
// RemoveFseventIDs removes the "fsevents" edge to FsEvent entities by IDs.
// Ignored if ClearFsevents was also called on this update.
func (uu *UserUpdate) RemoveFseventIDs(ids ...int) *UserUpdate {
	uu.mutation.RemoveFseventIDs(ids...)
	return uu
}
// RemoveFsevents removes "fsevents" edges to FsEvent entities.
func (uu *UserUpdate) RemoveFsevents(f ...*FsEvent) *UserUpdate {
	// Collect the entity IDs and delegate to the ID-based variant.
	ids := make([]int, 0, len(f))
	for _, ev := range f {
		ids = append(ids, ev.ID)
	}
	return uu.RemoveFseventIDs(ids...)
}
// ClearEntities clears all "entities" edges to the Entity entity.
func (uu *UserUpdate) ClearEntities() *UserUpdate {
uu.mutation.ClearEntities()
@ -828,6 +865,51 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if uu.mutation.FseventsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := uu.mutation.RemovedFseventsIDs(); len(nodes) > 0 && !uu.mutation.FseventsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := uu.mutation.FseventsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if uu.mutation.EntitiesCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
@ -1154,6 +1236,21 @@ func (uuo *UserUpdateOne) AddTasks(t ...*Task) *UserUpdateOne {
return uuo.AddTaskIDs(ids...)
}
// AddFseventIDs adds the "fsevents" edge to the FsEvent entity by IDs.
// The IDs are staged on the mutation and linked in sqlSave.
func (uuo *UserUpdateOne) AddFseventIDs(ids ...int) *UserUpdateOne {
	uuo.mutation.AddFseventIDs(ids...)
	return uuo
}
// AddFsevents adds the "fsevents" edges to the FsEvent entity.
func (uuo *UserUpdateOne) AddFsevents(f ...*FsEvent) *UserUpdateOne {
	// Collect the entity IDs and delegate to the ID-based variant.
	ids := make([]int, 0, len(f))
	for _, ev := range f {
		ids = append(ids, ev.ID)
	}
	return uuo.AddFseventIDs(ids...)
}
// AddEntityIDs adds the "entities" edge to the Entity entity by IDs.
func (uuo *UserUpdateOne) AddEntityIDs(ids ...int) *UserUpdateOne {
uuo.mutation.AddEntityIDs(ids...)
@ -1285,6 +1382,27 @@ func (uuo *UserUpdateOne) RemoveTasks(t ...*Task) *UserUpdateOne {
return uuo.RemoveTaskIDs(ids...)
}
// ClearFsevents clears all "fsevents" edges to the FsEvent entity.
// Takes precedence over RemoveFsevents when both are recorded.
func (uuo *UserUpdateOne) ClearFsevents() *UserUpdateOne {
	uuo.mutation.ClearFsevents()
	return uuo
}
// RemoveFseventIDs removes the "fsevents" edge to FsEvent entities by IDs.
// Ignored if ClearFsevents was also called on this update.
func (uuo *UserUpdateOne) RemoveFseventIDs(ids ...int) *UserUpdateOne {
	uuo.mutation.RemoveFseventIDs(ids...)
	return uuo
}
// RemoveFsevents removes "fsevents" edges to FsEvent entities.
func (uuo *UserUpdateOne) RemoveFsevents(f ...*FsEvent) *UserUpdateOne {
	// Collect the entity IDs and delegate to the ID-based variant.
	ids := make([]int, 0, len(f))
	for _, ev := range f {
		ids = append(ids, ev.ID)
	}
	return uuo.RemoveFseventIDs(ids...)
}
// ClearEntities clears all "entities" edges to the Entity entity.
func (uuo *UserUpdateOne) ClearEntities() *UserUpdateOne {
uuo.mutation.ClearEntities()
@ -1715,6 +1833,51 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if uuo.mutation.FseventsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := uuo.mutation.RemovedFseventsIDs(); len(nodes) > 0 && !uuo.mutation.FseventsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := uuo.mutation.FseventsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: user.FseventsTable,
Columns: []string{user.FseventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(fsevent.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if uuo.mutation.EntitiesCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,

95
go.mod
View File

@ -1,13 +1,15 @@
module github.com/cloudreve/Cloudreve/v4
go 1.23.0
go 1.24.0
toolchain go1.24.9
require (
entgo.io/ent v0.13.0
github.com/Masterminds/semver/v3 v3.3.1
github.com/abslant/gzip v0.0.9
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0
github.com/aws/aws-sdk-go v1.31.5
github.com/bodgit/sevenzip v1.6.0
github.com/cloudflare/cfssl v1.6.1
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
github.com/dsoprea/go-exif/v3 v3.0.1
@ -17,13 +19,13 @@ require (
github.com/dsoprea/go-tiff-image-structure v0.0.0-20221003165014-8ecc4f52edca
github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf
github.com/fatih/color v1.18.0
github.com/gin-contrib/cors v1.3.0
github.com/gin-contrib/cors v1.6.0
github.com/gin-contrib/gzip v1.2.4
github.com/gin-contrib/sessions v1.0.2
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2
github.com/gin-gonic/gin v1.10.0
github.com/gin-gonic/gin v1.11.0
github.com/go-ini/ini v1.50.0
github.com/go-mail/mail v2.3.1+incompatible
github.com/go-playground/validator/v10 v10.20.0
github.com/go-playground/validator/v10 v10.28.0
github.com/go-sql-driver/mysql v1.6.0
github.com/go-webauthn/webauthn v0.11.2
github.com/gofrs/uuid v4.0.0+incompatible
@ -38,8 +40,9 @@ require (
github.com/jinzhu/gorm v1.9.11
github.com/jpillora/backoff v1.0.0
github.com/juju/ratelimit v1.0.1
github.com/ks3sdklib/aws-sdk-go v1.6.2
github.com/lib/pq v1.10.9
github.com/mholt/archiver/v4 v4.0.0-alpha.6
github.com/mholt/archives v0.1.3
github.com/mojocn/base64Captcha v0.0.0-20190801020520-752b1cd608b2
github.com/pquerna/otp v1.2.0
github.com/qiniu/go-sdk/v7 v7.19.0
@ -49,33 +52,37 @@ require (
github.com/speps/go-hashids v2.0.0+incompatible
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
github.com/stretchr/testify v1.11.1
github.com/tencentyun/cos-go-sdk-v5 v0.7.54
github.com/ua-parser/uap-go v0.0.0-20250213224047-9c035f085b90
github.com/upyun/go-sdk v2.1.0+incompatible
github.com/wneessen/go-mail v0.7.1
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
golang.org/x/image v0.0.0-20211028202545-6944b10bf410
golang.org/x/text v0.23.0
golang.org/x/image v0.18.0
golang.org/x/text v0.30.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.24.0
golang.org/x/tools v0.38.0
modernc.org/sqlite v1.30.0
)
require (
ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect
cloud.google.com/go v0.81.0 // indirect
github.com/STARRY-S/zip v0.2.1 // indirect
github.com/agext/levenshtein v1.2.1 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/windows v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/bytedance/sonic v1.11.6 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic v1.14.1 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/clbanning/mxj v1.8.4 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
github.com/dsoprea/go-exif/v2 v2.0.0-20200604193436-ca8584a0e1c4 // indirect
github.com/dsoprea/go-iptc v0.0.0-20200609062250-162ae6b44feb // indirect
github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd // indirect
@ -83,21 +90,23 @@ require (
github.com/dsoprea/go-utility/v2 v2.0.0-20221003172846-a3e1774ef349 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-openapi/inflect v0.19.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-webauthn/x v0.1.14 // indirect
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-tpm v0.9.1 // indirect
github.com/gorilla/context v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl/v2 v2.13.0 // indirect
@ -105,41 +114,43 @@ require (
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mikelolasagasti/xz v1.0.1 // indirect
github.com/minio/minlz v1.0.0 // indirect
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mozillazg/go-httpheader v0.4.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pierrec/lz4/v4 v4.1.14 // indirect
github.com/nwaples/rardecode/v2 v2.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.55.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/sorairolake/lzip-go v0.3.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/ugorji/go/codec v1.3.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/zclconf/go-cty v1.8.0 // indirect
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/mail.v2 v2.3.1 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/arch v0.22.0 // indirect
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.37.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect

206
go.sum
View File

@ -82,11 +82,11 @@ github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuN
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/abslant/gzip v0.0.9 h1:zxuOQ8QmPwni7vwgE3EyOygdmeCo2UkCmO5t+7Ms6cA=
github.com/abslant/gzip v0.0.9/go.mod h1:IcN2c50tZn2y54oysNcIavbTAc1s0B2f5TqTEA+WCas=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
@ -98,10 +98,10 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0 h1:wQlqotpyjYPjJz+Noh5bRu7Snmydk8SKC5Z6u1CR20Y=
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0/go.mod h1:FTzydeQVmR24FI0D6XWUOMKckjXehM/jgMn1xC+DA9M=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 h1:8PmGpDEZl9yDpcdEr6Odf23feCxK3LNUNMxjXg41pZQ=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
@ -138,12 +138,20 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU=
github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs=
github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A=
github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc=
github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w=
github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
@ -167,10 +175,8 @@ github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2
github.com/cloudflare/cfssl v1.6.1 h1:aIOUjpeuDJOpWjVJFP2ByplF53OgqG8I1S40Ggdlk3g=
github.com/cloudflare/cfssl v1.6.1/go.mod h1:ENhCj4Z17+bY2XikpxVmTHDg/C2IsG2Q0ZBeXpAqhCk=
github.com/cloudflare/redoctober v0.0.0-20201013214028-99c99a8e7544/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@ -213,8 +219,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25 h1:simG0vMYFvNriGhaaat7QVVkaVkXzvqcohaBoLZl9Hg=
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25/go.mod h1:Z3Lomva4pyMWYezjMAU5QWRh0p1VvO4199OHlFnyKkM=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dsoprea/go-exif/v2 v2.0.0-20200321225314-640175a69fe4/go.mod h1:Lm2lMM2zx8p4a34ZemkaUV95AnMl4ZvLbCUbwOvLC2E=
github.com/dsoprea/go-exif/v2 v2.0.0-20200520183328-015129a9efd5/go.mod h1:9EXlPeHfblFFnwu5UOqmP2eoZfJyAZ2Ri/Vki33ajO0=
@ -274,7 +280,6 @@ github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DP
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
@ -290,24 +295,24 @@ github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZU
github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.3.0 h1:PolezCc89peu+NgkIWt9OB01Kbzt6IP0J/JvkG6xxlg=
github.com/gin-contrib/cors v1.3.0/go.mod h1:artPvLlhkF7oG06nK8v3U8TNz6IeX+w1uzCSEId5/Vc=
github.com/gin-contrib/cors v1.6.0 h1:0Z7D/bVhE6ja07lI8CTjTonp6SB07o8bNuFyRbsBUQg=
github.com/gin-contrib/cors v1.6.0/go.mod h1:cI+h6iOAyxKRtUtC6iF/Si1KSFvGm/gK+kshxlCi8ro=
github.com/gin-contrib/gzip v1.2.4 h1:yNz4EhPC2kHSZJD1oc1zwp7MLEhEZ3goQeGM3a1b6jU=
github.com/gin-contrib/gzip v1.2.4/go.mod h1:aomRgR7ftdZV3uWY0gW/m8rChfxau0n8YVvwlOHONzw=
github.com/gin-contrib/sessions v1.0.2 h1:UaIjUvTH1cMeOdj3in6dl+Xb6It8RiKRF9Z1anbUyCA=
github.com/gin-contrib/sessions v1.0.2/go.mod h1:KxKxWqWP5LJVDCInulOl4WbLzK2KSPlLesfZ66wRvMs=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2 h1:xLG16iua01X7Gzms9045s2Y2niNpvSY/Zb1oBwgNYZY=
github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2/go.mod h1:VhW/Ch/3FhimwZb8Oj+qJmdMmoB8r7lmJ5auRjm50oQ=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
@ -326,8 +331,6 @@ github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgO
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM=
github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M=
github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
@ -343,10 +346,9 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688=
github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -362,8 +364,10 @@ github.com/go-webauthn/x v0.1.14 h1:1wrB8jzXAofojJPAaRxnZhRgagvLGnLjhCAwg3kTpT0=
github.com/go-webauthn/x v0.1.14/go.mod h1:UuVvFZ8/NbOnkDz3y1NaxtUN87pmtpC1PQ+/5BBQRdc=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
@ -417,8 +421,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s=
github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -438,8 +440,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@ -522,11 +524,15 @@ github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoP
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@ -613,15 +619,13 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/kisom/goutils v1.4.3/go.mod h1:Lp5qrquG7yhYnWzZCI/68Pa/GpFynw//od6EkGnWpac=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@ -636,11 +640,12 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ks3sdklib/aws-sdk-go v1.6.2 h1:nxtaaU3hDD5x6gmoxs/qijSJqZrjFapYYuTiVCEgobA=
github.com/ks3sdklib/aws-sdk-go v1.6.2/go.mod h1:jGcsV0dJgMmStAyqjkKVUu6F167pAXYZAS3LqoZMmtM=
github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
@ -660,19 +665,15 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@ -689,11 +690,15 @@ github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mholt/archiver/v4 v4.0.0-alpha.6 h1:3wvos9Kn1GpKNBz+MpozinGREPslLo1ds1W16vTkErQ=
github.com/mholt/archiver/v4 v4.0.0-alpha.6/go.mod h1:9PTygYq90FQBWPspdwAng6dNjYiBuTYKqmA6c15KuCo=
github.com/mholt/archives v0.1.3 h1:aEAaOtNra78G+TvV5ohmXrJOAzf++dIlYeDW3N9q458=
github.com/mholt/archives v0.1.3/go.mod h1:LUCGp++/IbV/I0Xq4SzcIR6uwgeh2yjnQWamjRQfLTU=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mikelolasagasti/xz v1.0.1 h1:Q2F2jX0RYJUG3+WsM+FJknv+6eVjsjXNDV0KJXZzkD0=
github.com/mikelolasagasti/xz v1.0.1/go.mod h1:muAirjiOUxPRXwm9HdDtB3uoRPrGnL85XHtokL9Hcgc=
github.com/minio/minlz v1.0.0 h1:Kj7aJZ1//LlTP1DM8Jm7lNKvvJS2m74gyyXXn3+uJWQ=
github.com/minio/minlz v1.0.0/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@ -739,8 +744,8 @@ github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdh
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/nwaples/rardecode/v2 v2.1.0 h1:JQl9ZoBPDy+nIZGb1mx8+anfHp/LV3NE2MjMiv0ct/U=
github.com/nwaples/rardecode/v2 v2.1.0/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@ -772,13 +777,13 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE=
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -829,6 +834,10 @@ github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdk
github.com/qiniu/go-sdk/v7 v7.19.0 h1:k3AzDPil8QHIQnki6xXt4YRAjE52oRoBUXQ4bV+Wc5U=
github.com/qiniu/go-sdk/v7 v7.19.0/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w=
github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=
github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U=
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1 h1:leEwA4MD1ew0lNgzz6Q4G76G3AEfeci+TMggN6WuFRs=
github.com/rafaeljusto/redigomock v0.0.0-20191117212112-00b2509252a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@ -880,6 +889,8 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg=
github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/speps/go-hashids v2.0.0+incompatible h1:kSfxGfESueJKTx0mpER9Y/1XHl+FVQjtCqRyYcviFbw=
github.com/speps/go-hashids v2.0.0+incompatible/go.mod h1:P7hqPzMdnZOfyIk+xrlG1QaSMw+gCBdHKsBDnhpaZvc=
@ -923,16 +934,13 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0=
github.com/tencentyun/cos-go-sdk-v5 v0.7.54 h1:FRamEhNBbSeggyYfWfzFejTLftgbICocSYFk4PKTSV4=
github.com/tencentyun/cos-go-sdk-v5 v0.7.54/go.mod h1:UN+VdbCl1hg+kKi5RXqZgaP+Boqfmk+D04GRc4XFk70=
github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
@ -950,12 +958,13 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/upyun/go-sdk v2.1.0+incompatible h1:OdjXghQ/TVetWV16Pz3C1/SUpjhGBVPr+cLiqZLLyq0=
github.com/upyun/go-sdk v2.1.0+incompatible/go.mod h1:eu3F5Uz4b9ZE5bE5QsCL6mgSNWRwfj0zpJ9J626HEqs=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@ -968,6 +977,8 @@ github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/weppos/publicsuffix-go v0.13.1-0.20210123135404-5fd73613514e/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/wneessen/go-mail v0.7.1 h1:rvy63sp14N06/kdGqCYwW8Na5gDCXjTQM1E7So4PuKk=
github.com/wneessen/go-mail v0.7.1/go.mod h1:+TkW6QP3EVkgTEqHtVmnAE/1MRhmzb8Y9/W3pweuS+k=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
@ -975,6 +986,8 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -1016,6 +1029,8 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@ -1024,12 +1039,12 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU=
go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/arch v0.22.0 h1:c/Zle32i5ttqRXjdLyyHZESLD/bB90DCU1g9l/0YBDI=
golang.org/x/arch v0.22.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1052,8 +1067,8 @@ golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1070,8 +1085,8 @@ golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeId
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190501045829-6d32002ffd75/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410 h1:hTftEOvwiOq2+O8k2D5/Q7COC7k5Qcrgc2TFURJYnvQ=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1096,8 +1111,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1152,8 +1167,9 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1182,8 +1198,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1215,7 +1231,6 @@ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1258,12 +1273,13 @@ golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1274,8 +1290,9 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1349,8 +1366,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1482,11 +1499,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1498,12 +1513,9 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
@ -1560,10 +1572,8 @@ modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View File

@ -57,13 +57,22 @@ type (
UserID int
Name string
StoragePolicyID int
HasMetadata string
Shared bool
HasDirectLink bool
}
// MetadataFilter describes a single metadata predicate used by file search.
MetadataFilter struct {
	// Key is the metadata entry name to match.
	Key string
	// Value is the value to match. In non-exact mode an empty Value means
	// "the key merely exists"; a non-empty Value is matched case-insensitively
	// as a substring.
	Value string
	// Exact requires both key and value to match exactly; exact matches are
	// not restricted to public metadata entries.
	Exact bool
}
SearchFileParameters struct {
Name []string
// NameOperatorOr is true if the name should match any of the given names, false if all of them
NameOperatorOr bool
Metadata map[string]string
Metadata []MetadataFilter
Type *types.FileType
UseFullText bool
CaseFolding bool
@ -121,6 +130,7 @@ type (
Size int64
UploadSessionID uuid.UUID
Importing bool
EncryptMetadata *types.EncryptMetadata
}
RelocateEntityParameter struct {
@ -179,7 +189,7 @@ type FileClient interface {
// Copy copies a layer of file to its corresponding destination folder. dstMap is a map from src parent ID to dst parent Files.
Copy(ctx context.Context, files []*ent.File, dstMap map[int][]*ent.File) (map[int][]*ent.File, StorageDiff, error)
// Delete deletes a group of files (and related models) with given entity recycle option
Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error)
Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error)
// StaleEntities returns stale entities of a given file. If ID is not provided, all entities
// will be examined.
StaleEntities(ctx context.Context, ids ...int) ([]*ent.Entity, error)
@ -211,6 +221,8 @@ type FileClient interface {
ListEntities(ctx context.Context, args *ListEntityParameters) (*ListEntityResult, error)
// UpdateProps updates props of a file
UpdateProps(ctx context.Context, file *ent.File, props *types.FileProps) (*ent.File, error)
// UpdateModifiedAt updates modified at of a file
UpdateModifiedAt(ctx context.Context, file *ent.File, modifiedAt time.Time) error
}
func NewFileClient(client *ent.Client, dbType conf.DBType, hasher hashid.Encoder) FileClient {
@ -458,7 +470,7 @@ func (f *fileClient) DeleteByUser(ctx context.Context, uid int) error {
return nil
}
func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error) {
func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error) {
// 1. Decrease reference count for all entities;
// entities stores the relation between its reference count in `files` and entity ID.
entities := make(map[int]int)
@ -514,7 +526,7 @@ func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *typ
for _, chunk := range chunks {
if err := f.client.Entity.Update().
Where(entity.IDIn(chunk...)).
SetRecycleOptions(options).
SetProps(options).
Exec(ctx); err != nil {
return nil, nil, fmt.Errorf("failed to update recycle options for entities %v: %w", chunk, err)
}
@ -640,6 +652,10 @@ func (f *fileClient) Copy(ctx context.Context, files []*ent.File, dstMap map[int
return newDstMap, map[int]int64{dstMap[files[0].FileChildren][0].OwnerID: sizeDiff}, nil
}
// UpdateModifiedAt sets the file's updated-at timestamp to the given value.
func (f *fileClient) UpdateModifiedAt(ctx context.Context, file *ent.File, modifiedAt time.Time) error {
	return f.client.File.UpdateOne(file).SetUpdatedAt(modifiedAt).Exec(ctx)
}
func (f *fileClient) UpsertMetadata(ctx context.Context, file *ent.File, data map[string]string, privateMask map[string]bool) error {
// Validate value length
for key, value := range data {
@ -712,10 +728,15 @@ func (f *fileClient) UpgradePlaceholder(ctx context.Context, file *ent.File, mod
}
if entityType == types.EntityTypeVersion {
if err := f.client.File.UpdateOne(file).
stm := f.client.File.UpdateOne(file).
SetSize(placeholder.Size).
SetPrimaryEntity(placeholder.ID).
Exec(ctx); err != nil {
SetPrimaryEntity(placeholder.ID)
if modifiedAt != nil {
stm.SetUpdatedAt(*modifiedAt)
}
if err := stm.Exec(ctx); err != nil {
return fmt.Errorf("failed to upgrade file primary entity: %v", err)
}
}
@ -864,6 +885,17 @@ func (f *fileClient) RemoveStaleEntities(ctx context.Context, file *ent.File) (S
func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *EntityParameters) (*ent.Entity, StorageDiff, error) {
createdBy := UserFromContext(ctx)
var opt *types.EntityProps
if args.EncryptMetadata != nil {
opt = &types.EntityProps{
EncryptMetadata: &types.EncryptMetadata{
Algorithm: args.EncryptMetadata.Algorithm,
Key: args.EncryptMetadata.Key,
IV: args.EncryptMetadata.IV,
},
}
}
stm := f.client.Entity.
Create().
SetType(int(args.EntityType)).
@ -871,6 +903,10 @@ func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *Ent
SetSize(args.Size).
SetStoragePolicyID(args.StoragePolicyID)
if opt != nil {
stm.SetProps(opt)
}
if createdBy != nil && !IsAnonymousUser(createdBy) {
stm.SetUser(createdBy)
}
@ -890,7 +926,7 @@ func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *Ent
diff := map[int]int64{file.OwnerID: created.Size}
if err := f.client.File.UpdateOne(file).AddEntities(created).Exec(ctx); err != nil {
if err := f.client.Entity.UpdateOne(created).AddFile(file).Exec(ctx); err != nil {
return nil, diff, fmt.Errorf("failed to add file entity: %v", err)
}
@ -1081,6 +1117,18 @@ func (f *fileClient) FlattenListFiles(ctx context.Context, args *FlattenListFile
query = query.Where(file.NameContainsFold(args.Name))
}
if args.HasMetadata != "" {
query = query.Where(file.HasMetadataWith(metadata.Name(args.HasMetadata)))
}
if args.Shared {
query = query.Where(file.HasSharesWith(share.DeletedAtIsNil()))
}
if args.HasDirectLink {
query = query.Where(file.HasDirectLinksWith(directlink.DeletedAtIsNil()))
}
query.Order(getFileOrderOption(&ListFileParameters{
PaginationArgs: args.PaginationArgs,
})...)

View File

@ -16,6 +16,10 @@ import (
"github.com/samber/lo"
)
const (
metadataExactMatchPrefix = "!exact:"
)
func (f *fileClient) searchQuery(q *ent.FileQuery, args *SearchFileParameters, parents []*ent.File, ownerId int) *ent.FileQuery {
if len(parents) == 1 && parents[0] == nil {
q = q.Where(file.OwnerID(ownerId))
@ -69,17 +73,22 @@ func (f *fileClient) searchQuery(q *ent.FileQuery, args *SearchFileParameters, p
}
if len(args.Metadata) > 0 {
metaPredicates := lo.MapToSlice(args.Metadata, func(name string, value string) predicate.Metadata {
nameEq := metadata.NameEQ(value)
if name == "" {
metaPredicates := lo.Map(args.Metadata, func(item MetadataFilter, index int) predicate.Metadata {
if item.Exact {
return metadata.And(metadata.NameEQ(item.Key), metadata.ValueEQ(item.Value))
}
nameEq := metadata.And(metadata.IsPublic(true), metadata.NameEQ(item.Key))
if item.Value == "" {
return nameEq
} else {
valueContain := metadata.ValueContainsFold(value)
return metadata.And(metadata.NameEQ(name), valueContain)
valueContain := metadata.ValueContainsFold(item.Value)
return metadata.And(nameEq, valueContain)
}
})
metaPredicates = append(metaPredicates, metadata.IsPublic(true))
q.Where(file.HasMetadataWith(metadata.And(metaPredicates...)))
q.Where(file.And(lo.Map(metaPredicates, func(item predicate.Metadata, index int) predicate.File {
return file.HasMetadataWith(item)
})...))
}
if args.SizeLte > 0 || args.SizeGte > 0 {

81
inventory/fs_event.go Normal file
View File

@ -0,0 +1,81 @@
package inventory
import (
"context"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/schema"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/gofrs/uuid"
"github.com/samber/lo"
)
// FsEventClient persists filesystem change events so that subscribers
// (e.g. SSE clients) can consume them later.
type FsEventClient interface {
	TxOperator
	// Create inserts one FsEvent row per event name for the given user and subscriber.
	Create(ctx context.Context, uid int, subscriberId uuid.UUID, events ...string) error
	// DeleteBySubscriber deletes all FsEvents belonging to the given subscriber.
	DeleteBySubscriber(ctx context.Context, subscriberId uuid.UUID) error
	// DeleteAll deletes every FsEvent.
	DeleteAll(ctx context.Context) error
	// TakeBySubscriber returns all FsEvents for the given subscriber and user,
	// and deletes them in the same call (consume-once semantics).
	TakeBySubscriber(ctx context.Context, subscriberId uuid.UUID, userId int) ([]*ent.FsEvent, error)
}
// NewFsEventClient creates a FsEventClient backed by the given ent client.
// dbType determines the per-statement SQL parameter limit used for bulk operations.
func NewFsEventClient(client *ent.Client, dbType conf.DBType) FsEventClient {
	return &fsEventClient{client: client, maxSQlParam: sqlParamLimit(dbType)}
}
// fsEventClient is the default FsEventClient implementation.
type fsEventClient struct {
	// maxSQlParam is the per-statement SQL parameter limit derived from the DB type.
	maxSQlParam int
	client *ent.Client
}
// SetClient returns a copy of this client bound to newClient (typically a
// transactional client), preserving the SQL parameter limit.
func (c *fsEventClient) SetClient(newClient *ent.Client) TxOperator {
	return &fsEventClient{client: newClient, maxSQlParam: c.maxSQlParam}
}
// GetClient returns the underlying ent client.
func (c *fsEventClient) GetClient() *ent.Client {
	return c.client
}
// Create inserts one FsEvent row per event name for the given user and
// subscriber, using a single bulk insert.
func (c *fsEventClient) Create(ctx context.Context, uid int, subscriberId uuid.UUID, events ...string) error {
	// Build one create statement per event; SetEvent is called exactly once
	// per statement (the original chained it twice redundantly).
	stms := lo.Map(events, func(event string, index int) *ent.FsEventCreate {
		return c.client.FsEvent.
			Create().
			SetUserFsevent(uid).
			SetSubscriber(subscriberId).
			SetEvent(event)
	})
	_, err := c.client.FsEvent.CreateBulk(stms...).Save(ctx)
	return err
}
// DeleteBySubscriber removes every FsEvent owned by the given subscriber.
// Soft delete is bypassed so the rows are physically removed.
func (c *fsEventClient) DeleteBySubscriber(ctx context.Context, subscriberId uuid.UUID) error {
	if _, err := c.client.FsEvent.
		Delete().
		Where(fsevent.Subscriber(subscriberId)).
		Exec(schema.SkipSoftDelete(ctx)); err != nil {
		return err
	}
	return nil
}
// DeleteAll removes every FsEvent row. Soft delete is bypassed so the rows
// are physically removed.
func (c *fsEventClient) DeleteAll(ctx context.Context) error {
	if _, err := c.client.FsEvent.Delete().Exec(schema.SkipSoftDelete(ctx)); err != nil {
		return err
	}
	return nil
}
// TakeBySubscriber returns all FsEvents for the given subscriber and user,
// then deletes them so each event is consumed at most once.
//
// NOTE(review): the query and the delete are two separate statements; an
// event inserted between them would be deleted without ever being returned
// unless this method runs inside a transaction — confirm callers wrap it
// with WithTx/InheritTx.
func (c *fsEventClient) TakeBySubscriber(ctx context.Context, subscriberId uuid.UUID, userId int) ([]*ent.FsEvent, error) {
	res, err := c.client.FsEvent.Query().Where(fsevent.Subscriber(subscriberId), fsevent.UserFsevent(userId)).All(ctx)
	if err != nil {
		return nil, err
	}
	// Delete the FsEvents (bypassing soft delete) now that they are handed out.
	_, err = c.client.FsEvent.Delete().Where(fsevent.Subscriber(subscriberId), fsevent.UserFsevent(userId)).Exec(schema.SkipSoftDelete(ctx))
	if err != nil {
		return nil, err
	}
	return res, nil
}

View File

@ -279,6 +279,53 @@ type (
)
var patches = []Patch{
{
Name: "apply_default_archive_viewer",
EndVersion: "4.7.0",
Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
fileViewersSetting, err := client.Setting.Query().Where(setting.Name("file_viewers")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query file_viewers setting: %w", err)
}
var fileViewers []types.ViewerGroup
if err := json.Unmarshal([]byte(fileViewersSetting.Value), &fileViewers); err != nil {
return fmt.Errorf("failed to unmarshal file_viewers setting: %w", err)
}
fileViewerExisted := false
for _, viewer := range fileViewers[0].Viewers {
if viewer.ID == "archive" {
fileViewerExisted = true
break
}
}
// 2.2 If not existed, add it
if !fileViewerExisted {
// Found existing archive viewer default setting
var defaultArchiveViewer types.Viewer
for _, viewer := range defaultFileViewers[0].Viewers {
if viewer.ID == "archive" {
defaultArchiveViewer = viewer
break
}
}
fileViewers[0].Viewers = append(fileViewers[0].Viewers, defaultArchiveViewer)
newFileViewersSetting, err := json.Marshal(fileViewers)
if err != nil {
return fmt.Errorf("failed to marshal file_viewers setting: %w", err)
}
if _, err := client.Setting.UpdateOne(fileViewersSetting).SetValue(string(newFileViewersSetting)).Save(ctx); err != nil {
return fmt.Errorf("failed to update file_viewers setting: %w", err)
}
}
return nil
},
},
{
Name: "apply_default_excalidraw_viewer",
EndVersion: "4.1.0",
@ -367,6 +414,86 @@ var patches = []Patch{
}
}
return nil
},
},
{
Name: "apply_email_title_magic_var",
EndVersion: "4.7.0",
Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
// 1. Activate Template
mailActivationTemplateSetting, err := client.Setting.Query().Where(setting.Name("mail_activation_template")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query mail_activation_template setting: %w", err)
}
var mailActivationTemplate []struct {
Title string `json:"title"`
Body string `json:"body"`
Language string `json:"language"`
}
if err := json.Unmarshal([]byte(mailActivationTemplateSetting.Value), &mailActivationTemplate); err != nil {
return fmt.Errorf("failed to unmarshal mail_activation_template setting: %w", err)
}
for i, t := range mailActivationTemplate {
mailActivationTemplate[i].Title = fmt.Sprintf("[{{ .CommonContext.SiteBasic.Name }}] %s", t.Title)
}
newMailActivationTemplate, err := json.Marshal(mailActivationTemplate)
if err != nil {
return fmt.Errorf("failed to marshal mail_activation_template setting: %w", err)
}
if _, err := client.Setting.UpdateOne(mailActivationTemplateSetting).SetValue(string(newMailActivationTemplate)).Save(ctx); err != nil {
return fmt.Errorf("failed to update mail_activation_template setting: %w", err)
}
// 2. Reset Password Template
mailResetTemplateSetting, err := client.Setting.Query().Where(setting.Name("mail_reset_template")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query mail_reset_template setting: %w", err)
}
var mailResetTemplate []struct {
Title string `json:"title"`
Body string `json:"body"`
Language string `json:"language"`
}
if err := json.Unmarshal([]byte(mailResetTemplateSetting.Value), &mailResetTemplate); err != nil {
return fmt.Errorf("failed to unmarshal mail_reset_template setting: %w", err)
}
for i, t := range mailResetTemplate {
mailResetTemplate[i].Title = fmt.Sprintf("[{{ .CommonContext.SiteBasic.Name }}] %s", t.Title)
}
newMailResetTemplate, err := json.Marshal(mailResetTemplate)
if err != nil {
return fmt.Errorf("failed to marshal mail_reset_template setting: %w", err)
}
if _, err := client.Setting.UpdateOne(mailResetTemplateSetting).SetValue(string(newMailResetTemplate)).Save(ctx); err != nil {
return fmt.Errorf("failed to update mail_reset_template setting: %w", err)
}
return nil
},
},
{
Name: "apply_thumb_path_magic_var",
EndVersion: "4.10.0",
Func: func(l logging.Logger, client *ent.Client, ctx context.Context) error {
thumbSuffixSetting, err := client.Setting.Query().Where(setting.Name("thumb_entity_suffix")).First(ctx)
if err != nil {
return fmt.Errorf("failed to query thumb_entity_suffix setting: %w", err)
}
newThumbSuffix := fmt.Sprintf("{blob_path}/{blob_name}%s", thumbSuffixSetting.Value)
if _, err := client.Setting.UpdateOne(thumbSuffixSetting).SetValue(newThumbSuffix).Save(ctx); err != nil {
return fmt.Errorf("failed to update thumb_entity_suffix setting: %w", err)
}
return nil
},
},

View File

@ -27,6 +27,7 @@ type (
SkipStoragePolicyCache struct{}
StoragePolicyClient interface {
TxOperator
// GetByGroup returns the storage policies of the group.
GetByGroup(ctx context.Context, group *ent.Group) (*ent.StoragePolicy, error)
// GetPolicyByID returns the storage policy by id.
@ -64,6 +65,14 @@ type storagePolicyClient struct {
cache cache.Driver
}
// SetClient returns a copy of this client bound to newClient (typically a
// transactional client), sharing the same cache driver.
func (c *storagePolicyClient) SetClient(newClient *ent.Client) TxOperator {
	return &storagePolicyClient{client: newClient, cache: c.cache}
}
// GetClient returns the underlying ent client.
func (c *storagePolicyClient) GetClient() *ent.Client {
	return c.client
}
func (c *storagePolicyClient) Delete(ctx context.Context, policy *ent.StoragePolicy) error {
if err := c.client.StoragePolicy.DeleteOne(policy).Exec(ctx); err != nil {
return fmt.Errorf("failed to delete storage policy: %w", err)

File diff suppressed because one or more lines are too long

View File

@ -3,6 +3,7 @@ package inventory
import (
"context"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent"
@ -44,6 +45,8 @@ type TaskClient interface {
List(ctx context.Context, args *ListTaskArgs) (*ListTaskResult, error)
// DeleteByIDs deletes the tasks with the given IDs.
DeleteByIDs(ctx context.Context, ids ...int) error
// DeleteBy deletes the tasks with the given args.
DeleteBy(ctx context.Context, args *DeleteTaskArgs) error
}
type (
@ -59,6 +62,12 @@ type (
*PaginationResults
Tasks []*ent.Task
}
// DeleteTaskArgs filters which tasks DeleteBy removes.
DeleteTaskArgs struct {
	// NotAfter: only tasks created at or before this time are deleted.
	NotAfter time.Time
	// Types optionally restricts deletion to these task types; empty means all types.
	Types []string
	// Status optionally restricts deletion to these statuses; empty means all statuses.
	Status []task.Status
}
)
func NewTaskClient(client *ent.Client, dbType conf.DBType, hasher hashid.Encoder) TaskClient {
@ -113,6 +122,23 @@ func (c *taskClient) DeleteByIDs(ctx context.Context, ids ...int) error {
return err
}
// DeleteBy deletes all tasks created at or before args.NotAfter, optionally
// narrowed to the given statuses and/or task types.
func (c *taskClient) DeleteBy(ctx context.Context, args *DeleteTaskArgs) error {
	stm := c.client.Task.Delete().Where(task.CreatedAtLTE(args.NotAfter))
	if len(args.Status) > 0 {
		stm = stm.Where(task.StatusIn(args.Status...))
	}
	if len(args.Types) > 0 {
		stm = stm.Where(task.TypeIn(args.Types...))
	}
	_, err := stm.Exec(ctx)
	return err
}
func (c *taskClient) Update(ctx context.Context, task *ent.Task, args *TaskArgs) (*ent.Task, error) {
stm := c.client.Task.UpdateOne(task).
SetPublicState(args.PublicState)

View File

@ -3,6 +3,7 @@ package inventory
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
)
@ -60,6 +61,22 @@ func WithTx[T TxOperator](ctx context.Context, c T) (T, *Tx, context.Context, er
return c.SetClient(txClient).(T), txWrapper, ctx, nil
}
// InheritTx rebinds the given inventory client to a transaction already
// present in the context. If an unfinished transaction is found, the client
// is rebound to its ent client and a child Tx wrapper (marked inherited) is
// returned. Otherwise the original client is returned with a nil Tx.
func InheritTx[T TxOperator](ctx context.Context, c T) (T, *Tx) {
	existing, ok := ctx.Value(TxCtx{}).(*Tx)
	if !ok || existing.finished {
		// Nothing to inherit; keep the caller's client untouched.
		return c, nil
	}
	child := &Tx{inherited: true, tx: existing.tx, parent: existing}
	return c.SetClient(child.tx.Client()).(T), child
}
func Rollback(tx *Tx) error {
if !tx.inherited {
tx.finished = true

View File

@ -7,17 +7,20 @@ import (
// UserSetting 用户其他配置
type (
UserSetting struct {
ProfileOff bool `json:"profile_off,omitempty"`
PreferredTheme string `json:"preferred_theme,omitempty"`
VersionRetention bool `json:"version_retention,omitempty"`
VersionRetentionExt []string `json:"version_retention_ext,omitempty"`
VersionRetentionMax int `json:"version_retention_max,omitempty"`
Pined []PinedFile `json:"pined,omitempty"`
Language string `json:"email_language,omitempty"`
DisableViewSync bool `json:"disable_view_sync,omitempty"`
FsViewMap map[string]ExplorerView `json:"fs_view_map,omitempty"`
ProfileOff bool `json:"profile_off,omitempty"`
PreferredTheme string `json:"preferred_theme,omitempty"`
VersionRetention bool `json:"version_retention,omitempty"`
VersionRetentionExt []string `json:"version_retention_ext,omitempty"`
VersionRetentionMax int `json:"version_retention_max,omitempty"`
Pined []PinedFile `json:"pined,omitempty"`
Language string `json:"email_language,omitempty"`
DisableViewSync bool `json:"disable_view_sync,omitempty"`
FsViewMap map[string]ExplorerView `json:"fs_view_map,omitempty"`
ShareLinksInProfile ShareLinksInProfileLevel `json:"share_links_in_profile,omitempty"`
}
ShareLinksInProfileLevel string
PinedFile struct {
Uri string `json:"uri"`
Name string `json:"name,omitempty"`
@ -41,6 +44,12 @@ type (
Token string `json:"token"`
// 允许的文件扩展名
FileType []string `json:"file_type"`
// IsFileTypeDenyList Whether above list is a deny list.
IsFileTypeDenyList bool `json:"is_file_type_deny_list,omitempty"`
// FileRegexp 文件扩展名正则表达式
NameRegexp string `json:"file_regexp,omitempty"`
// IsNameRegexp Whether above regexp is a deny list.
IsNameRegexpDenyList bool `json:"is_name_regexp_deny_list,omitempty"`
// OauthRedirect Oauth 重定向地址
OauthRedirect string `json:"od_redirect,omitempty"`
// CustomProxy whether to use custom-proxy to get file content
@ -90,6 +99,12 @@ type (
UseCname bool `json:"use_cname,omitempty"`
// CDN domain does not need to be signed.
SourceAuth bool `json:"source_auth,omitempty"`
// QiniuUploadCdn whether to use CDN for Qiniu upload.
QiniuUploadCdn bool `json:"qiniu_upload_cdn,omitempty"`
// ChunkConcurrency the number of chunks to upload concurrently.
ChunkConcurrency int `json:"chunk_concurrency,omitempty"`
// Whether to enable file encryption.
Encryption bool `json:"encryption,omitempty"`
}
FileType int
@ -141,8 +156,18 @@ type (
MasterSiteVersion string `json:"master_site_version,omitempty"`
}
EntityRecycleOption struct {
UnlinkOnly bool `json:"unlink_only,omitempty"`
EntityProps struct {
UnlinkOnly bool `json:"unlink_only,omitempty"`
EncryptMetadata *EncryptMetadata `json:"encrypt_metadata,omitempty"`
}
Cipher string
EncryptMetadata struct {
Algorithm Cipher `json:"algorithm"`
Key []byte `json:"key"`
KeyPlainText []byte `json:"key_plain_text,omitempty"`
IV []byte `json:"iv"`
}
DavAccountProps struct {
@ -171,12 +196,15 @@ type (
}
ColumTypeProps struct {
MetadataKey string `json:"metadata_key,omitempty" binding:"max=255"`
MetadataKey string `json:"metadata_key,omitempty" binding:"max=255"`
CustomPropsID string `json:"custom_props_id,omitempty" binding:"max=255"`
}
ShareProps struct {
// Whether to share view setting from owner
ShareView bool `json:"share_view,omitempty"`
// Whether to automatically show readme file in share view
ShowReadMe bool `json:"show_read_me,omitempty"`
}
FileTypeIconSetting struct {
@ -241,6 +269,7 @@ func FileTypeFromString(s string) FileType {
const (
DavAccountReadOnly DavAccountOption = iota
DavAccountProxy
DavAccountDisableSysFiles
)
const (
@ -250,6 +279,7 @@ const (
PolicyTypeOss = "oss"
PolicyTypeCos = "cos"
PolicyTypeS3 = "s3"
PolicyTypeKs3 = "ks3"
PolicyTypeOd = "onedrive"
PolicyTypeRemote = "remote"
PolicyTypeObs = "obs"
@ -274,25 +304,62 @@ const (
ViewerTypeCustom = "custom"
)
type Viewer struct {
ID string `json:"id"`
Type ViewerType `json:"type"`
DisplayName string `json:"display_name"`
Exts []string `json:"exts"`
Url string `json:"url,omitempty"`
Icon string `json:"icon,omitempty"`
WopiActions map[string]map[ViewerAction]string `json:"wopi_actions,omitempty"`
Props map[string]string `json:"props,omitempty"`
MaxSize int64 `json:"max_size,omitempty"`
Disabled bool `json:"disabled,omitempty"`
Templates []NewFileTemplate `json:"templates,omitempty"`
}
type (
Viewer struct {
ID string `json:"id"`
Type ViewerType `json:"type"`
DisplayName string `json:"display_name"`
Exts []string `json:"exts"`
Url string `json:"url,omitempty"`
Icon string `json:"icon,omitempty"`
WopiActions map[string]map[ViewerAction]string `json:"wopi_actions,omitempty"`
Props map[string]string `json:"props,omitempty"`
MaxSize int64 `json:"max_size,omitempty"`
Disabled bool `json:"disabled,omitempty"`
Templates []NewFileTemplate `json:"templates,omitempty"`
Platform string `json:"platform,omitempty"`
RequiredGroupPermission []GroupPermission `json:"required_group_permission,omitempty"`
}
ViewerGroup struct {
Viewers []Viewer `json:"viewers"`
}
type ViewerGroup struct {
Viewers []Viewer `json:"viewers"`
}
NewFileTemplate struct {
Ext string `json:"ext"`
DisplayName string `json:"display_name"`
}
)
type NewFileTemplate struct {
Ext string `json:"ext"`
DisplayName string `json:"display_name"`
}
type (
CustomPropsType string
CustomProps struct {
ID string `json:"id"`
Name string `json:"name"`
Type CustomPropsType `json:"type"`
Max int `json:"max,omitempty"`
Min int `json:"min,omitempty"`
Default string `json:"default,omitempty"`
Options []string `json:"options,omitempty"`
Icon string `json:"icon,omitempty"`
}
)
const (
CustomPropsTypeText = "text"
CustomPropsTypeNumber = "number"
CustomPropsTypeBoolean = "boolean"
CustomPropsTypeSelect = "select"
CustomPropsTypeMultiSelect = "multi_select"
CustomPropsTypeLink = "link"
CustomPropsTypeRating = "rating"
)
const (
ProfilePublicShareOnly = ShareLinksInProfileLevel("")
ProfileAllShare = ShareLinksInProfileLevel("all_share")
ProfileHideShare = ShareLinksInProfileLevel("hide_share")
)
const (
CipherAES256CTR Cipher = "aes-256-ctr"
)

View File

@ -220,8 +220,29 @@ func (c *userClient) Delete(ctx context.Context, uid int) error {
func (c *userClient) ApplyStorageDiff(ctx context.Context, diffs StorageDiff) error {
ae := serializer.NewAggregateError()
for uid, diff := range diffs {
if err := c.client.User.Update().Where(user.ID(uid)).AddStorage(diff).Exec(ctx); err != nil {
ae.Add(fmt.Sprintf("%d", uid), fmt.Errorf("failed to apply storage diff for user %d: %w", uid, err))
// Retry logic for MySQL deadlock (Error 1213)
// This is a temporary workaround. TODO: optimize storage mutation
maxRetries := 3
var lastErr error
for attempt := 0; attempt < maxRetries; attempt++ {
if err := c.client.User.Update().Where(user.ID(uid)).AddStorage(diff).Exec(ctx); err != nil {
lastErr = err
// Check if it's a MySQL deadlock error (Error 1213)
if strings.Contains(err.Error(), "Error 1213") && attempt < maxRetries-1 {
// Wait a bit before retrying with exponential backoff
time.Sleep(time.Duration(attempt+1) * 10 * time.Millisecond)
continue
}
ae.Add(fmt.Sprintf("%d", uid), fmt.Errorf("failed to apply storage diff for user %d: %w", uid, err))
break
}
// Success, break out of retry loop
lastErr = nil
break
}
if lastErr != nil {
ae.Add(fmt.Sprintf("%d", uid), fmt.Errorf("failed to apply storage diff for user %d: %w", uid, lastErr))
}
}

View File

@ -1,8 +1,10 @@
//go:debug rsa1024min=0
package main
import (
_ "embed"
"flag"
"github.com/cloudreve/Cloudreve/v4/cmd"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
)

View File

@ -3,6 +3,10 @@ package middleware
import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
@ -14,8 +18,6 @@ import (
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
"net/http"
"time"
)
// HashID 将给定对象的HashID转换为真实ID
@ -92,10 +94,16 @@ func MobileRequestOnly() gin.HandlerFunc {
// 2. Generate and inject correlation ID for diagnostic.
func InitializeHandling(dep dependency.Dep) gin.HandlerFunc {
return func(c *gin.Context) {
clientIp := c.ClientIP()
if idx := strings.Index(clientIp, ","); idx > 0 {
clientIp = clientIp[:idx]
}
reqInfo := &requestinfo.RequestInfo{
IP: c.ClientIP(),
IP: clientIp,
Host: c.Request.Host,
UserAgent: c.Request.UserAgent(),
ClientID: c.GetHeader(request.ClientIDHeader),
}
cid := uuid.FromStringOrNil(c.GetHeader(request.CorrelationHeader))
if cid == uuid.Nil {

View File

@ -220,5 +220,8 @@ func getUrlSignContent(ctx context.Context, url *url.URL) string {
// host = strings.TrimSuffix(host, "/")
// // remove port if it exists
// host = strings.Split(host, ":")[0]
if url.Path == "" {
return "/"
}
return url.Path
}

View File

@ -22,4 +22,6 @@ type RequestInfo struct {
Host string
IP string
UserAgent string
// ID of sync client
ClientID string
}

View File

@ -180,9 +180,9 @@ func SlaveFileContentUrl(base *url.URL, srcPath, name string, download bool, spe
return base
}
func SlaveMediaMetaRoute(src, ext string) string {
func SlaveMediaMetaRoute(src, ext, language string) string {
src = url.PathEscape(base64.URLEncoding.EncodeToString([]byte(src)))
return fmt.Sprintf("file/meta/%s/%s", src, url.PathEscape(ext))
return fmt.Sprintf("file/meta/%s/%s?language=%s", src, url.PathEscape(ext), language)
}
func SlaveFileListRoute(srcPath string, recursive bool) string {

View File

@ -46,7 +46,7 @@ type System struct {
SessionSecret string
HashIDSalt string // deprecated
GracePeriod int `validate:"gte=0"`
ProxyHeader string `validate:"required_with=Listen"`
ProxyHeader string
LogLevel string `validate:"oneof=debug info warning error"`
}
@ -114,7 +114,7 @@ var SystemConfig = &System{
Debug: false,
Mode: MasterMode,
Listen: ":5212",
ProxyHeader: "X-Forwarded-For",
ProxyHeader: "",
LogLevel: "info",
}

View File

@ -32,18 +32,18 @@ const (
)
var (
supportDownloadOptions = map[string]bool{
"cookie": true,
"skip_checking": true,
"root_folder": true,
"rename": true,
"upLimit": true,
"dlLimit": true,
"ratioLimit": true,
"seedingTimeLimit": true,
"autoTMM": true,
"sequentialDownload": true,
"firstLastPiecePrio": true,
downloadOptionFormatTypes = map[string]string{
"cookie": "%s",
"skip_checking": "%s",
"root_folder": "%s",
"rename": "%s",
"upLimit": "%.0f",
"dlLimit": "%.0f",
"ratioLimit": "%f",
"seedingTimeLimit": "%.0f",
"autoTMM": "%t",
"sequentialDownload": "%s",
"firstLastPiecePrio": "%t",
}
)
@ -271,15 +271,15 @@ func (c *qbittorrentClient) CreateTask(ctx context.Context, url string, options
// Apply global options
for k, v := range c.options.Options {
if _, ok := supportDownloadOptions[k]; ok {
_ = formWriter.WriteField(k, fmt.Sprintf("%s", v))
if _, ok := downloadOptionFormatTypes[k]; ok {
_ = formWriter.WriteField(k, fmt.Sprintf(downloadOptionFormatTypes[k], v))
}
}
// Apply group options
for k, v := range options {
if _, ok := supportDownloadOptions[k]; ok {
_ = formWriter.WriteField(k, fmt.Sprintf("%s", v))
if _, ok := downloadOptionFormatTypes[k]; ok {
_ = formWriter.WriteField(k, fmt.Sprintf(downloadOptionFormatTypes[k], v))
}
}

View File

@ -2,6 +2,7 @@ package email
import (
"context"
"errors"
"fmt"
"strings"
"time"
@ -9,8 +10,7 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/go-mail/mail"
"github.com/gofrs/uuid"
"github.com/wneessen/go-mail"
)
// SMTPPool SMTP协议发送邮件
@ -38,9 +38,11 @@ type SMTPConfig struct {
}
type message struct {
msg *mail.Message
cid string
userID int
msg *mail.Msg
to string
subject string
cid string
userID int
}
// NewSMTPPool initializes a new SMTP based email sending queue.
@ -81,17 +83,21 @@ func (client *SMTPPool) Send(ctx context.Context, to, title, body string) error
return nil
}
m := mail.NewMessage()
m.SetAddressHeader("From", client.config.From, client.config.FromName)
m.SetAddressHeader("Reply-To", client.config.ReplyTo, client.config.FromName)
m.SetHeader("To", to)
m.SetHeader("Subject", title)
m.SetHeader("Message-ID", fmt.Sprintf("<%s@%s>", uuid.Must(uuid.NewV4()).String(), "cloudreve"))
m.SetBody("text/html", body)
m := mail.NewMsg()
if err := m.FromFormat(client.config.FromName, client.config.From); err != nil {
return err
}
m.ReplyToFormat(client.config.FromName, client.config.ReplyTo)
m.To(to)
m.Subject(title)
m.SetMessageID()
m.SetBodyString(mail.TypeTextHTML, body)
client.ch <- &message{
msg: m,
cid: logging.CorrelationID(ctx).String(),
userID: inventory.UserIDFromContext(ctx),
msg: m,
subject: title,
to: to,
cid: logging.CorrelationID(ctx).String(),
userID: inventory.UserIDFromContext(ctx),
}
return nil
}
@ -116,17 +122,24 @@ func (client *SMTPPool) Init() {
}
}()
d := mail.NewDialer(client.config.Host, client.config.Port, client.config.User, client.config.Password)
d.Timeout = time.Duration(client.config.Keepalive+5) * time.Second
client.chOpen = true
// 是否启用 SSL
d.SSL = false
if client.config.ForceEncryption {
d.SSL = true
opts := []mail.Option{
mail.WithPort(client.config.Port),
mail.WithTimeout(time.Duration(client.config.Keepalive+5) * time.Second),
mail.WithSMTPAuth(mail.SMTPAuthAutoDiscover), mail.WithTLSPortPolicy(mail.TLSOpportunistic),
mail.WithUsername(client.config.User), mail.WithPassword(client.config.Password),
}
if client.config.ForceEncryption {
opts = append(opts, mail.WithSSL())
}
d.StartTLSPolicy = mail.OpportunisticStartTLS
var s mail.SendCloser
d, diaErr := mail.NewClient(client.config.Host, opts...)
if diaErr != nil {
client.l.Panic("Failed to create SMTP client: %s", diaErr)
return
}
client.chOpen = true
var err error
open := false
for {
@ -139,22 +152,32 @@ func (client *SMTPPool) Init() {
}
if !open {
if s, err = d.Dial(); err != nil {
if err = d.DialWithContext(context.Background()); err != nil {
panic(err)
}
open = true
}
l := client.l.CopyWithPrefix(fmt.Sprintf("[Cid: %s]", m.cid))
if err := mail.Send(s, m.msg); err != nil {
if err := d.Send(m.msg); err != nil {
// Check if this is an SMTP RESET error after successful delivery
var sendErr *mail.SendError
var errParsed = errors.As(err, &sendErr)
if errParsed && sendErr.Reason == mail.ErrSMTPReset {
open = false
l.Debug("SMTP RESET error, closing connection...")
// https://github.com/wneessen/go-mail/issues/463
continue // Don't treat this as a delivery failure since mail was sent
}
l.Warning("Failed to send email: %s, Cid=%s", err, m.cid)
} else {
l.Info("Email sent to %q, title: %q.", m.msg.GetHeader("To"), m.msg.GetHeader("Subject"))
l.Info("Email sent to %q, title: %q.", m.to, m.subject)
}
// 长时间没有新邮件则关闭SMTP连接
case <-time.After(time.Duration(client.config.Keepalive) * time.Second):
if open {
if err := s.Close(); err != nil {
if err := d.Close(); err != nil {
client.l.Warning("Failed to close SMTP connection: %s", err)
}
open = false

View File

@ -38,18 +38,29 @@ func NewResetEmail(ctx context.Context, settings setting.Provider, user *ent.Use
Url: url,
}
tmpl, err := template.New("reset").Parse(selected.Body)
tmplTitle, err := template.New("resetTitle").Parse(selected.Title)
if err != nil {
return "", "", fmt.Errorf("failed to parse email title: %w", err)
}
var resTitle strings.Builder
err = tmplTitle.Execute(&resTitle, resetCtx)
if err != nil {
return "", "", fmt.Errorf("failed to execute email title: %w", err)
}
tmplBody, err := template.New("resetBody").Parse(selected.Body)
if err != nil {
return "", "", fmt.Errorf("failed to parse email template: %w", err)
}
var res strings.Builder
err = tmpl.Execute(&res, resetCtx)
var resBody strings.Builder
err = tmplBody.Execute(&resBody, resetCtx)
if err != nil {
return "", "", fmt.Errorf("failed to execute email template: %w", err)
}
return fmt.Sprintf("[%s] %s", resetCtx.SiteBasic.Name, selected.Title), res.String(), nil
return resTitle.String(), resBody.String(), nil
}
// ActivationContext used for variables in activation email
@ -73,18 +84,29 @@ func NewActivationEmail(ctx context.Context, settings setting.Provider, user *en
Url: url,
}
tmpl, err := template.New("activation").Parse(selected.Body)
tmplTitle, err := template.New("activationTitle").Parse(selected.Title)
if err != nil {
return "", "", fmt.Errorf("failed to parse email title: %w", err)
}
var resTitle strings.Builder
err = tmplTitle.Execute(&resTitle, activationCtx)
if err != nil {
return "", "", fmt.Errorf("failed to execute email title: %w", err)
}
tmplBody, err := template.New("activationBody").Parse(selected.Body)
if err != nil {
return "", "", fmt.Errorf("failed to parse email template: %w", err)
}
var res strings.Builder
err = tmpl.Execute(&res, activationCtx)
var resBody strings.Builder
err = tmplBody.Execute(&resBody, activationCtx)
if err != nil {
return "", "", fmt.Errorf("failed to execute email template: %w", err)
}
return fmt.Sprintf("[%s] %s", activationCtx.SiteBasic.Name, selected.Title), res.String(), nil
return resTitle.String(), resBody.String(), nil
}
func commonContext(ctx context.Context, settings setting.Provider) *CommonContext {
@ -122,4 +144,4 @@ func selectTemplate(templates []setting.EmailTemplate, u *ent.User) setting.Emai
}
return selected
}
}

View File

@ -244,7 +244,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 是否允许覆盖
@ -352,6 +352,14 @@ func (handler Driver) Thumb(ctx context.Context, expire *time.Time, ext string,
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("imageMogr2/thumbnail/%dx%d", w, h)
enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format/%s/rquality/%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format/%s", enco.Format)
}
source, err := handler.signSourceURL(
ctx,
e.Source(),
@ -374,7 +382,12 @@ func (handler Driver) Thumb(ctx context.Context, expire *time.Time, ext string,
func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
// 添加各项设置
options := urlOption{}
if args.Speed > 0 {
// Byte 转换为 bit
args.Speed *= 8
// COS对速度值有范围限制
if args.Speed < 819200 {
args.Speed = 819200
}
@ -383,6 +396,7 @@ func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetS
}
options.Speed = args.Speed
}
if args.IsDownload {
encodedFilename := url.PathEscape(args.DisplayName)
options.ContentDescription = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
@ -441,7 +455,7 @@ func (handler Driver) Token(ctx context.Context, uploadSession *fs.UploadSession
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 初始化分片上传
@ -580,7 +594,7 @@ func (handler Driver) Meta(ctx context.Context, path string) (*MetaData, error)
}, nil
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
if util.ContainsString(supportedImageExt, ext) {
return handler.extractImageMeta(ctx, path)
}

View File

@ -83,7 +83,7 @@ type (
Capabilities() *Capabilities
// MediaMeta extracts media metadata from the given file.
MediaMeta(ctx context.Context, path, ext string) ([]MediaMeta, error)
MediaMeta(ctx context.Context, path, ext, language string) ([]MediaMeta, error)
}
Capabilities struct {
@ -117,6 +117,7 @@ const (
MetaTypeExif MetaType = "exif"
MediaTypeMusic MetaType = "music"
MetaTypeStreamMedia MetaType = "stream"
MetaTypeGeocoding MetaType = "geocoding"
)
type ForceUsePublicEndpointCtx struct{}

View File

@ -0,0 +1,592 @@
package ks3
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws/request"
"io"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
"strconv"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/ks3sdklib/aws-sdk-go/aws/awserr"
"github.com/ks3sdklib/aws-sdk-go/service/s3/s3manager"
"github.com/samber/lo"
"github.com/ks3sdklib/aws-sdk-go/aws"
"github.com/ks3sdklib/aws-sdk-go/aws/credentials"
"github.com/ks3sdklib/aws-sdk-go/service/s3"
)
// Driver is a KS3 (Kingsoft Cloud object storage) compatible storage driver.
type Driver struct {
	policy    *ent.StoragePolicy  // storage policy this driver instance serves
	chunkSize int64               // multipart chunk size in bytes
	settings  setting.Provider    // site-wide settings provider
	l         logging.Logger      // structured logger
	config    conf.ConfigProvider // application config provider
	mime      mime.MimeDetector   // MIME detection fallback by file name
	sess      *aws.Config         // shared SDK configuration
	svc       *s3.S3              // KS3 service client built from sess
}
// UploadPolicy describes a KS3 POST upload policy document.
type UploadPolicy struct {
	Expiration string        `json:"expiration"`
	Conditions []interface{} `json:"conditions"`
}
// Session bundles an SDK configuration with its request handler chain.
type Session struct {
	Config   *aws.Config
	Handlers request.Handlers
}
// MetaData holds the basic object metadata returned by Meta.
type MetaData struct {
	Size int64  // object size in bytes
	Etag string // object ETag as reported by KS3
}
var (
	// features stores the driver's static capability flags; populated in init.
	features = &boolset.BooleanSet{}
)
func init() {
boolset.Sets(map[driver.HandlerCapability]bool{
driver.HandlerCapabilityUploadSentinelRequired: true,
}, features)
}
// Int64 returns a pointer to a fresh copy of the given int64 value.
func Int64(v int64) *int64 {
	value := v
	return &value
}
// New constructs a KS3 driver for the given storage policy.
//
// The multipart chunk size defaults to 25 MB when the policy does not
// specify one. The returned error is currently always nil; the signature
// keeps parity with other driver constructors.
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
	chunkSize := policy.Settings.ChunkSize
	if policy.Settings.ChunkSize == 0 {
		chunkSize = 25 << 20 // 25 MB
	}

	// Named "d" rather than "driver" to avoid shadowing the imported
	// driver package.
	d := &Driver{
		policy:    policy,
		settings:  settings,
		chunkSize: chunkSize,
		config:    config,
		l:         l,
		mime:      mime,
	}

	sess := aws.Config{
		Credentials:      credentials.NewStaticCredentials(policy.AccessKey, policy.SecretKey, ""),
		Endpoint:         policy.Server,
		Region:           policy.Settings.Region,
		S3ForcePathStyle: policy.Settings.S3ForcePathStyle,
	}
	d.sess = &sess
	d.svc = s3.New(&sess)

	return d, nil
}
// List enumerates objects under the given base path.
// When recursive is false, a "/" delimiter is passed so only immediate
// children are returned and sub-directories appear as common prefixes.
// onProgress is invoked twice: once with the number of directories found
// and once with the number of files found.
func (handler *Driver) List(ctx context.Context, base string, onProgress driver.ListProgressFunc, recursive bool) ([]fs.PhysicalObject, error) {
	// Normalize the prefix: drop the leading slash and ensure a trailing one.
	base = strings.TrimPrefix(base, "/")
	if base != "" {
		base += "/"
	}

	opt := &s3.ListObjectsInput{
		Bucket:  &handler.policy.BucketName,
		Prefix:  &base,
		MaxKeys: Int64(1000), // KS3/S3 API page-size cap
	}

	// Non-recursive listing: delimit on "/" so directories come back as
	// common prefixes instead of being flattened.
	if !recursive {
		opt.Delimiter = aws.String("/")
	}

	var (
		objects []*s3.Object
		commons []*s3.CommonPrefix
	)

	// Page through results, following the continuation marker until the
	// service reports the listing is no longer truncated.
	for {
		res, err := handler.svc.ListObjectsWithContext(ctx, opt)
		if err != nil {
			return nil, err
		}

		objects = append(objects, res.Contents...)
		commons = append(commons, res.CommonPrefixes...)

		if *res.IsTruncated {
			opt.Marker = res.NextMarker
		} else {
			break
		}
	}

	// Convert the raw listing into fs.PhysicalObject entries.
	res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))

	// Directories (common prefixes).
	for _, object := range commons {
		rel, err := filepath.Rel(*opt.Prefix, *object.Prefix)
		if err != nil {
			// Skip entries whose prefix cannot be relativized.
			continue
		}
		res = append(res, fs.PhysicalObject{
			Name:         path.Base(*object.Prefix),
			RelativePath: filepath.ToSlash(rel),
			Size:         0,
			IsDir:        true,
			// NOTE(review): LastModify is the listing time, not the object's
			// actual modification time — confirm whether callers rely on it.
			LastModify: time.Now(),
		})
	}

	onProgress(len(commons))

	// Files.
	for _, object := range objects {
		rel, err := filepath.Rel(*opt.Prefix, *object.Key)
		if err != nil {
			continue
		}
		res = append(res, fs.PhysicalObject{
			Name:         path.Base(*object.Key),
			Source:       *object.Key,
			RelativePath: filepath.ToSlash(rel),
			Size:         *object.Size,
			IsDir:        false,
			// NOTE(review): see directory note above — listing time, not mtime.
			LastModify: time.Now(),
		})
	}

	onProgress(len(objects))

	return res, nil
}
// Open is not supported by the KS3 driver; object content is accessed
// through presigned URLs rather than local file handles.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	err := errors.New("not implemented")
	return nil, err
}
// Put streams the upload request to KS3 via the SDK's multipart uploader.
// Unless overwrite mode is set, an existing object at the target path
// aborts the upload with fs.ErrFileExisted.
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
	defer file.Close()

	// Reject duplicates unless the caller explicitly allows overwriting.
	if file.Mode&fs.ModeOverwrite != fs.ModeOverwrite {
		if _, err := handler.Meta(ctx, file.Props.SavePath); err == nil {
			return fs.ErrFileExisted
		}
	}

	// Fall back to extension-based detection when no MIME type was supplied.
	mimeType := file.Props.MimeType
	if mimeType == "" {
		mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
	}

	uploader := s3manager.NewUploader(&s3manager.UploadOptions{
		S3:       handler.svc,       // S3 client instance (required)
		PartSize: handler.chunkSize, // part size; SDK default is 5 MB
	})

	_, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
		Bucket:      &handler.policy.BucketName,
		Key:         &file.Props.SavePath,
		Body:        io.LimitReader(file, file.Props.Size),
		ContentType: aws.String(mimeType),
	})
	return err
}
// Delete removes the given objects from the bucket, batching requests up to
// the policy-configured batch size (default 1000, the S3 API limit per
// DeleteObjects call). It returns the keys that could not be deleted along
// with the last error encountered. Missing keys (NoSuchKey) on the single-
// delete path are not treated as failures.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	failed := make([]string, 0, len(files))
	batchSize := handler.policy.Settings.S3DeleteBatchSize
	if batchSize == 0 {
		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
		// The request can contain a list of up to 1000 keys that you want to delete.
		batchSize = 1000
	}

	var lastErr error
	groups := lo.Chunk(files, batchSize)
	for _, group := range groups {
		if len(group) == 1 {
			// Invoke single file delete API
			_, err := handler.svc.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
				Bucket: &handler.policy.BucketName,
				Key:    &group[0],
			})
			if err != nil {
				if aerr, ok := err.(awserr.Error); ok {
					// Ignore NoSuchKey error
					if aerr.Code() == s3.ErrCodeNoSuchKey {
						continue
					}
				}

				failed = append(failed, group[0])
				lastErr = err
			}
		} else {
			// Invoke batch delete API.
			// NOTE(review): this call does not propagate ctx; switch to a
			// WithContext variant if the KS3 SDK provides one.
			res, err := handler.svc.DeleteObjects(
				&s3.DeleteObjectsInput{
					Bucket: &handler.policy.BucketName,
					Delete: &s3.Delete{
						Objects: lo.Map(group, func(s string, i int) *s3.ObjectIdentifier {
							return &s3.ObjectIdentifier{Key: &s}
						}),
					},
				})
			if err != nil {
				failed = append(failed, group...)
				lastErr = err
				continue
			}

			// Collect per-key failures reported inside a successful response.
			for _, v := range res.Errors {
				// Fixed: the third format argument previously repeated v.Key,
				// so the error message was never logged.
				handler.l.Debug("Failed to delete file: %s, Code:%s, Message:%s", v.Key, v.Code, v.Message)
				failed = append(failed, *v.Key)
			}
		}
	}

	return failed, lastErr
}
// Thumb generates a presigned URL for a server-side scaled thumbnail of the
// entity, using KS3's image-processing query suffix ("@base@tag=imgScale...").
// For public buckets the signature query string is stripped from the result.
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	w, h := handler.settings.ThumbSize(ctx)
	thumbParam := fmt.Sprintf("@base@tag=imgScale&m=0&w=%d&h=%d", w, h)

	// Append output format (and quality for lossy formats) per site settings.
	enco := handler.settings.ThumbEncode(ctx)
	switch enco.Format {
	case "jpg", "webp":
		thumbParam += fmt.Sprintf("&q=%d&F=%s", enco.Quality, enco.Format)
	case "png":
		thumbParam += fmt.Sprintf("&F=%s", enco.Format)
	}

	// Ensure the presign TTL is positive: fall back to 7 days when no expiry
	// is given, or when the requested expiry is already in the past (the
	// previous code could produce a negative TTL in that case).
	var ttl int64 = 604800
	if expire != nil {
		if remaining := int64(time.Until(*expire).Seconds()); remaining > 0 {
			ttl = remaining
		}
	}

	thumbUrl, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
		HTTPMethod: s3.GET,                               // request method
		Bucket:     &handler.policy.BucketName,           // bucket name
		Key:        aws.String(e.Source() + thumbParam),  // object key + processing params
		Expires:    ttl,                                  // expiry in seconds
	})
	if err != nil {
		return "", err
	}

	// Parse so the signature can be stripped for public buckets (and to
	// allow CDN domain substitution downstream).
	finalThumbURL, err := url.Parse(thumbUrl)
	if err != nil {
		return "", err
	}

	// Public buckets do not need (or support) the signature query string.
	if !handler.policy.IsPrivate {
		finalThumbURL.RawQuery = ""
	}

	return finalThumbURL.String(), nil
}
// Source generates a presigned (or public) download URL for the entity.
// When args.IsDownload is set, a Content-Disposition response header is
// attached so browsers save the file under args.DisplayName.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	var contentDescription *string
	if args.IsDownload {
		encodedFilename := url.PathEscape(args.DisplayName)
		contentDescription = aws.String(fmt.Sprintf(`attachment; filename=%s`, encodedFilename))
	}

	// Ensure the presign TTL is positive: fall back to 7 days when no expiry
	// is given, or when the requested expiry is already in the past (the
	// previous code could produce a negative TTL in that case).
	var ttl int64 = 604800
	if args.Expire != nil {
		if remaining := int64(time.Until(*args.Expire).Seconds()); remaining > 0 {
			ttl = remaining
		}
	}

	downloadUrl, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
		HTTPMethod:                 s3.GET,                     // request method
		Bucket:                     &handler.policy.BucketName, // bucket name
		Key:                        aws.String(e.Source()),     // object key
		Expires:                    ttl,                        // expiry in seconds
		ResponseContentDisposition: contentDescription,         // Content-Disposition response header
	})
	if err != nil {
		return "", err
	}

	// Parse so the signature can be stripped for public buckets (and to
	// allow CDN domain substitution downstream).
	finalURL, err := url.Parse(downloadUrl)
	if err != nil {
		return "", err
	}

	// Public buckets do not need (or support) the signature query string.
	if !handler.policy.IsPrivate {
		finalURL.RawQuery = ""
	}

	return finalURL.String(), nil
}
// Token issues an upload credential for client-side (direct-to-KS3)
// multipart upload: it creates the multipart upload, presigns one PUT URL
// per chunk plus a final POST URL to complete the upload, and records the
// callback URL the client reports to when finished.
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
	// Check for duplicated file
	if _, err := handler.Meta(ctx, file.Props.SavePath); err == nil {
		return nil, fs.ErrFileExisted
	}

	// Build the master/slave callback URL.
	siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))

	// Create the upload session on the slave side.
	uploadSession.ChunkSize = handler.chunkSize
	uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeKs3, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()

	// Fall back to extension-based MIME detection when none was supplied.
	mimeType := file.Props.MimeType
	if mimeType == "" {
		mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
	}

	// Initiate the multipart upload.
	res, err := handler.svc.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
		Bucket:      &handler.policy.BucketName,
		Key:         &uploadSession.Props.SavePath,
		Expires:     &uploadSession.Props.ExpireAt,
		ContentType: aws.String(mimeType),
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create multipart upload: %w", err)
	}

	uploadSession.UploadID = *res.UploadID

	// Presign one upload URL per chunk. Part numbers are 1-based.
	chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
	urls := make([]string, chunks.Num())
	for chunks.Next() {
		err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
			// TTL for this presigned URL, in seconds.
			expireSeconds := int(time.Until(uploadSession.Props.ExpireAt).Seconds())

			partNumber := c.Index() + 1
			// Generate the presigned PUT URL for this part.
			signedURL, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
				HTTPMethod: s3.PUT,
				Bucket:     &handler.policy.BucketName,
				Key:        &uploadSession.Props.SavePath,
				Expires:    int64(expireSeconds),
				Parameters: map[string]*string{
					"partNumber": aws.String(strconv.Itoa(partNumber)),
					"uploadId":   res.UploadID,
				},
				ContentType: aws.String("application/octet-stream"),
			})
			if err != nil {
				return fmt.Errorf("failed to generate presigned upload url for chunk %d: %w", partNumber, err)
			}

			urls[c.Index()] = signedURL
			return nil
		})
		if err != nil {
			return nil, err
		}
	}

	// Presign the CompleteMultipartUpload (POST) request.
	// NOTE(review): parts above are signed against uploadSession.Props.SavePath
	// while this uses file.Props.SavePath — presumably identical; confirm with
	// the callers that create the session.
	expireSeconds := int(time.Until(uploadSession.Props.ExpireAt).Seconds())
	signedURL, err := handler.svc.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
		HTTPMethod: s3.POST,
		Bucket:     &handler.policy.BucketName,
		Key:        &file.Props.SavePath,
		Expires:    int64(expireSeconds),
		Parameters: map[string]*string{
			"uploadId": res.UploadID,
		},
		ContentType: aws.String("application/octet-stream"),
	})
	if err != nil {
		return nil, err
	}

	// Assemble the credential returned to the client.
	return &fs.UploadCredential{
		UploadID:    *res.UploadID,
		UploadURLs:  urls,
		CompleteURL: signedURL,
		SessionID:   uploadSession.Props.UploadSessionID,
		ChunkSize:   handler.chunkSize,
	}, nil
}
// CancelToken aborts the multipart upload associated with the given upload
// session, invalidating any previously issued part URLs.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	input := &s3.AbortMultipartUploadInput{
		Bucket:   &handler.policy.BucketName,
		Key:      &uploadSession.Props.SavePath,
		UploadID: &uploadSession.UploadID,
	}
	_, err := handler.svc.AbortMultipartUploadWithContext(ctx, input)
	return err
}
// cancelUpload best-effort aborts a multipart upload; failures are logged
// as warnings rather than returned.
func (handler *Driver) cancelUpload(key, id *string) {
	input := &s3.AbortMultipartUploadInput{
		Bucket:   &handler.policy.BucketName,
		UploadID: id,
		Key:      key,
	}
	_, err := handler.svc.AbortMultipartUpload(input)
	if err != nil {
		handler.l.Warning("failed to abort multipart upload: %s", err)
	}
}
// Capabilities reports the static and policy-derived features of this driver.
func (handler *Driver) Capabilities() *driver.Capabilities {
	const maxPresignTTL = 604800 * time.Second // 7 days, the presign URL limit
	return &driver.Capabilities{
		StaticFeatures:  features,
		MediaMetaProxy:  handler.policy.Settings.MediaMetaGeneratorProxy,
		ThumbProxy:      handler.policy.Settings.ThumbGeneratorProxy,
		MaxSourceExpire: maxPresignTTL,
	}
}
// MediaMeta extracts media metadata for the object at path.
// This driver does not implement server-side media meta extraction and
// always returns a "not implemented" error; callers are expected to fall
// back to proxy-based extraction (see Capabilities.MediaMetaProxy).
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
	return nil, errors.New("not implemented")
}
// LocalPath returns the local filesystem path for the object at path.
// Objects in this driver live in remote object storage, so there is no
// local path and an empty string is always returned.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
// CompleteUpload performs post-upload validation for a client-side upload.
// When the session has no sentinel task, no validation is needed. Otherwise
// the actual object size is fetched via HEAD and compared against the size
// declared in the session; a mismatch is reported as CodeMetaMismatch.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	// Sessions without a sentinel task skip server-side verification.
	if session.SentinelTaskID == 0 {
		return nil
	}

	meta, err := handler.Meta(ctx, session.Props.SavePath)
	if err != nil {
		return fmt.Errorf("failed to get uploaded file size: %w", err)
	}

	expected := session.Props.Size
	if meta.Size == expected {
		return nil
	}

	return serializer.NewError(
		serializer.CodeMetaMismatch,
		fmt.Sprintf("File size not match, expected: %d, actual: %d", expected, meta.Size),
		nil,
	)
}
// Meta retrieves object metadata (size and ETag) for the object at path
// using a HEAD request against the bucket.
//
// The SDK response exposes ContentLength and ETag as pointers that may be
// nil (e.g. when the backend omits a header), so both are guarded before
// dereferencing to avoid a nil-pointer panic; missing fields fall back to
// their zero values.
func (handler *Driver) Meta(ctx context.Context, path string) (*MetaData, error) {
	res, err := handler.svc.HeadObjectWithContext(ctx,
		&s3.HeadObjectInput{
			Bucket: &handler.policy.BucketName,
			Key:    &path,
		})
	if err != nil {
		return nil, err
	}

	meta := &MetaData{}
	if res.ContentLength != nil {
		meta.Size = *res.ContentLength
	}
	if res.ETag != nil {
		meta.Etag = *res.ETag
	}
	return meta, nil
}
// CORS applies a permissive cross-origin rule to the bucket so that
// browser-based clients can upload and download directly: all origins and
// headers are allowed for the standard HTTP verbs, ETag is exposed, and
// preflight results may be cached for one hour.
func (handler *Driver) CORS() error {
	corsInput := &s3.PutBucketCORSInput{
		Bucket: &handler.policy.BucketName,
		CORSConfiguration: &s3.CORSConfiguration{
			Rules: []*s3.CORSRule{
				{
					AllowedMethod: []string{"GET", "POST", "PUT", "DELETE", "HEAD"},
					AllowedOrigin: []string{"*"},
					AllowedHeader: []string{"*"},
					ExposeHeader:  []string{"ETag"},
					MaxAgeSeconds: 3600,
				},
			},
		},
	}
	_, err := handler.svc.PutBucketCORS(corsInput)
	return err
}
// Reader wraps an io.Reader in a concrete named type.
type Reader struct {
	// r is the underlying data source all reads are delegated to.
	r io.Reader
}

// Read delegates to the wrapped reader, implementing io.Reader.
func (r Reader) Read(p []byte) (int, error) {
	return r.r.Read(p)
}

View File

@ -1,13 +1,14 @@
package local
import (
"os"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gofrs/uuid"
"os"
"time"
)
// NewLocalFileEntity creates a new local file entity.
@ -73,3 +74,11 @@ func (l *localFileEntity) UploadSessionID() *uuid.UUID {
func (l *localFileEntity) Model() *ent.Entity {
return nil
}
func (l *localFileEntity) Props() *types.EntityProps {
return nil
}
func (l *localFileEntity) Encrypted() bool {
return false
}

View File

@ -140,9 +140,9 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
}
openMode := os.O_CREATE | os.O_RDWR
if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite && file.Offset == 0 {
openMode |= os.O_TRUNC
}
// if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite && file.Offset == 0 {
// openMode |= os.O_TRUNC
// }
out, err := os.OpenFile(dst, openMode, Perm)
if err != nil {
@ -298,6 +298,6 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
return capabilities
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return nil, errors.New("not implemented")
}

View File

@ -17,7 +17,7 @@ import (
"github.com/samber/lo"
)
func (d *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (d *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
Method: obs.HttpMethodGet,
Bucket: d.policy.BucketName,

View File

@ -335,13 +335,23 @@ func (d *Driver) LocalPath(ctx context.Context, path string) string {
func (d *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
w, h := d.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("image/resize,m_lfit,w_%d,h_%d", w, h)
enco := d.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format,%s/quality,q_%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format,%s", enco.Format)
}
thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
Method: obs.HttpMethodGet,
Bucket: d.policy.BucketName,
Key: e.Source(),
Expires: int(time.Until(*expire).Seconds()),
QueryParams: map[string]string{
imageProcessHeader: fmt.Sprintf("image/resize,m_lfit,w_%d,h_%d", w, h),
imageProcessHeader: thumbParam,
},
})

View File

@ -241,7 +241,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return nil, errors.New("not implemented")
}

View File

@ -10,12 +10,13 @@ import (
"encoding/pem"
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"io"
"net/http"
"net/url"
"strings"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
)
const (

View File

@ -5,16 +5,17 @@ import (
"encoding/json"
"encoding/xml"
"fmt"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/samber/lo"
"math"
"net/http"
"strconv"
"strings"
"time"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/samber/lo"
)
const (
@ -265,13 +266,14 @@ func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]dri
// extractMediaInfo Sends API calls to OSS IMM service to extract media info.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, category string, forceSign bool) (string, error) {
mediaOption := []oss.Option{oss.Process(category)}
mediaInfoExpire := time.Now().Add(mediaInfoTTL)
thumbURL, err := handler.signSourceURL(
ctx,
path,
&mediaInfoExpire,
mediaOption,
&oss.GetObjectRequest{
Process: oss.Ptr(category),
},
forceSign,
)
if err != nil {

View File

@ -15,7 +15,8 @@ import (
"strings"
"time"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
@ -52,7 +53,6 @@ type Driver struct {
policy *ent.StoragePolicy
client *oss.Client
bucket *oss.Bucket
settings setting.Provider
l logging.Logger
config conf.ConfigProvider
@ -65,12 +65,12 @@ type Driver struct {
type key int
const (
chunkRetrySleep = time.Duration(5) * time.Second
uploadIdParam = "uploadId"
partNumberParam = "partNumber"
callbackParam = "callback"
completeAllHeader = "x-oss-complete-all"
maxDeleteBatch = 1000
chunkRetrySleep = time.Duration(5) * time.Second
maxDeleteBatch = 1000
maxSignTTL = time.Duration(24) * time.Hour * 7
completeAllHeader = "x-oss-complete-all"
forbidOverwriteHeader = "x-oss-forbid-overwrite"
trafficLimitHeader = "x-oss-traffic-limit"
// MultiPartUploadThreshold 服务端使用分片上传的阈值
MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB
@ -102,21 +102,27 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
// CORS 创建跨域策略
func (handler *Driver) CORS() error {
return handler.client.SetBucketCORS(handler.policy.BucketName, []oss.CORSRule{
{
AllowedOrigin: []string{"*"},
AllowedMethod: []string{
"GET",
"POST",
"PUT",
"DELETE",
"HEAD",
_, err := handler.client.PutBucketCors(context.Background(), &oss.PutBucketCorsRequest{
Bucket: &handler.policy.BucketName,
CORSConfiguration: &oss.CORSConfiguration{
CORSRules: []oss.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{
"GET",
"POST",
"PUT",
"DELETE",
"HEAD",
},
ExposeHeaders: []string{},
AllowedHeaders: []string{"*"},
MaxAgeSeconds: oss.Ptr(int64(3600)),
},
},
ExposeHeader: []string{},
AllowedHeader: []string{"*"},
MaxAgeSeconds: 3600,
},
})
}})
return err
}
// InitOSSClient 初始化OSS鉴权客户端
@ -125,34 +131,28 @@ func (handler *Driver) InitOSSClient(forceUsePublicEndpoint bool) error {
return errors.New("empty policy")
}
opt := make([]oss.ClientOption, 0)
// 决定是否使用内网 Endpoint
endpoint := handler.policy.Server
useCname := false
if handler.policy.Settings.ServerSideEndpoint != "" && !forceUsePublicEndpoint {
endpoint = handler.policy.Settings.ServerSideEndpoint
} else if handler.policy.Settings.UseCname {
opt = append(opt, oss.UseCname(true))
useCname = true
}
if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "https://" + endpoint
}
cfg := oss.LoadDefaultConfig().
WithCredentialsProvider(credentials.NewStaticCredentialsProvider(handler.policy.AccessKey, handler.policy.SecretKey, "")).
WithEndpoint(endpoint).
WithRegion(handler.policy.Settings.Region).
WithUseCName(useCname)
// 初始化客户端
client, err := oss.New(endpoint, handler.policy.AccessKey, handler.policy.SecretKey, opt...)
if err != nil {
return err
}
client := oss.NewClient(cfg)
handler.client = client
// 初始化存储桶
bucket, err := client.Bucket(handler.policy.BucketName)
if err != nil {
return err
}
handler.bucket = bucket
return nil
}
@ -166,38 +166,40 @@ func (handler *Driver) List(ctx context.Context, base string, onProgress driver.
var (
delimiter string
marker string
objects []oss.ObjectProperties
commons []string
commons []oss.CommonPrefix
)
if !recursive {
delimiter = "/"
}
for {
subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base),
oss.MaxKeys(1000), oss.Delimiter(delimiter))
p := handler.client.NewListObjectsPaginator(&oss.ListObjectsRequest{
Bucket: &handler.policy.BucketName,
Prefix: &base,
MaxKeys: 1000,
Delimiter: &delimiter,
})
for p.HasNext() {
page, err := p.NextPage(ctx)
if err != nil {
return nil, err
}
objects = append(objects, subRes.Objects...)
commons = append(commons, subRes.CommonPrefixes...)
marker = subRes.NextMarker
if marker == "" {
break
}
objects = append(objects, page.Contents...)
commons = append(commons, page.CommonPrefixes...)
}
// 处理列取结果
res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))
// 处理目录
for _, object := range commons {
rel, err := filepath.Rel(base, object)
rel, err := filepath.Rel(base, *object.Prefix)
if err != nil {
continue
}
res = append(res, fs.PhysicalObject{
Name: path.Base(object),
Name: path.Base(*object.Prefix),
RelativePath: filepath.ToSlash(rel),
Size: 0,
IsDir: true,
@ -208,17 +210,17 @@ func (handler *Driver) List(ctx context.Context, base string, onProgress driver.
// 处理文件
for _, object := range objects {
rel, err := filepath.Rel(base, object.Key)
rel, err := filepath.Rel(base, *object.Key)
if err != nil {
continue
}
res = append(res, fs.PhysicalObject{
Name: path.Base(object.Key),
Source: object.Key,
Name: path.Base(*object.Key),
Source: *object.Key,
RelativePath: filepath.ToSlash(rel),
Size: object.Size,
IsDir: false,
LastModify: object.LastModified,
LastModify: *object.LastModified,
})
}
onProgress(len(res))
@ -240,30 +242,39 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 是否允许覆盖
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
options := []oss.Option{
oss.WithContext(ctx),
oss.Expires(time.Now().Add(credentialTTL * time.Second)),
oss.ForbidOverWrite(!overwrite),
oss.ContentType(mimeType),
}
forbidOverwrite := oss.Ptr(strconv.FormatBool(!overwrite))
exipires := oss.Ptr(time.Now().Add(credentialTTL * time.Second).Format(time.RFC3339))
// 小文件直接上传
if file.Props.Size < MultiPartUploadThreshold {
return handler.bucket.PutObject(file.Props.SavePath, file, options...)
_, err := handler.client.PutObject(ctx, &oss.PutObjectRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
Body: file,
ForbidOverwrite: forbidOverwrite,
ContentType: oss.Ptr(mimeType),
})
return err
}
// 超过阈值时使用分片上传
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
imur, err := handler.client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
ContentType: oss.Ptr(mimeType),
ForbidOverwrite: forbidOverwrite,
Expires: exipires,
})
if err != nil {
return fmt.Errorf("failed to initiate multipart upload: %w", err)
}
parts := make([]oss.UploadPart, 0)
parts := make([]*oss.UploadPartResult, 0)
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
Max: handler.settings.ChunkRetryLimit(ctx),
@ -271,7 +282,13 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
part, err := handler.bucket.UploadPart(imur, content, current.Length(), current.Index()+1, oss.WithContext(ctx))
part, err := handler.client.UploadPart(ctx, &oss.UploadPartRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
PartNumber: int32(current.Index() + 1),
Body: content,
})
if err == nil {
parts = append(parts, part)
}
@ -280,14 +297,27 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
for chunks.Next() {
if err := chunks.Process(uploadFunc); err != nil {
handler.cancelUpload(imur)
handler.cancelUpload(*imur)
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
}
}
_, err = handler.bucket.CompleteMultipartUpload(imur, parts, oss.ForbidOverWrite(!overwrite), oss.WithContext(ctx))
_, err = handler.client.CompleteMultipartUpload(ctx, &oss.CompleteMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: imur.Key,
UploadId: imur.UploadId,
CompleteMultipartUpload: &oss.CompleteMultipartUpload{
Parts: lo.Map(parts, func(part *oss.UploadPartResult, i int) oss.UploadPart {
return oss.UploadPart{
PartNumber: int32(i + 1),
ETag: part.ETag,
}
}),
},
ForbidOverwrite: oss.Ptr(strconv.FormatBool(!overwrite)),
})
if err != nil {
handler.cancelUpload(imur)
handler.cancelUpload(*imur)
}
return err
@ -302,7 +332,12 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
for index, group := range groups {
handler.l.Debug("Process delete group #%d: %v", index, group)
// 删除文件
delRes, err := handler.bucket.DeleteObjects(group)
delRes, err := handler.client.DeleteMultipleObjects(ctx, &oss.DeleteMultipleObjectsRequest{
Bucket: &handler.policy.BucketName,
Objects: lo.Map(group, func(v string, i int) oss.DeleteObject {
return oss.DeleteObject{Key: &v}
}),
})
if err != nil {
failed = append(failed, group...)
lastError = err
@ -310,7 +345,14 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
}
// 统计未删除的文件
failed = append(failed, util.SliceDifference(files, delRes.DeletedObjects)...)
failed = append(
failed,
util.SliceDifference(files,
lo.Map(delRes.DeletedObjects, func(v oss.DeletedInfo, i int) string {
return *v.Key
}),
)...,
)
}
if len(failed) > 0 && lastError == nil {
@ -334,12 +376,23 @@ func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string,
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("image/resize,m_lfit,h_%d,w_%d", h, w)
thumbOption := []oss.Option{oss.Process(thumbParam)}
enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format,%s/quality,q_%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format,%s", enco.Format)
}
req := &oss.GetObjectRequest{
Process: oss.Ptr(thumbParam),
}
thumbURL, err := handler.signSourceURL(
ctx,
e.Source(),
expire,
thumbOption,
req,
false,
)
if err != nil {
@ -361,11 +414,11 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
}
// 添加各项设置
var signOptions = make([]oss.Option, 0, 2)
req := &oss.GetObjectRequest{}
if args.IsDownload {
encodedFilename := url.PathEscape(args.DisplayName)
signOptions = append(signOptions, oss.ResponseContentDisposition(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
encodedFilename, encodedFilename)))
req.ResponseContentDisposition = oss.Ptr(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
encodedFilename, encodedFilename))
}
if args.Speed > 0 {
// Byte 转换为 bit
@ -378,25 +431,39 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
if args.Speed > 838860800 {
args.Speed = 838860800
}
signOptions = append(signOptions, oss.TrafficLimitParam(args.Speed))
req.Parameters = map[string]string{
trafficLimitHeader: strconv.FormatInt(args.Speed, 10),
}
}
return handler.signSourceURL(ctx, e.Source(), args.Expire, signOptions, false)
return handler.signSourceURL(ctx, e.Source(), args.Expire, req, false)
}
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, options []oss.Option, forceSign bool) (string, error) {
ttl := int64(86400 * 365 * 20)
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, req *oss.GetObjectRequest, forceSign bool) (string, error) {
// V4 Sign 最大过期时间为7天
ttl := maxSignTTL
if expire != nil {
ttl = int64(time.Until(*expire).Seconds())
ttl = time.Until(*expire)
if ttl > maxSignTTL {
ttl = maxSignTTL
}
}
signedURL, err := handler.bucket.SignURL(path, oss.HTTPGet, ttl, options...)
if req == nil {
req = &oss.GetObjectRequest{}
}
req.Bucket = &handler.policy.BucketName
req.Key = &path
// signedURL, err := handler.client.Presign(path, oss.HTTPGet, ttl, options...)
result, err := handler.client.Presign(ctx, req, oss.PresignExpires(ttl))
if err != nil {
return "", err
}
// 将最终生成的签名URL域名换成用户自定义的加速域名如果有
finalURL, err := url.Parse(signedURL)
finalURL, err := url.Parse(result.URL)
if err != nil {
return "", err
}
@ -404,10 +471,12 @@ func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *t
// 公有空间替换掉Key及不支持的头
if !handler.policy.IsPrivate && !forceSign {
query := finalURL.Query()
query.Del("OSSAccessKeyId")
query.Del("Signature")
query.Del("x-oss-credential")
query.Del("x-oss-date")
query.Del("x-oss-expires")
query.Del("x-oss-signature")
query.Del("x-oss-signature-version")
query.Del("response-content-disposition")
query.Del("x-oss-traffic-limit")
finalURL.RawQuery = query.Encode()
}
return finalURL.String(), nil
@ -441,38 +510,45 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 初始化分片上传
options := []oss.Option{
oss.WithContext(ctx),
oss.Expires(uploadSession.Props.ExpireAt),
oss.ForbidOverWrite(true),
oss.ContentType(mimeType),
}
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
imur, err := handler.client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
ContentType: oss.Ptr(mimeType),
ForbidOverwrite: oss.Ptr(strconv.FormatBool(true)),
Expires: oss.Ptr(uploadSession.Props.ExpireAt.Format(time.RFC3339)),
})
if err != nil {
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
}
uploadSession.UploadID = imur.UploadID
uploadSession.UploadID = *imur.UploadId
// 为每个分片签名上传 URL
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
urls := make([]string, chunks.Num())
ttl := int64(time.Until(uploadSession.Props.ExpireAt).Seconds())
ttl := time.Until(uploadSession.Props.ExpireAt)
for chunks.Next() {
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
signedURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPut,
ttl,
oss.AddParam(partNumberParam, strconv.Itoa(c.Index()+1)),
oss.AddParam(uploadIdParam, imur.UploadID),
oss.ContentType("application/octet-stream"))
signedURL, err := handler.client.Presign(ctx, &oss.UploadPartRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
PartNumber: int32(c.Index() + 1),
Body: chunk,
RequestCommon: oss.RequestCommon{
Headers: map[string]string{
"Content-Type": "application/octet-stream",
},
},
}, oss.PresignExpires(ttl))
if err != nil {
return err
}
urls[c.Index()] = signedURL
urls[c.Index()] = signedURL.URL
return nil
})
if err != nil {
@ -481,29 +557,43 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
}
// 签名完成分片上传的URL
completeURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPost, ttl,
oss.ContentType("application/octet-stream"),
oss.AddParam(uploadIdParam, imur.UploadID),
oss.Expires(time.Now().Add(time.Duration(ttl)*time.Second)),
oss.SetHeader(completeAllHeader, "yes"),
oss.ForbidOverWrite(true),
oss.AddParam(callbackParam, callbackPolicyEncoded))
completeURL, err := handler.client.Presign(ctx, &oss.CompleteMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
RequestCommon: oss.RequestCommon{
Parameters: map[string]string{
"callback": callbackPolicyEncoded,
},
Headers: map[string]string{
"Content-Type": "application/octet-stream",
completeAllHeader: "yes",
forbidOverwriteHeader: "true",
},
},
}, oss.PresignExpires(ttl))
if err != nil {
return nil, err
}
return &fs.UploadCredential{
UploadID: imur.UploadID,
UploadID: *imur.UploadId,
UploadURLs: urls,
CompleteURL: completeURL,
CompleteURL: completeURL.URL,
SessionID: uploadSession.Props.UploadSessionID,
ChunkSize: handler.chunkSize,
Callback: callbackPolicyEncoded,
}, nil
}
// 取消上传凭证
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
return handler.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{UploadID: uploadSession.UploadID, Key: uploadSession.Props.SavePath}, oss.WithContext(ctx))
_, err := handler.client.AbortMultipartUpload(ctx, &oss.AbortMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &uploadSession.Props.SavePath,
UploadId: &uploadSession.UploadID,
})
return err
}
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
@ -526,7 +616,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
if util.ContainsString(supportedImageExt, ext) {
return handler.extractImageMeta(ctx, path)
}
@ -547,7 +637,11 @@ func (handler *Driver) LocalPath(ctx context.Context, path string) string {
}
func (handler *Driver) cancelUpload(imur oss.InitiateMultipartUploadResult) {
if err := handler.bucket.AbortMultipartUpload(imur); err != nil {
if _, err := handler.client.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: imur.Key,
UploadId: imur.UploadId,
}); err != nil {
handler.l.Warning("failed to abort multipart upload: %s", err)
}
}

View File

@ -67,7 +67,10 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
}
mac := qbox.NewMac(policy.AccessKey, policy.SecretKey)
cfg := &storage.Config{UseHTTPS: true}
cfg := &storage.Config{
UseHTTPS: true,
UseCdnDomains: policy.Settings.QiniuUploadCdn,
}
driver := &Driver{
policy: policy,
@ -220,7 +223,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
err = resumeUploader.CompleteParts(ctx, upToken, upHost, nil, handler.policy.BucketName,
@ -274,10 +277,20 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
// Thumb 获取文件缩略图
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("imageView2/1/w/%d/h/%d", w, h)
enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format/%s/q/%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format/%s", enco.Format)
}
return handler.signSourceURL(
e.Source(),
url.Values{
fmt.Sprintf("imageView2/1/w/%d/h/%d", w, h): []string{},
thumbParam: []string{},
},
expire,
), nil
@ -376,7 +389,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
uploadSession.UploadID = ret.UploadID
@ -420,7 +433,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
if util.ContainsString(supportedImageExt, ext) {
return handler.extractImageMeta(ctx, path)
}

View File

@ -43,7 +43,7 @@ type Client interface {
// DeleteUploadSession deletes remote upload session
DeleteUploadSession(ctx context.Context, sessionID string) error
// MediaMeta gets media meta from remote server
MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error)
MediaMeta(ctx context.Context, src, ext, language string) ([]driver.MediaMeta, error)
// DeleteFiles deletes files from remote server
DeleteFiles(ctx context.Context, files ...string) ([]string, error)
// List lists files from remote server
@ -183,10 +183,10 @@ func (c *remoteClient) DeleteFiles(ctx context.Context, files ...string) ([]stri
return nil, nil
}
func (c *remoteClient) MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error) {
func (c *remoteClient) MediaMeta(ctx context.Context, src, ext, language string) ([]driver.MediaMeta, error) {
resp, err := c.httpClient.Request(
http.MethodGet,
routes.SlaveMediaMetaRoute(src, ext),
routes.SlaveMediaMetaRoute(src, ext, language),
nil,
request.WithContext(ctx),
request.WithLogger(c.l),

View File

@ -179,6 +179,6 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
return handler.uploadClient.MediaMeta(ctx, path, ext)
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return handler.uploadClient.MediaMeta(ctx, path, ext, language)
}

View File

@ -207,7 +207,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
_, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
@ -344,7 +344,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
// 创建分片上传
@ -482,7 +482,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return nil, errors.New("not implemented")
}

View File

@ -161,7 +161,7 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
err := handler.up.Put(&upyun.PutObjectConfig{
@ -203,8 +203,16 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
// Thumb 获取文件缩略图
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
w, h := handler.settings.ThumbSize(ctx)
thumbParam := fmt.Sprintf("!/fwfh/%dx%d", w, h)
enco := handler.settings.ThumbEncode(ctx)
switch enco.Format {
case "jpg", "webp":
thumbParam += fmt.Sprintf("/format/%s/quality/%d", enco.Format, enco.Quality)
case "png":
thumbParam += fmt.Sprintf("/format/%s", enco.Format)
}
thumbURL, err := handler.signURL(ctx, e.Source()+thumbParam, nil, expire)
if err != nil {
return "", err
@ -301,7 +309,7 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
mimeType := file.Props.MimeType
if mimeType == "" {
handler.mime.TypeByName(file.Props.Uri.Name())
mimeType = handler.mime.TypeByName(file.Props.Uri.Name())
}
return &fs.UploadCredential{
@ -337,7 +345,7 @@ func (handler *Driver) Capabilities() *driver.Capabilities {
}
}
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
func (handler *Driver) MediaMeta(ctx context.Context, path, ext, language string) ([]driver.MediaMeta, error) {
return handler.extractImageMeta(ctx, path)
}

View File

@ -0,0 +1,360 @@
// Package encrypt provides AES-256-CTR encryption and decryption functionality
// compatible with the JavaScript EncryptedBlob implementation.
//
// # Usage Example
//
// Basic usage with encrypted metadata:
//
// // Create AES256CTR instance
// aes := NewAES256CTR(masterKeyVault)
//
// // Load encrypted metadata (key is encrypted with master key)
// err := aes.LoadMetadata(ctx, encryptedMetadata, masterKeyVault)
// if err != nil {
// return err
// }
//
// // Set encrypted source stream
// err = aes.SetSource(encryptedStream, 0)
// if err != nil {
// return err
// }
//
// // Read decrypted data
// decryptedData, err := io.ReadAll(aes)
// if err != nil {
// return err
// }
// aes.Close()
//
// Usage with plain metadata (already decrypted):
//
// aes := NewAES256CTR(masterKeyVault)
// err := aes.LoadPlainMetadata(plainMetadata)
// err = aes.SetSource(encryptedStream, 0)
// // Read decrypted data...
//
// Usage with counter offset (for chunked/sliced streams):
//
// // If reading from byte offset 1048576 (1MB) of the encrypted file
// aes := NewAES256CTR(masterKeyVault)
// err := aes.LoadPlainMetadata(metadata)
// err = aes.SetSource(encryptedStreamStartingAt1MB, 1048576)
// // This ensures proper counter alignment for correct decryption
//
// Using the Seeker interface (requires seekable source):
//
// aes := NewAES256CTR(masterKeyVault)
// err := aes.LoadPlainMetadata(metadata)
// err = aes.SetSource(seekableEncryptedStream, 0)
// aes.SetSize(totalFileSize) // Required for io.SeekEnd
//
// // Seek to position 1048576
// newPos, err := aes.Seek(1048576, io.SeekStart)
// // Read from that position...
//
// // Seek relative to current position
// newPos, err = aes.Seek(100, io.SeekCurrent)
//
// // Seek from end (requires SetSize to be called first)
// newPos, err = aes.Seek(-1024, io.SeekEnd)
//
// Using the factory pattern:
//
// factory := NewDecrypterFactory(masterKeyVault)
// decrypter, err := factory(types.CipherAES256CTR)
// if err != nil {
// return err
// }
// err = decrypter.LoadMetadata(ctx, encryptedMetadata, masterKeyVault)
// err = decrypter.SetSource(encryptedStream, 0)
// defer decrypter.Close()
package encrypt
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
// AES256CTR provides both encryption and decryption for AES-256-CTR.
// It implements the Cryptor interface (io.ReadCloser + io.Seeker plus
// metadata management). Decryption is streaming: bytes read from src
// are XORed with the keystream in place.
type AES256CTR struct {
	masterKeyVault MasterEncryptKeyVault // vault used to (un)wrap the per-file key

	// Decryption fields
	src           io.ReadCloser          // source encrypted stream
	seeker        io.Seeker              // seeker for the source stream (nil if the source is not seekable)
	stream        cipher.Stream          // AES-CTR cipher stream, aligned to counterOffset+pos
	metadata      *types.EncryptMetadata // metadata with KeyPlainText populated
	counterOffset int64                  // byte offset of src within the original file (for sliced streams)
	pos           int64                  // current read position relative to counterOffset
	size          int64                  // total size of encrypted data (for SeekEnd support, -1 if unknown)
	eof           bool                   // set once src reports io.EOF
}
// NewAES256CTR constructs an AES256CTR bound to the given master key
// vault. No source is attached yet; size defaults to -1 (unknown).
func NewAES256CTR(masterKeyVault MasterEncryptKeyVault) *AES256CTR {
	c := &AES256CTR{masterKeyVault: masterKeyVault}
	c.size = -1 // unknown until SetSource provides it
	return c
}
// GenerateMetadata creates fresh encryption metadata for a new file: a
// random 32-byte AES-256 key (returned both wrapped with the master key
// and in plain text) plus a random 16-byte CTR IV.
func (e *AES256CTR) GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error) {
	newRandom := func(n int) ([]byte, error) {
		buf := make([]byte, n)
		_, err := io.ReadFull(rand.Reader, buf)
		return buf, err
	}

	// Random 32-byte key for AES-256.
	fileKey, err := newRandom(32)
	if err != nil {
		return nil, err
	}

	// Random 16-byte IV for CTR mode.
	iv, err := newRandom(16)
	if err != nil {
		return nil, err
	}

	// Wrap the per-file key with the master key from the vault.
	masterKey, err := e.masterKeyVault.GetMasterKey(ctx)
	if err != nil {
		return nil, err
	}
	wrappedKey, err := EncryptWithMasterKey(masterKey, fileKey)
	if err != nil {
		return nil, err
	}

	return &types.EncryptMetadata{
		Algorithm:    types.CipherAES256CTR,
		Key:          wrappedKey,
		KeyPlainText: fileKey,
		IV:           iv,
	}, nil
}
// LoadMetadata validates the metadata and, when only the wrapped key is
// present, decrypts it with the master key. Metadata that already
// carries KeyPlainText is used as-is.
func (e *AES256CTR) LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error {
	switch {
	case encryptedMetadata == nil:
		return fmt.Errorf("encryption metadata is nil")
	case encryptedMetadata.Algorithm != types.CipherAES256CTR:
		return fmt.Errorf("unsupported algorithm: %s", encryptedMetadata.Algorithm)
	case len(encryptedMetadata.KeyPlainText) > 0:
		// Key already in plain text; nothing to decrypt.
		e.metadata = encryptedMetadata
		return nil
	}

	// Unwrap the per-file key using the master key vault.
	plainKey, err := DecriptKey(ctx, e.masterKeyVault, encryptedMetadata.Key)
	if err != nil {
		return fmt.Errorf("failed to decrypt encryption key: %w", err)
	}

	e.metadata = &types.EncryptMetadata{
		Algorithm:    encryptedMetadata.Algorithm,
		KeyPlainText: plainKey,
		IV:           encryptedMetadata.IV,
	}
	return nil
}
// SetSource attaches the encrypted data source and initializes the
// cipher stream. counterOffset is the byte position of src within the
// original file, enabling correct decryption of sliced streams; size is
// the total encrypted size (-1 if unknown, needed only for io.SeekEnd).
//
// Non-block-aligned offsets (offset % 16 != 0) are handled by advancing
// the keystream within the block.
func (e *AES256CTR) SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error {
	if e.metadata == nil {
		return fmt.Errorf("metadata not loaded, call LoadMetadata first")
	}

	e.src, e.seeker = src, seeker
	e.size = size
	e.counterOffset = counterOffset
	e.pos, e.eof = 0, false // fresh source: rewind logical position, clear EOF

	// Align the keystream with the stream's first byte.
	return e.initCipherStream(counterOffset)
}
// Read implements io.Reader, decrypting source bytes on the fly.
//
// Per the io.Reader contract a source may return n > 0 together with an
// error (including io.EOF); in all such cases the n bytes are decrypted
// before being handed to the caller. The previous implementation
// returned raw (still-encrypted) bytes when a non-EOF error accompanied
// data, corrupting the caller's view of the stream.
func (e *AES256CTR) Read(p []byte) (int, error) {
	if e.src == nil {
		return 0, fmt.Errorf("source not set, call SetSource first")
	}
	if e.eof {
		return 0, io.EOF
	}

	n, err := e.src.Read(p)

	// Always decrypt whatever was read, regardless of the error value.
	if n > 0 {
		e.stream.XORKeyStream(p[:n], p[:n])
		e.pos += int64(n) // advance logical position
	}

	if err == io.EOF {
		e.eof = true
		if n == 0 {
			return 0, io.EOF
		}
	}
	return n, err
}
// Close implements io.Closer, closing the underlying source if present.
func (e *AES256CTR) Close() error {
	if e.src == nil {
		return nil
	}
	return e.src.Close()
}
// Seek implements io.Seeker for the decrypted stream: it seeks the
// underlying source and re-aligns the AES-CTR keystream to the new
// absolute byte position.
//
// Parameters:
//   - offset: byte offset relative to whence
//   - whence: io.SeekStart, io.SeekCurrent, or io.SeekEnd
//
// Returns the new absolute position (relative to counterOffset start).
//
// io.SeekEnd requires the total size to have been provided via
// SetSource; seeking also requires the source to support io.Seeker.
// (The previous doc and error text referenced a SetSize method that
// does not exist in this type.)
func (e *AES256CTR) Seek(offset int64, whence int) (int64, error) {
	if e.metadata == nil {
		return 0, fmt.Errorf("metadata not loaded, call LoadMetadata first")
	}
	if e.src == nil {
		return 0, fmt.Errorf("source not set, call SetSource first")
	}
	// Check if source supports seeking
	if e.seeker == nil {
		return 0, fmt.Errorf("source does not support seeking")
	}

	// Resolve the target position relative to counterOffset.
	var newPos int64
	switch whence {
	case io.SeekStart:
		newPos = offset
	case io.SeekCurrent:
		newPos = e.pos + offset
	case io.SeekEnd:
		if e.size < 0 {
			return 0, fmt.Errorf("size unknown, provide size via SetSource before using SeekEnd")
		}
		newPos = e.size + offset
	default:
		return 0, fmt.Errorf("invalid whence: %d", whence)
	}
	if newPos < 0 {
		return 0, fmt.Errorf("negative position: %d", newPos)
	}

	// The absolute position in the source is counterOffset + newPos.
	absPos := e.counterOffset + newPos
	if _, err := e.seeker.Seek(absPos, io.SeekStart); err != nil {
		return 0, fmt.Errorf("failed to seek source: %w", err)
	}

	// Re-derive the keystream for the new absolute byte position.
	if err := e.initCipherStream(absPos); err != nil {
		return 0, fmt.Errorf("failed to reinitialize cipher stream: %w", err)
	}

	// Update position and clear the EOF latch so reads can resume.
	e.pos = newPos
	e.eof = false
	return newPos, nil
}
// initCipherStream builds a CTR keystream aligned to absolutePosition:
// the IV is advanced by absolutePosition/16 whole blocks, then the
// stream is drained by absolutePosition%16 bytes for intra-block
// alignment.
func (e *AES256CTR) initCipherStream(absolutePosition int64) error {
	block, err := aes.NewCipher(e.metadata.KeyPlainText)
	if err != nil {
		return fmt.Errorf("failed to create AES cipher: %w", err)
	}

	// Start from the file IV and jump ahead by whole 16-byte blocks.
	counter := make([]byte, 16)
	copy(counter, e.metadata.IV)
	if absolutePosition > 0 {
		incrementCounter(counter, absolutePosition/16)
	}
	e.stream = cipher.NewCTR(block, counter)

	// Discard keystream bytes to land mid-block when the offset is not
	// 16-byte aligned.
	if rem := absolutePosition % 16; rem > 0 {
		scratch := make([]byte, rem)
		e.stream.XORKeyStream(scratch, scratch)
	}
	return nil
}
// incrementCounter adds blocks to a 16-byte big-endian counter in
// place. Overflow past 2^128 wraps around, matching CTR counter
// semantics. Non-positive block counts leave the counter untouched.
func incrementCounter(counter []byte, blocks int64) {
	if blocks <= 0 {
		return
	}

	// Ripple-add the 64-bit addend into the low bytes, carrying upward.
	add := uint64(blocks)
	carry := uint64(0)
	for i := 15; i >= 0; i-- {
		sum := uint64(counter[i]) + (add & 0xff) + carry
		counter[i] = byte(sum)
		carry = sum >> 8
		add >>= 8
		if add == 0 && carry == 0 {
			return
		}
	}
}

View File

@ -0,0 +1,97 @@
package encrypt
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
type (
	// Cryptor is a streaming en/decryptor for a single file. Typical
	// decryption flow: LoadMetadata -> SetSource -> Read/Seek -> Close.
	// GenerateMetadata is used when creating a new encrypted file.
	Cryptor interface {
		io.ReadCloser
		io.Seeker
		// LoadMetadata loads and decrypts the encryption metadata using the master key.
		LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error
		// SetSource sets the encrypted data source and initializes the cipher stream.
		SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error
		// GenerateMetadata generates new encryption metadata (fresh key and IV).
		GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error)
	}

	// CryptorFactory returns a Cryptor for the requested cipher algorithm.
	CryptorFactory func(algorithm types.Cipher) (Cryptor, error)
)
// NewCryptorFactory builds a CryptorFactory backed by the given master
// key vault. Requesting an unknown algorithm yields an error.
func NewCryptorFactory(masterKeyVault MasterEncryptKeyVault) CryptorFactory {
	return func(algorithm types.Cipher) (Cryptor, error) {
		if algorithm == types.CipherAES256CTR {
			return NewAES256CTR(masterKeyVault), nil
		}
		return nil, fmt.Errorf("unknown algorithm: %s", algorithm)
	}
}
// EncryptWithMasterKey encrypts data with AES-256-CTR under masterKey,
// using a freshly generated random IV.
// Output layout: [16-byte IV] + [ciphertext].
func EncryptWithMasterKey(masterKey, data []byte) ([]byte, error) {
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return nil, err
	}

	// The random IV is written into the output prefix so decryption can
	// recover it.
	out := make([]byte, 16+len(data))
	if _, err := io.ReadFull(rand.Reader, out[:16]); err != nil {
		return nil, err
	}

	cipher.NewCTR(block, out[:16]).XORKeyStream(out[16:], data)
	return out, nil
}
// DecriptKey fetches the master key from the vault and uses it to
// unwrap an encrypted per-file key.
//
// NOTE(review): the name is a historical misspelling of "DecryptKey",
// kept unchanged for API compatibility.
func DecriptKey(ctx context.Context, keyVault MasterEncryptKeyVault, encryptedKey []byte) ([]byte, error) {
	mk, err := keyVault.GetMasterKey(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	return DecryptWithMasterKey(mk, encryptedKey)
}
// DecryptWithMasterKey decrypts data produced by EncryptWithMasterKey.
// Input layout: [16-byte IV] + [ciphertext].
func DecryptWithMasterKey(masterKey, encryptedData []byte) ([]byte, error) {
	// Previously this returned aes.KeySizeError for a short payload,
	// which misreports a malformed input as a key-size problem; use a
	// descriptive error instead.
	if len(encryptedData) < 16 {
		return nil, fmt.Errorf("encrypted data too short: %d bytes, need at least 16 for IV", len(encryptedData))
	}

	// Extract the IV prefix and the ciphertext remainder.
	iv := encryptedData[:16]
	ciphertext := encryptedData[16:]

	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return nil, err
	}

	plain := make([]byte, len(ciphertext))
	cipher.NewCTR(block, iv).XORKeyStream(plain, ciphertext)
	return plain, nil
}

View File

@ -0,0 +1,105 @@
package encrypt
import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"os"
	"sync"

	"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
const (
	// EnvMasterEncryptKey is the environment variable holding the
	// base64-encoded master encrypt key (used by the "env" vault type).
	EnvMasterEncryptKey = "CR_ENCRYPT_MASTER_KEY"
)

// MasterEncryptKeyVault is a vault for the master encrypt key.
type MasterEncryptKeyVault interface {
	// GetMasterKey returns the raw (decoded) master key bytes.
	GetMasterKey(ctx context.Context) ([]byte, error)
}
// NewMasterEncryptKeyVault selects a vault implementation based on the
// configured vault type: environment variable, key file, or (default)
// the settings store.
func NewMasterEncryptKeyVault(ctx context.Context, settings setting.Provider) MasterEncryptKeyVault {
	switch settings.MasterEncryptKeyVault(ctx) {
	case setting.MasterEncryptKeyVaultTypeEnv:
		return NewEnvMasterEncryptKeyVault()
	case setting.MasterEncryptKeyVaultTypeFile:
		return NewFileMasterEncryptKeyVault(settings.MasterEncryptKeyFile(ctx))
	}
	return NewSettingMasterEncryptKeyVault(settings)
}
// settingMasterEncryptKeyVault is a vault for the master encrypt key
// that reads the key from the setting KV on every call (no caching, so
// a key rotated in settings is picked up immediately).
type settingMasterEncryptKeyVault struct {
	setting setting.Provider
}

// NewSettingMasterEncryptKeyVault constructs a vault backed by the
// settings provider.
func NewSettingMasterEncryptKeyVault(setting setting.Provider) MasterEncryptKeyVault {
	return &settingMasterEncryptKeyVault{setting: setting}
}
// GetMasterKey returns the master key stored in settings, or an error
// when none is configured.
func (v *settingMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
	if key := v.setting.MasterEncryptKey(ctx); key != nil {
		return key, nil
	}
	return nil, errors.New("master encrypt key is not set")
}
// NewEnvMasterEncryptKeyVault constructs a vault that reads the
// base64-encoded master key from the CR_ENCRYPT_MASTER_KEY environment
// variable, caching the decoded value after the first successful read.
func NewEnvMasterEncryptKeyVault() MasterEncryptKeyVault {
	return &envMasterEncryptKeyVault{}
}

type envMasterEncryptKeyVault struct {
}

// envMasterKeyCache caches the decoded key across all env vault
// instances. Guarded by envMasterKeyMu: the previous unsynchronized
// read/write was a data race when GetMasterKey is called concurrently.
var (
	envMasterKeyMu    sync.Mutex
	envMasterKeyCache = []byte{}
)

// GetMasterKey returns the cached key, or decodes it from the
// environment on first use. Failures are not cached, so a later call
// can succeed once the environment is fixed.
func (v *envMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
	envMasterKeyMu.Lock()
	defer envMasterKeyMu.Unlock()

	if len(envMasterKeyCache) > 0 {
		return envMasterKeyCache, nil
	}
	key := os.Getenv(EnvMasterEncryptKey)
	if key == "" {
		return nil, errors.New("master encrypt key is not set")
	}
	decodedKey, err := base64.StdEncoding.DecodeString(key)
	if err != nil {
		return nil, fmt.Errorf("failed to decode master encrypt key: %w", err)
	}
	envMasterKeyCache = decodedKey
	return decodedKey, nil
}
// NewFileMasterEncryptKeyVault constructs a vault that reads a
// base64-encoded master key from the file at path.
func NewFileMasterEncryptKeyVault(path string) MasterEncryptKeyVault {
	return &fileMasterEncryptKeyVault{path: path}
}

// fileMasterKeyCache caches the decoded key after the first successful
// read. Guarded by fileMasterKeyMu: the previous unsynchronized access
// was a data race when GetMasterKey is called concurrently.
//
// NOTE(review): the cache is shared across all file vault instances, so
// two vaults with different paths would both serve the first key loaded
// — confirm only one key file is ever configured per process.
var (
	fileMasterKeyMu    sync.Mutex
	fileMasterKeyCache = []byte{}
)

type fileMasterEncryptKeyVault struct {
	path string
}

// GetMasterKey returns the cached key, or reads and decodes the key
// file on first use. Failures are not cached, so a later call can
// succeed once the file is fixed.
func (v *fileMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
	fileMasterKeyMu.Lock()
	defer fileMasterKeyMu.Unlock()

	if len(fileMasterKeyCache) > 0 {
		return fileMasterKeyCache, nil
	}
	key, err := os.ReadFile(v.path)
	if err != nil {
		return nil, fmt.Errorf("invalid master encrypt key file")
	}
	decodedKey, err := base64.StdEncoding.DecodeString(string(key))
	if err != nil {
		return nil, fmt.Errorf("invalid master encrypt key")
	}
	fileMasterKeyCache = decodedKey
	return fileMasterKeyCache, nil
}

View File

@ -0,0 +1,193 @@
package eventhub
import "errors"
type (
	// Event describes a single filesystem change delivered to subscribers.
	Event struct {
		Type   EventType `json:"type"`
		FileID string    `json:"file_id"`
		From   string    `json:"from"` // source path (the only path for create/modify/delete)
		To     string    `json:"to"`   // destination path (rename only)
	}
	// EventType enumerates the kinds of filesystem change events.
	EventType string
)

const (
	EventTypeCreate = "create"
	EventTypeModify = "modify"
	EventTypeRename = "rename"
	EventTypeDelete = "delete"
)

var (
	// ErrEventHubClosed is returned when operations are attempted on a closed EventHub.
	ErrEventHubClosed = errors.New("event hub is closed")
)
// eventState tracks the accumulated (merged) state for a single file
// while a batch of events is being debounced.
type eventState struct {
	baseType    EventType // the merged event type (Create, Delete, or first event type)
	originalSrc string    // original source path (for Create or first Rename)
	currentDst  string    // current destination path (meaningful for Rename)
}
/*
Merging rules (events are time-ordered, per file):

	Modify + Modify             -> keep only the last Modify.
	Create + Modify             -> fold into a single Create with final metadata/content.
	Create + Rename(a->b)       -> Create at b.
	Create + Delete             -> drop both (ephemeral object never needs to reach clients).
	Modify + Delete             -> Delete (intermediate Modify is irrelevant to final state).
	Rename(a->b) + Rename(b->c) -> Rename(a->c).
	Rename(a->b) + Modify       -> emit Rename(a->b) then a single Modify at b
	                               (or fold the Modify into Create if the chain starts with Create).
	Rename(a->b) + Delete       -> emit only Delete(object_id).
	Rename(a->b) + Rename(b->a) -> with no intervening Modify, drop both (rename there-and-back is a no-op).
	Delete + Create             -> might be a valid case, e.g. user restores the same file from the trash bin.
*/
// DebounceEvents takes time-ordered events and returns a merged event
// list with at most one event per FileID, applying the folding rules
// documented above. Order of first appearance is preserved; files whose
// net effect is a no-op (Create+Delete, rename there-and-back) are
// dropped entirely.
//
// NOTE(review): for Rename followed by Modify, only the Rename is
// emitted (the Modify is folded away), while the rules comment above
// suggests a trailing Modify should also be emitted — confirm which
// behavior is intended.
func DebounceEvents(in []*Event) []*Event {
	if len(in) == 0 {
		return nil
	}

	states := make(map[string]*eventState) // keyed by FileID
	order := make([]string, 0)             // to preserve order of first appearance

	for _, e := range in {
		state, exists := states[e.FileID]
		if !exists {
			// First event for this file: record it verbatim.
			order = append(order, e.FileID)
			states[e.FileID] = &eventState{
				baseType:    e.Type,
				originalSrc: e.From,
				currentDst:  e.To,
			}
			continue
		}

		switch e.Type {
		case EventTypeCreate:
			// Delete + Create → keep as Create (e.g. restore from trash).
			if state.baseType == EventTypeDelete {
				state.baseType = EventTypeCreate
				state.originalSrc = e.From
				state.currentDst = ""
			}
		case EventTypeModify:
			switch state.baseType {
			case EventTypeCreate:
				// Create + Modify → fold into Create (no change needed; Create already implies content).
			case EventTypeModify:
				// Modify + Modify → keep only last Modify (state already correct).
			case EventTypeRename:
				// Rename + Modify → fold the Modify into the pending Rename.
			case EventTypeDelete:
				// Delete + Modify → should not happen; ignore the Modify.
			}
		case EventTypeRename:
			switch state.baseType {
			case EventTypeCreate:
				// Create + Rename(a→b) → Create at b.
				state.originalSrc = e.To
				state.currentDst = ""
			case EventTypeModify:
				// Modify + Rename → emit Rename only.
				state.baseType = EventTypeRename
				state.currentDst = e.To
				state.originalSrc = e.From
			case EventTypeRename:
				// Rename(a→b) + Rename(b→c) → Rename(a→c).
				// Check for no-op: Rename(a→b) + Rename(b→a) → drop both.
				if state.originalSrc == e.To {
					// Rename there-and-back: the file ends where it started.
					delete(states, e.FileID)
					// Remove from order so no event is emitted for this file.
					for i, id := range order {
						if id == e.FileID {
							order = append(order[:i], order[i+1:]...)
							break
						}
					}
				} else {
					state.currentDst = e.To
				}
			case EventTypeDelete:
				// Delete + Rename → should not happen; ignore.
			}
		case EventTypeDelete:
			switch state.baseType {
			case EventTypeCreate:
				// Create + Delete → drop both (ephemeral object).
				delete(states, e.FileID)
				// Remove from order so no event is emitted for this file.
				for i, id := range order {
					if id == e.FileID {
						order = append(order[:i], order[i+1:]...)
						break
					}
				}
			case EventTypeModify:
				// Modify + Delete → Delete.
				state.baseType = EventTypeDelete
				state.originalSrc = e.From
				state.currentDst = ""
			case EventTypeRename:
				// Rename + Delete → Delete only.
				state.baseType = EventTypeDelete
				state.originalSrc = e.From
				state.currentDst = ""
			case EventTypeDelete:
				// Delete + Delete → keep Delete (should not happen normally).
			}
		}
	}

	// Build output events in order of first appearance.
	result := make([]*Event, 0, len(order))
	for _, fileID := range order {
		state, exists := states[fileID]
		if !exists {
			// Entry was dropped above as a net no-op.
			continue
		}
		switch state.baseType {
		case EventTypeCreate:
			result = append(result, &Event{
				Type:   EventTypeCreate,
				FileID: fileID,
				From:   state.originalSrc,
			})
		case EventTypeModify:
			result = append(result, &Event{
				Type:   EventTypeModify,
				FileID: fileID,
				From:   state.originalSrc,
			})
		case EventTypeRename:
			// Emit the merged Rename. (Stale comments referring to a
			// hasModify flag removed: no such flag exists; any Modify in
			// a Rename chain was folded away above.)
			result = append(result, &Event{
				Type:   EventTypeRename,
				FileID: fileID,
				From:   state.originalSrc,
				To:     state.currentDst,
			})
		case EventTypeDelete:
			result = append(result, &Event{
				Type:   EventTypeDelete,
				FileID: fileID,
				From:   state.originalSrc,
			})
		}
	}
	return result
}

View File

@ -0,0 +1,199 @@
package eventhub
import (
"context"
"sync"
"time"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
)
type (
	// EventHub fans out filesystem change events to per-user
	// subscribers, debouncing bursts and buffering events for
	// temporarily offline clients.
	EventHub interface {
		// Subscribe to a topic and return a channel to receive events.
		// If a subscriber with the same ID already exists and is offline,
		// it will be reactivated and any buffered events will be flushed.
		// The bool result reports whether an existing subscriber was reused.
		Subscribe(ctx context.Context, topic int, id string) (chan *Event, bool, error)
		// Unsubscribe marks the subscriber as offline instead of removing it.
		// Buffered events will be kept for when the subscriber reconnects.
		// Subscribers that remain offline for more than 14 days will be permanently removed.
		Unsubscribe(ctx context.Context, topic int, id string)
		// GetSubscribers returns the current subscribers of a topic.
		GetSubscribers(ctx context.Context, topic int) []Subscriber
		// Close shuts down the event hub and disconnects all subscribers.
		Close()
	}
)
const (
	bufSize       = 16            // capacity of each subscriber's event channel
	cleanupPeriod = 1 * time.Hour // how often expired offline subscribers are purged
)

// eventHub is the default EventHub implementation: topic → subscriber
// ID → subscriber, guarded by mu.
type eventHub struct {
	mu            sync.RWMutex
	topics        map[int]map[string]*subscriber
	userClient    inventory.UserClient
	fsEventClient inventory.FsEventClient
	closed        bool          // set once Close is called; further operations are no-ops
	closeCh       chan struct{} // closed to stop the cleanup goroutine
	wg            sync.WaitGroup
}
// NewEventHub constructs an EventHub, clears any FsEvents persisted by
// a previous process, and starts the periodic offline-subscriber
// cleanup goroutine (stopped by Close).
func NewEventHub(userClient inventory.UserClient, fsEventClient inventory.FsEventClient) EventHub {
	hub := &eventHub{
		topics:        map[int]map[string]*subscriber{},
		userClient:    userClient,
		fsEventClient: fsEventClient,
		closeCh:       make(chan struct{}),
	}

	// Events persisted by a previous run are stale; drop them.
	fsEventClient.DeleteAll(context.Background())

	hub.wg.Add(1)
	go hub.cleanupLoop()
	return hub
}
// cleanupLoop periodically purges subscribers that have been offline
// for too long; it exits when closeCh is closed.
func (e *eventHub) cleanupLoop() {
	defer e.wg.Done()

	ticker := time.NewTicker(cleanupPeriod)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			e.cleanupExpiredSubscribers()
		case <-e.closeCh:
			return
		}
	}
}
// cleanupExpiredSubscribers removes subscribers that have been offline
// longer than offlineMaxAge, dropping topics that become empty.
func (e *eventHub) cleanupExpiredSubscribers() {
	e.mu.Lock()
	defer e.mu.Unlock()

	if e.closed {
		return
	}

	for topic, subs := range e.topics {
		for id, sub := range subs {
			if !sub.shouldExpire() {
				continue
			}
			sub.close()
			delete(subs, id)
		}
		// Deleting entries from the map being ranged is safe in Go.
		if len(subs) == 0 {
			delete(e.topics, topic)
		}
	}
}
// GetSubscribers returns a snapshot of the subscribers of a topic.
func (e *eventHub) GetSubscribers(ctx context.Context, topic int) []Subscriber {
	e.mu.RLock()
	defer e.mu.RUnlock()

	topicSubs := e.topics[topic]
	out := make([]Subscriber, 0, len(topicSubs))
	for _, sub := range topicSubs {
		out = append(out, sub)
	}
	return out
}
// Subscribe registers (or reactivates) a subscriber on a topic and
// returns its event channel. The bool result is true when an existing
// offline subscriber was reactivated.
func (e *eventHub) Subscribe(ctx context.Context, topic int, id string) (chan *Event, bool, error) {
	l := logging.FromContext(ctx)
	l.Info("Subscribing to event hub for topic %d with id %s", topic, id)

	e.mu.Lock()
	defer e.mu.Unlock()

	if e.closed {
		return nil, false, ErrEventHubClosed
	}

	subs := e.topics[topic]
	if subs == nil {
		subs = make(map[string]*subscriber)
		e.topics[topic] = subs
	}

	if existing, found := subs[id]; found {
		if !existing.isClosed() {
			// Reuse the offline subscriber; its buffered events flush on
			// reactivation.
			l.Info("Reactivating offline subscriber %s for topic %d", id, topic)
			existing.setOnline(ctx)
			return existing.ch, true, nil
		}
		// A closed subscriber cannot be revived; replace it below.
		delete(subs, id)
	}

	fresh, err := newSubscriber(ctx, id, e.userClient, e.fsEventClient)
	if err != nil {
		return nil, false, err
	}
	subs[id] = fresh
	return fresh.ch, false, nil
}
// Unsubscribe marks a subscriber as offline. Its buffered events are
// retained so they can be delivered if it reconnects within the
// retention window.
func (e *eventHub) Unsubscribe(ctx context.Context, topic int, id string) {
	logging.FromContext(ctx).Info("Marking subscriber offline for topic %d with id %s", topic, id)

	e.mu.Lock()
	defer e.mu.Unlock()

	if e.closed {
		return
	}

	sub, ok := e.topics[topic][id]
	if !ok {
		return
	}

	// Halt the debounce timer (events stay buffered), then flag the
	// subscriber offline.
	sub.Stop()
	sub.setOffline()
}
// Close shuts down the hub: it stops the cleanup goroutine, closes
// every subscriber, and releases the topic table. Safe to call more
// than once.
func (e *eventHub) Close() {
	e.mu.Lock()
	if e.closed {
		e.mu.Unlock()
		return
	}
	e.closed = true
	close(e.closeCh)

	for _, topicSubs := range e.topics {
		for _, sub := range topicSubs {
			sub.close()
		}
	}
	e.topics = nil
	e.mu.Unlock()

	// Join the cleanup goroutine outside the lock.
	e.wg.Wait()
}

View File

@ -0,0 +1,317 @@
package eventhub
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/gofrs/uuid"
"github.com/samber/lo"
)
// Subscriber is the read-side view of a registered event consumer.
type Subscriber interface {
	// ID returns the subscriber's unique identifier.
	ID() string
	// Ch returns the channel on which debounced events are delivered.
	Ch() chan *Event
	// Publish buffers an event for debounced delivery.
	Publish(evt Event)
	// Stop cancels the debounce timer and flushes buffered events.
	Stop()
	// Buffer returns a copy of the currently buffered events.
	Buffer() []*Event
	// Owner returns the owner of the subscriber.
	Owner() (*ent.User, error)
	// Online returns whether the subscriber is online.
	Online() bool
	// OfflineSince returns when the subscriber went offline.
	// Returns zero time if the subscriber is online.
	OfflineSince() time.Time
}

const (
	debounceDelay = 5 * time.Second     // quiet period before buffered events are flushed
	userCacheTTL  = 1 * time.Hour       // how long a cached owner record stays valid
	offlineMaxAge = 14 * 24 * time.Hour // 14 days: offline subscribers older than this are purged
)
// subscriber buffers events for one client connection, debouncing
// bursts and persisting events while the client is offline. All mutable
// fields are guarded by mu.
type subscriber struct {
	mu            sync.Mutex
	userClient    inventory.UserClient
	fsEventClient inventory.FsEventClient
	id            string
	uid           int // owner user ID; stable even when ownerCached is nil
	ch            chan *Event

	// Online status
	online       bool
	offlineSince time.Time // zero while online; used for expiry while offline

	// Debounce buffer for pending events
	buffer []*Event
	timer  *time.Timer // armed debounce timer; nil when idle

	// Owner info (cached; refreshed after userCacheTTL)
	ownerCached *ent.User
	cachedAt    time.Time

	// Close signal
	closed   bool
	closedCh chan struct{}
}
// newSubscriber creates an online subscriber bound to the authenticated
// user carried in ctx; anonymous or missing users are rejected.
func newSubscriber(ctx context.Context, id string, userClient inventory.UserClient, fsEventClient inventory.FsEventClient) (*subscriber, error) {
	owner := inventory.UserFromContext(ctx)
	if owner == nil || inventory.IsAnonymousUser(owner) {
		return nil, errors.New("user not found")
	}

	s := &subscriber{
		id:            id,
		uid:           owner.ID,
		ch:            make(chan *Event, bufSize),
		userClient:    userClient,
		fsEventClient: fsEventClient,
		ownerCached:   owner,
		cachedAt:      time.Now(),
		online:        true,
		closedCh:      make(chan struct{}),
	}
	return s, nil
}
// ID returns the subscriber's unique identifier.
func (s *subscriber) ID() string {
	return s.id
}

// Ch returns the channel on which debounced events are delivered.
func (s *subscriber) Ch() chan *Event {
	return s.ch
}

// Online reports whether the subscriber is currently connected.
func (s *subscriber) Online() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.online
}

// OfflineSince returns when the subscriber went offline, or the zero
// time if it is online.
func (s *subscriber) OfflineSince() time.Time {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.offlineSince
}
// Owner returns the subscriber's user, refreshing the cached copy from
// the user client when it is missing or older than userCacheTTL.
func (s *subscriber) Owner() (*ent.User, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	stale := s.ownerCached == nil || time.Since(s.cachedAt) > userCacheTTL
	if !stale {
		return s.ownerCached, nil
	}

	owner, err := s.userClient.GetLoginUserByID(context.Background(), s.uid)
	if err != nil {
		return nil, fmt.Errorf("failed to get login user: %w", err)
	}
	s.ownerCached = owner
	s.cachedAt = time.Now()
	return owner, nil
}
// Publish buffers an event and (re)arms the debounce timer; events
// reach the channel after debounceDelay. No-op once the subscriber is
// closed. While offline, events accumulate in the buffer only.
func (s *subscriber) Publish(evt Event) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.closed {
		s.publishLocked(evt)
	}
}

// publishLocked appends the event and restarts the debounce timer.
// Caller must hold s.mu.
func (s *subscriber) publishLocked(evt Event) {
	s.buffer = append(s.buffer, &evt)

	// Every new event pushes the flush back by the full delay.
	if s.timer != nil {
		s.timer.Stop()
	}
	s.timer = time.AfterFunc(debounceDelay, s.flush)
}
// flush is the debounce-timer callback: it takes the lock and drains
// the buffer.
func (s *subscriber) flush() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.flushLocked(context.Background())
}
// flushLocked drains buffered events: online subscribers receive the
// debounced/merged events on their channel (non-blocking send; a slow
// consumer drops events), offline subscribers have the raw events
// persisted for delivery on reconnect. Caller must hold s.mu.
func (s *subscriber) flushLocked(ctx context.Context) {
	if len(s.buffer) == 0 || s.closed {
		return
	}
	if !s.online {
		// Persist raw events keyed by the owner's user ID. Use s.uid
		// rather than s.ownerCached.ID: setOnline resets ownerCached to
		// nil, so dereferencing it here could panic after an
		// online→offline cycle.
		_ = s.fsEventClient.Create(ctx, s.uid, uuid.FromStringOrNil(s.id), lo.Map(s.buffer, func(item *Event, index int) string {
			res, _ := json.Marshal(item)
			return string(res)
		})...)
	} else {
		// Merge the buffered burst into the minimal equivalent event set
		// before delivery.
		debouncedEvents := DebounceEvents(s.buffer)
		for _, evt := range debouncedEvents {
			select {
			case s.ch <- evt:
			default:
				// Non-blocking send; drop if subscriber is slow
			}
		}
	}
	// Clear the buffer
	s.buffer = nil
	s.timer = nil
}
// Stop cancels the pending debounce timer (if any) and flushes whatever
// is buffered. Call before closing or suspending the subscriber.
func (s *subscriber) Stop() {
	s.mu.Lock()
	defer s.mu.Unlock()

	if t := s.timer; t != nil {
		t.Stop()
		s.timer = nil
	}
	// Deliver or persist any remaining events before stopping.
	s.flushLocked(context.Background())
}
// setOnline reactivates the subscriber: it drops the stale owner cache,
// reloads events persisted while offline into the buffer, and arms the
// debounce timer so they are delivered shortly after reconnect.
func (s *subscriber) setOnline(ctx context.Context) {
	l := logging.FromContext(ctx)

	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return
	}

	s.online = true
	s.offlineSince = time.Time{}
	s.ownerCached = nil // force a fresh user lookup on the next Owner()

	// Reload events that were persisted while the subscriber was away.
	stored, err := s.fsEventClient.TakeBySubscriber(ctx, uuid.FromStringOrNil(s.id), s.uid)
	if err != nil {
		l.Error("Failed to get events from inventory: %s", err)
		return
	}
	for _, row := range stored {
		var decoded Event
		if err := json.Unmarshal([]byte(row.Event), &decoded); err != nil {
			l.Error("Failed to unmarshal event: %s", err)
			continue
		}
		s.buffer = append(s.buffer, &decoded)
	}

	// Arm the debounce timer if anything is waiting.
	if len(s.buffer) == 0 {
		return
	}
	if s.timer != nil {
		s.timer.Stop()
	}
	s.timer = time.AfterFunc(debounceDelay, s.flush)
}
// setOffline marks the subscriber offline, records the timestamp used
// for expiry, stops the debounce timer, and flushes the buffer (which,
// now offline, persists the events instead of sending them).
func (s *subscriber) setOffline() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return
	}

	s.online = false
	s.offlineSince = time.Now()

	if t := s.timer; t != nil {
		t.Stop()
		s.timer = nil
	}

	// Persist any pending events now that we are offline.
	s.flushLocked(context.Background())
}
// close permanently shuts the subscriber down: the pending timer is
// stopped, persisted events are deleted, both channels are closed, and
// the buffer is released. Idempotent.
func (s *subscriber) close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return
	}
	s.closed = true

	if t := s.timer; t != nil {
		t.Stop()
		s.timer = nil
	}

	// Drop persisted events; they will never be delivered.
	s.fsEventClient.DeleteBySubscriber(context.Background(), uuid.FromStringOrNil(s.id))

	close(s.closedCh)
	close(s.ch)
	s.buffer = nil
}
// isClosed returns whether the subscriber is closed.
func (s *subscriber) isClosed() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.closed
}

// shouldExpire returns whether the subscriber should be expired, i.e.
// it has been offline for longer than offlineMaxAge.
func (s *subscriber) shouldExpire() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return !s.online && !s.offlineSince.IsZero() && time.Since(s.offlineSince) > offlineMaxAge
}
// Buffer returns a copy of the currently buffered events (nil when
// empty). The copy shields callers from concurrent mutation of the
// internal slice.
func (s *subscriber) Buffer() []*Event {
	s.mu.Lock()
	defer s.mu.Unlock()

	if len(s.buffer) == 0 {
		return nil
	}
	return append([]*Event(nil), s.buffer...)
}

View File

@ -4,11 +4,8 @@ import (
"context"
"errors"
"fmt"
"math/rand"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
@ -17,6 +14,8 @@ import (
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@ -46,7 +45,7 @@ type (
func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
l logging.Logger, ls lock.LockSystem, settingClient setting.Provider,
storagePolicyClient inventory.StoragePolicyClient, hasher hashid.Encoder, userClient inventory.UserClient,
cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient) fs.FileSystem {
cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient, encryptorFactory encrypt.CryptorFactory, eventHub eventhub.EventHub) fs.FileSystem {
return &DBFS{
user: u,
navigators: make(map[string]Navigator),
@ -61,6 +60,8 @@ func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inv
cache: cache,
stateKv: stateKv,
directLinkClient: directLinkClient,
encryptorFactory: encryptorFactory,
eventHub: eventHub,
}
}
@ -79,6 +80,8 @@ type DBFS struct {
cache cache.Driver
stateKv cache.Driver
mu sync.Mutex
encryptorFactory encrypt.CryptorFactory
eventHub eventhub.EventHub
}
func (f *DBFS) Recycle() {
@ -122,7 +125,7 @@ func (f *DBFS) List(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.Fi
parent, err := f.getFileByPath(ctx, navigator, path)
if err != nil {
return nil, nil, fmt.Errorf("Parent not exist: %w", err)
return nil, nil, fmt.Errorf("parent not exist: %w", err)
}
pageSize := 0
@ -286,6 +289,7 @@ func (f *DBFS) CreateEntity(ctx context.Context, file fs.File, policy *ent.Stora
Source: req.Props.SavePath,
Size: req.Props.Size,
UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
EncryptMetadata: o.encryptMetadata,
})
if err != nil {
_ = inventory.Rollback(tx)
@ -616,6 +620,7 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
ModifiedAt: o.UploadRequest.Props.LastModified,
UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
Importing: o.UploadRequest.ImportFrom != nil,
EncryptMetadata: o.encryptMetadata,
}
}
@ -641,7 +646,23 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
}
file.SetEntities([]*ent.Entity{entity})
return newFile(parent, file), nil
newFile := newFile(parent, file)
f.emitFileCreated(ctx, newFile)
return newFile, nil
}
func (f *DBFS) generateEncryptMetadata(ctx context.Context, uploadRequest *fs.UploadRequest, policy *ent.StoragePolicy) (*types.EncryptMetadata, error) {
relayEnabled := policy.Settings != nil && policy.Settings.Relay
if (len(uploadRequest.Props.EncryptionSupported) > 0 && uploadRequest.Props.EncryptionSupported[0] == types.CipherAES256CTR) || relayEnabled {
encryptor, err := f.encryptorFactory(types.CipherAES256CTR)
if err != nil {
return nil, fmt.Errorf("failed to get encryptor: %w", err)
}
return encryptor.GenerateMetadata(ctx)
}
return nil, nil
}
// getPreferredPolicy tries to get the preferred storage policy for the given file.
@ -651,7 +672,8 @@ func (f *DBFS) getPreferredPolicy(ctx context.Context, file *File) (*ent.Storage
return nil, fmt.Errorf("owner group not loaded")
}
groupPolicy, err := f.storagePolicyClient.GetByGroup(ctx, ownerGroup)
sc, _ := inventory.InheritTx(ctx, f.storagePolicyClient)
groupPolicy, err := sc.GetByGroup(ctx, ownerGroup)
if err != nil {
return nil, serializer.NewError(serializer.CodeDBError, "Failed to get available storage policies", err)
}
@ -765,44 +787,17 @@ func (f *DBFS) navigatorId(path *fs.URI) string {
// generateSavePath generates the physical save path for the upload request.
func generateSavePath(policy *ent.StoragePolicy, req *fs.UploadRequest, user *ent.User) string {
baseTable := map[string]string{
"{randomkey16}": util.RandStringRunes(16),
"{randomkey8}": util.RandStringRunes(8),
"{timestamp}": strconv.FormatInt(time.Now().Unix(), 10),
"{timestamp_nano}": strconv.FormatInt(time.Now().UnixNano(), 10),
"{randomnum2}": strconv.Itoa(rand.Intn(2)),
"{randomnum3}": strconv.Itoa(rand.Intn(3)),
"{randomnum4}": strconv.Itoa(rand.Intn(4)),
"{randomnum8}": strconv.Itoa(rand.Intn(8)),
"{uid}": strconv.Itoa(user.ID),
"{datetime}": time.Now().Format("20060102150405"),
"{date}": time.Now().Format("20060102"),
"{year}": time.Now().Format("2006"),
"{month}": time.Now().Format("01"),
"{day}": time.Now().Format("02"),
"{hour}": time.Now().Format("15"),
"{minute}": time.Now().Format("04"),
"{second}": time.Now().Format("05"),
currentTime := time.Now()
dynamicReplace := func(rule string, pathAvailable bool) string {
return util.ReplaceMagicVar(rule, fs.Separator, pathAvailable, false, currentTime, user.ID, req.Props.Uri.Name(), req.Props.Uri.Dir(), "")
}
dirRule := policy.DirNameRule
dirRule = filepath.ToSlash(dirRule)
dirRule = util.Replace(baseTable, dirRule)
dirRule = util.Replace(map[string]string{
"{path}": req.Props.Uri.Dir() + fs.Separator,
}, dirRule)
originName := req.Props.Uri.Name()
nameTable := map[string]string{
"{originname}": originName,
"{ext}": filepath.Ext(originName),
"{originname_without_ext}": strings.TrimSuffix(originName, filepath.Ext(originName)),
"{uuid}": uuid.Must(uuid.NewV4()).String(),
}
dirRule = dynamicReplace(dirRule, true)
nameRule := policy.FileNameRule
nameRule = util.Replace(baseTable, nameRule)
nameRule = util.Replace(nameTable, nameRule)
nameRule = dynamicReplace(nameRule, false)
return path.Join(path.Clean(dirRule), nameRule)
}

View File

@ -0,0 +1,150 @@
package dbfs
import (
"context"
"path"
"strings"
"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/eventhub"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/samber/lo"
)
// emitFileCreated notifies every eligible subscriber that a new file has
// appeared under a path it is watching.
func (f *DBFS) emitFileCreated(ctx context.Context, file *File) {
	for _, sub := range f.getEligibleSubscriber(ctx, file, true) {
		sub.Publish(eventhub.Event{
			Type:   eventhub.EventTypeCreate,
			FileID: hashid.EncodeFileID(f.hasher, file.Model.ID),
			From:   sub.relativePath(file),
		})
	}
}
// emitFileModified notifies every eligible subscriber that the content or
// entities of a file they are watching have changed.
func (f *DBFS) emitFileModified(ctx context.Context, file *File) {
	for _, sub := range f.getEligibleSubscriber(ctx, file, true) {
		sub.Publish(eventhub.Event{
			Type:   eventhub.EventTypeModify,
			FileID: hashid.EncodeFileID(f.hasher, file.Model.ID),
			From:   sub.relativePath(file),
		})
	}
}
// emitFileRenamed notifies every eligible subscriber that a file was renamed
// in place. The event carries both the old (From) and new (To) relative paths.
func (f *DBFS) emitFileRenamed(ctx context.Context, file *File, newName string) {
	subscribers := f.getEligibleSubscriber(ctx, file, true)
	for _, subscriber := range subscribers {
		// Derive the new path by swapping the trailing name component.
		from := subscriber.relativePath(file)
		to := strings.TrimSuffix(from, file.Name()) + newName
		subscriber.Publish(eventhub.Event{
			Type:   eventhub.EventTypeRename,
			FileID: hashid.EncodeFileID(f.hasher, file.Model.ID),
			// Reuse the already-computed path instead of calling
			// relativePath a second time with the same arguments.
			From: from,
			To:   to,
		})
	}
}
// emitFileDeleted notifies every eligible subscriber, for each of the given
// files, that a file they are watching has been removed.
func (f *DBFS) emitFileDeleted(ctx context.Context, files ...*File) {
	for _, target := range files {
		for _, sub := range f.getEligibleSubscriber(ctx, target, true) {
			sub.Publish(eventhub.Event{
				Type:   eventhub.EventTypeDelete,
				FileID: hashid.EncodeFileID(f.hasher, target.Model.ID),
				From:   sub.relativePath(target),
			})
		}
	}
}
// emitFileMoved publishes the events that result from moving src into dst.
// A subscriber that can see both the source and the destination receives a
// single rename event; one that sees only the source receives a delete; one
// that sees only the destination receives a create.
func (f *DBFS) emitFileMoved(ctx context.Context, src, dst *File) {
	// Index subscribers by ID so src/dst overlap can be detected.
	toMap := func(subs []foundSubscriber) map[string]*foundSubscriber {
		m := make(map[string]*foundSubscriber, len(subs))
		for i := range subs {
			m[subs[i].ID()] = &subs[i]
		}
		return m
	}
	srcSubs := toMap(f.getEligibleSubscriber(ctx, src, true))
	dstSubs := toMap(f.getEligibleSubscriber(ctx, dst, false))

	// The moved file keeps its ID, so all events reference src's ID.
	fileID := hashid.EncodeFileID(f.hasher, src.Model.ID)

	for id, sub := range srcSubs {
		if dstSub, watchesBoth := dstSubs[id]; watchesBoth {
			// Subscriber sees both ends of the move: a single rename.
			sub.Publish(eventhub.Event{
				Type:   eventhub.EventTypeRename,
				FileID: fileID,
				From:   sub.relativePath(src),
				To:     path.Join(dstSub.relativePath(dst), src.Name()),
			})
			delete(dstSubs, id)
		} else {
			// File left this subscriber's visible scope: a delete.
			sub.Publish(eventhub.Event{
				Type:   eventhub.EventTypeDelete,
				FileID: fileID,
				From:   sub.relativePath(src),
			})
		}
	}

	// Remaining subscribers watch only the destination: the file appears new.
	for _, sub := range dstSubs {
		sub.Publish(eventhub.Event{
			Type:   eventhub.EventTypeCreate,
			FileID: fileID,
			From:   path.Join(sub.relativePath(dst), src.Name()),
		})
	}
}
// getEligibleSubscriber collects every subscriber watching this file's
// ancestors (and, when checkParentPerm is false, the file itself), excluding
// the client that originated the current request so it does not get notified
// about its own change.
func (f *DBFS) getEligibleSubscriber(ctx context.Context, file *File, checkParentPerm bool) []foundSubscriber {
	roots := file.Ancestors()
	if !checkParentPerm {
		// Include the file itself as a possible subscription root.
		roots = file.AncestorsChain()
	}

	requestInfo := requestinfo.RequestInfoFromContext(ctx)
	eligible := make([]foundSubscriber, 0)
	for _, root := range roots {
		for _, sub := range f.eventHub.GetSubscribers(ctx, root.Model.ID) {
			// Exclude the originating client from the fan-out.
			if requestInfo != nil && sub.ID() == requestInfo.ClientID {
				continue
			}
			eligible = append(eligible, foundSubscriber{
				Subscriber: sub,
				root:       root,
			})
		}
	}
	return eligible
}
// foundSubscriber pairs a subscriber with the ancestor (subscription root)
// through which it matched the changed file.
type foundSubscriber struct {
	eventhub.Subscriber
	root *File
}

// relativePath returns file's URI path relative to the subscription root,
// always prefixed with the path separator ("/" for the root itself).
func (s *foundSubscriber) relativePath(file *File) string {
	rel := strings.TrimPrefix(file.Uri(true).Path(), s.root.Uri(true).Path())
	switch {
	case rel == "":
		return fs.Separator
	case !strings.HasPrefix(rel, fs.Separator):
		return fs.Separator + rel
	default:
		return rel
	}
}

View File

@ -119,7 +119,22 @@ func (f *DBFS) Create(ctx context.Context, path *fs.URI, fileType types.FileType
}
ancestor = newFile(ancestor, newFolder)
f.emitFileCreated(ctx, ancestor)
} else {
// validate file name
policy, err := f.getPreferredPolicy(ctx, ancestor)
if err != nil {
return nil, err
}
if err := validateExtension(desired[i], policy); err != nil {
return nil, fs.ErrIllegalObjectName.WithError(err)
}
if err := validateFileNameRegexp(desired[i], policy); err != nil {
return nil, fs.ErrIllegalObjectName.WithError(err)
}
file, err := f.createFile(ctx, ancestor, desired[i], fileType, o)
if err != nil {
return nil, err
@ -170,6 +185,10 @@ func (f *DBFS) Rename(ctx context.Context, path *fs.URI, newName string) (fs.Fil
if err := validateExtension(newName, policy); err != nil {
return nil, fs.ErrIllegalObjectName.WithError(err)
}
if err := validateFileNameRegexp(newName, policy); err != nil {
return nil, fs.ErrIllegalObjectName.WithError(err)
}
}
// Lock target
@ -207,6 +226,8 @@ func (f *DBFS) Rename(ctx context.Context, path *fs.URI, newName string) (fs.Fil
return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit rename change", err)
}
f.emitFileRenamed(ctx, target, newName)
return target.Replace(updated), nil
}
@ -285,6 +306,8 @@ func (f *DBFS) SoftDelete(ctx context.Context, path ...*fs.URI) error {
return serializer.NewError(serializer.CodeDBError, "Failed to commit soft-delete change", err)
}
f.emitFileDeleted(ctx, targets...)
return ae.Aggregate()
}
@ -294,9 +317,9 @@ func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([
o.apply(opt)
}
var opt *types.EntityRecycleOption
var opt *types.EntityProps
if o.UnlinkOnly {
opt = &types.EntityRecycleOption{
opt = &types.EntityProps{
UnlinkOnly: true,
}
}
@ -367,7 +390,7 @@ func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([
if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit delete change", err)
}
f.emitFileDeleted(ctx, targets...)
return newStaleEntities, ae.Aggregate()
}
@ -585,10 +608,11 @@ func (f *DBFS) MoveOrCopy(ctx context.Context, path []*fs.URI, dst *fs.URI, isCo
}
var (
storageDiff inventory.StorageDiff
copiedNewTargetsMap map[int]*ent.File
storageDiff inventory.StorageDiff
)
if isCopy {
_, storageDiff, err = f.copyFiles(ctx, fileNavGroup, destination, fc)
copiedNewTargetsMap, storageDiff, err = f.copyFiles(ctx, fileNavGroup, destination, fc)
} else {
storageDiff, err = f.moveFiles(ctx, targets, destination, fc, dstNavigator)
}
@ -603,6 +627,14 @@ func (f *DBFS) MoveOrCopy(ctx context.Context, path []*fs.URI, dst *fs.URI, isCo
return serializer.NewError(serializer.CodeDBError, "Failed to commit move change", err)
}
for _, target := range targets {
if isCopy {
f.emitFileCreated(ctx, newFile(destination, copiedNewTargetsMap[target.ID()]))
} else {
f.emitFileMoved(ctx, target, destination)
}
}
// TODO: after move, dbfs cache should be cleared
}
@ -698,6 +730,8 @@ func (f *DBFS) deleteEntity(ctx context.Context, target *File, entityId int) (in
return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove upload session metadata", err)
}
}
f.emitFileModified(ctx, target)
return diff, nil
}
@ -735,14 +769,14 @@ func (f *DBFS) setCurrentVersion(ctx context.Context, target *File, versionId in
return serializer.NewError(serializer.CodeDBError, "Failed to commit set current version", err)
}
f.emitFileModified(ctx, target)
return nil
}
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityRecycleOption) ([]fs.Entity, inventory.StorageDiff, error) {
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityProps) ([]fs.Entity, inventory.StorageDiff, error) {
if f.user.Edges.Group == nil {
return nil, nil, fmt.Errorf("user group not loaded")
}
limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
allStaleEntities := make([]fs.Entity, 0, len(targets))
storageDiff := make(inventory.StorageDiff)
for n, files := range targets {
@ -756,8 +790,7 @@ func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, f
// List all files to be deleted
toBeDeletedFiles := make([]*File, 0, len(files))
if err := n.Walk(ctx, files, limit, intsets.MaxInt, func(targets []*File, level int) error {
limit -= len(targets)
if err := n.Walk(ctx, files, intsets.MaxInt, intsets.MaxInt, func(targets []*File, level int) error {
toBeDeletedFiles = append(toBeDeletedFiles, targets...)
return nil
}); err != nil {

View File

@ -2,6 +2,7 @@ package dbfs
import (
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
)
@ -26,6 +27,7 @@ type dbfsOption struct {
streamListResponseCallback func(parent fs.File, file []fs.File)
ancestor *File
notRoot bool
encryptMetadata *types.EncryptMetadata
}
func newDbfsOption() *dbfsOption {
@ -50,6 +52,13 @@ func (f optionFunc) Apply(o any) {
}
}
// WithEncryptMetadata sets the encrypt metadata for the upload operation.
func WithEncryptMetadata(encryptMetadata *types.EncryptMetadata) fs.Option {
	setter := func(o *dbfsOption) {
		o.encryptMetadata = encryptMetadata
	}
	return optionFunc(setter)
}
// WithFilePublicMetadata enables loading file public metadata.
func WithFilePublicMetadata() fs.Option {
return optionFunc(func(o *dbfsOption) {

Some files were not shown because too many files have changed in this diff Show More