import { getMongoModel, Schema } from '../../common/mongo';
import {
  ChunkSettingModeEnum,
  ChunkTriggerConfigTypeEnum,
  DataChunkSplitModeEnum,
  DatasetCollectionDataProcessModeEnum,
  DatasetTypeEnum,
  DatasetTypeMap,
  ParagraphChunkAIModeEnum
} from '@fastgpt/global/core/dataset/constants';
import {
  TeamCollectionName,
  TeamMemberCollectionName
} from '@fastgpt/global/support/user/team/constant';
import type { DatasetSchemaType } from '@fastgpt/global/core/dataset/type.d';

export const DatasetCollectionName = 'datasets';
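
// Field definitions for chunk-processing settings, exported so related
// schemas can embed the same definitions.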
export const ChunkSettings = {
  trainingType: {
    type: String,
    enum: Object.values(DatasetCollectionDataProcessModeEnum)
  },

  chunkTriggerType: {
    type: String,
    enum: Object.values(ChunkTriggerConfigTypeEnum)
  },
  chunkTriggerMinSize: Number,

  dataEnhanceCollectionName: Boolean,

  imageIndex: Boolean,
  autoIndexes: Boolean,
  indexPrefixTitle: Boolean,

  chunkSettingMode: {
    type: String,
    enum: Object.values(ChunkSettingModeEnum)
  },
  chunkSplitMode: {
    type: String,
    enum: Object.values(DataChunkSplitModeEnum)
  },
  paragraphChunkAIMode: {
    type: String,
    enum: Object.values(ParagraphChunkAIModeEnum)
  },
  paragraphChunkDeep: Number,
  paragraphChunkMinSize: Number,
  chunkSize: Number,
  chunkSplitter: String,

  indexSize: Number,
  qaPrompt: String
};
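
// Top-level dataset document: tree structure via parentId, team ownership
// (teamId/tmbId), model selection, and embedded chunk settings.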
const DatasetSchema = new Schema({
  parentId: {
    type: Schema.Types.ObjectId,
    ref: DatasetCollectionName,
    default: null
  },
  userId: {
    // abandoned
    type: Schema.Types.ObjectId,
    ref: 'user'
  },
  teamId: {
    type: Schema.Types.ObjectId,
    ref: TeamCollectionName,
    required: true
  },
  tmbId: {
    type: Schema.Types.ObjectId,
    ref: TeamMemberCollectionName,
    required: true
  },
  type: {
    type: String,
    enum: Object.keys(DatasetTypeMap),
    required: true,
    default: DatasetTypeEnum.dataset
  },
  avatar: {
    type: String,
    default: '/icon/logo.svg'
  },
  name: {
    type: String,
    required: true
  },
  updateTime: {
    type: Date,
    default: () => new Date()
  },
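
  // Model identifiers: vectorModel is the embedding model used for vector
  // indexes; agentModel is the LLM used for dataset processing tasks;
  // vlmModel is an optional vision-language model.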
  vectorModel: {
    type: String,
    required: true,
    default: 'text-embedding-3-small'
  },
  agentModel: {
    type: String,
    required: true,
    default: 'gpt-4o-mini'
  },
  vlmModel: String,
  intro: {
    type: String,
    default: ''
  },
  websiteConfig: {
    type: {
      url: {
        type: String,
        required: true
      },
      selector: {
        type: String,
        default: 'body'
      }
    }
  },
  chunkSettings: {
    type: ChunkSettings
  },
  inheritPermission: {
    type: Boolean,
    default: true
  },

  apiDatasetServer: Object,

  // Soft-delete marker field
  deleteTime: {
    type: Date,
    default: null // null means not deleted; a value records the deletion time
  },

  // abandoned
  autoSync: Boolean,
  externalReadUrl: String,
  defaultPermission: Number,
  apiServer: Object,
  feishuServer: Object,
  yuqueServer: Object
});
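
// Register indexes; a failure here is logged rather than thrown, so module
// load is not interrupted.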
try {
  DatasetSchema.index({ teamId: 1 });
  DatasetSchema.index({ type: 1 });
  DatasetSchema.index({ deleteTime: 1 }); // index on the soft-delete field
} catch (error) {
  console.log(error);
}

export const MongoDataset = getMongoModel<DatasetSchemaType>(DatasetCollectionName, DatasetSchema);
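
/*
 * Usage sketch (illustrative, not part of the original module): assuming
 * getMongoModel returns a standard Mongoose model, the deleteTime field
 * above suggests query patterns like the ones below. The helper names are
 * hypothetical, not FastGPT APIs.
 */

// List a team's datasets that have not been soft-deleted; this predicate
// can use the { teamId: 1 } and { deleteTime: 1 } indexes declared above.
export async function listActiveDatasets(teamId: string) {
  return MongoDataset.find({ teamId, deleteTime: null }).lean();
}

// Soft delete: stamp deleteTime instead of removing the document, so a
// background job can purge related data (vectors, files) later.
export async function softDeleteDataset(datasetId: string) {
  return MongoDataset.updateOne({ _id: datasetId }, { $set: { deleteTime: new Date() } });
}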