Mirror of https://github.com/labring/FastGPT.git, synced 2025-12-25 20:02:47 +00:00

File input (#2270)

* doc
* feat: file upload config
* perf: chat box file params
* feat: markdown show file
* feat: chat file store and clear
* perf: read file contentType
* feat: llm vision config
* feat: file url output
* perf: plugin error text
* perf: image load
* feat: ai chat document
* perf: file block ui
* feat: read file node
* feat: file read response field
* feat: simple mode support read files
* feat: tool call
* feat: read file histories
* perf: select file
* perf: select file config
* i18n
* i18n
* fix: ts; feat: tool response preview result

This commit is contained in:
parent 10dcdb5491
commit e36d9d794f
@ -21,6 +21,7 @@
|
|||
"i18n-ally.namespace": true,
|
||||
"i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
|
||||
"i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
|
||||
"i18n-ally.translate.engines": ["deepl", "google"],
|
||||
"[typescript]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,45 @@
---
title: 'V4.8.9 (in progress)'
description: 'FastGPT V4.8.9 release notes'
icon: 'upgrade'
draft: false
toc: true
weight: 816
---

## Upgrade guide

### 1. Back up the database

### 2. Update the image

### 3. Run the initialization

From any terminal, send one HTTP request. Replace {{rootkey}} with the `rootkey` from your environment variables, and {{host}} with the **FastGPT commercial edition domain**.

```bash
curl --location --request POST 'https://{{host}}/api/admin/init/489' \
--header 'rootkey: {{rootkey}}' \
--header 'Content-Type: application/json'
```

This initializes the multi-tenant notification channels.

-------

## V4.8.9 release notes

1. New - File upload configuration: whether images can be uploaded is no longer decided by the vision model but by a system configuration.
2. New - The AI chat node and tool call support an "enable image recognition" switch; when enabled, images uploaded in the chat box and image links in the "user question" are fetched automatically.
3. New - Document parsing node.
4. Commercial edition - Team notification account binding, used to receive important messages.
5. Commercial edition - Dataset collection tags, for tag-based dataset management.
6. Commercial edition - The dataset search node supports filtering by tag and by creation time.
7. New - Option to delete all chat guide content.
8. Optimized - Lazy loading of chat box messages to reduce network transfer.
9. Fixed - When uploading files to a dataset, progress could fail to reach 100% on an unstable network or with many files.
10. Fixed - After deleting an app, returning to chat with the last conversation pointing at the deleted app raised a "no such app" error.
11. Fixed - Default values of plugin dynamic variables were not displayed correctly.
12. Fixed - Tool call temperature and max response settings did not take effect.
13. Fixed - In function call mode, GPT models require a content parameter in the assistant role. (Does not affect most models; ToolChoice mode is now the default and FC mode is deprecated.)
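The per-app upload switch described in item 1 is a plain config object. A minimal sketch of what it holds, based on the `AppFileSelectConfigType` shape added in this commit (the import path is assumed):

```ts
import type { AppFileSelectConfigType } from '@fastgpt/global/core/app/type';

// Enable document and image upload for an app; mirrors
// defaultAppSelectFileConfig but with both switches turned on.
const fileSelectConfig: AppFileSelectConfigType = {
  canSelectFile: true, // documents: .txt, .docx, .csv, .xlsx, .pdf, .md, .html, .pptx
  canSelectImg: true,  // images, independent of whether the model has vision
  maxFiles: 10         // upper bound on files per message
};
```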
@@ -1,11 +1,19 @@
+ import { i18nT } from '../../../web/i18n/utils';

/* mongo fs bucket */
export enum BucketNameEnum {
-   dataset = 'dataset'
+   dataset = 'dataset',
+   chat = 'chat'
}
export const bucketNameMap = {
  [BucketNameEnum.dataset]: {
-     label: 'file.bucket.dataset'
+     label: i18nT('file:bucket_file')
  },
+   [BucketNameEnum.chat]: {
+     label: i18nT('file:bucket_chat')
+   }
};

export const ReadFileBaseUrl = '/api/common/file/read';

+ export const documentFileType = '.txt, .docx, .csv, .xlsx, .pdf, .md, .html, .pptx';
@@ -5,4 +5,5 @@ export type FileTokenQuery = {
  teamId: string;
  tmbId: string;
  fileId: string;
+ expiredTime?: number;
};
@@ -91,3 +91,10 @@ export const sliceJsonStr = (str: string) => {
  return jsonStr;
};

+ export const sliceStrStartEnd = (str: string, start: number, end: number) => {
+   const overSize = str.length > start + end;
+   const startContent = str.slice(0, start);
+   const endContent = overSize ? str.slice(-end) : '';
+   return startContent + (overSize ? ` ...... ` : '') + endContent;
+ };
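A quick standalone check of the new helper; the function body is copied verbatim from the diff above:

```ts
const sliceStrStartEnd = (str: string, start: number, end: number) => {
  const overSize = str.length > start + end;
  const startContent = str.slice(0, start);
  const endContent = overSize ? str.slice(-end) : '';
  return startContent + (overSize ? ` ...... ` : '') + endContent;
};

// Short strings pass through unchanged; long ones keep both ends.
console.log(sliceStrStartEnd('hello', 300, 300));      // "hello"
console.log(sliceStrStartEnd('x'.repeat(1000), 3, 3)); // "xxx ...... xxx"
```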
@@ -119,3 +119,10 @@ export const Prompt_QuotePromptList: PromptTemplateItem[] = [
    问题:"""{{question}}"""`
  }
];

+ // Document quote prompt
+ export const Prompt_DocumentQuote = `将 <Quote></Quote> 中的内容作为你的知识:
+ <Quote>
+ {{quote}}
+ </Quote>
+ `;
@@ -2,23 +2,46 @@ import openai from 'openai';
import type {
  ChatCompletionMessageToolCall,
  ChatCompletionChunk,
-   ChatCompletionMessageParam,
+   ChatCompletionMessageParam as SdkChatCompletionMessageParam,
  ChatCompletionToolMessageParam,
-   ChatCompletionAssistantMessageParam
+   ChatCompletionAssistantMessageParam,
+   ChatCompletionContentPart as SdkChatCompletionContentPart,
+   ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam
} from 'openai/resources';
import { ChatMessageTypeEnum } from './constants';

export * from 'openai/resources';

-  export type ChatCompletionMessageParam = ChatCompletionMessageParam & {
+  // Extension of ChatCompletionMessageParam, Add file url type
+  export type ChatCompletionContentPartFile = {
+    type: 'file_url';
+    name: string;
+    url: string;
+  };
+  // Rewrite ChatCompletionContentPart, Add file type
+  export type ChatCompletionContentPart =
+    | SdkChatCompletionContentPart
+    | ChatCompletionContentPartFile;
+  type CustomChatCompletionUserMessageParam = {
+    content: string | Array<ChatCompletionContentPart>;
+    role: 'user';
+    name?: string;
+  };
+
+  export type ChatCompletionMessageParam = (
+    | Exclude<SdkChatCompletionMessageParam, SdkChatCompletionUserMessageParam>
+    | CustomChatCompletionUserMessageParam
+  ) & {
    dataId?: string;
  };
+  export type SdkChatCompletionMessageParam = SdkChatCompletionMessageParam;

/* ToolChoice and functionCall extension */
export type ChatCompletionToolMessageParam = ChatCompletionToolMessageParam & { name: string };
export type ChatCompletionAssistantToolParam = {
  role: 'assistant';
  tool_calls: ChatCompletionMessageToolCall[];
};

export type ChatCompletionMessageToolCall = ChatCompletionMessageToolCall & {
  toolName?: string;
  toolAvatar?: string;

@@ -28,13 +51,16 @@ export type ChatCompletionMessageFunctionCall = ChatCompletionAssistantMessagePa
  toolName?: string;
  toolAvatar?: string;
};

// Stream response
export type StreamChatType = Stream<ChatCompletionChunk>;

-  export default openai;
-  export * from 'openai';
-
// Other
export type PromptTemplateItem = {
  title: string;
  desc: string;
  value: string;
};

+ export default openai;
+ export * from 'openai';
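With the extended content parts above, a user message can now carry documents alongside text and images. A sketch based on these types (URLs are hypothetical):

```ts
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';

const userMessage: ChatCompletionMessageParam = {
  role: 'user',
  content: [
    { type: 'text', text: 'Summarize this document.' },
    // The new file_url part type introduced in this commit.
    { type: 'file_url', name: 'report.pdf', url: '/api/common/file/read?filename=report.pdf' },
    { type: 'image_url', image_url: { url: 'https://example.com/figure1.png' } }
  ]
};
```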
@@ -1,4 +1,4 @@
- import { AppTTSConfigType, AppWhisperConfigType } from './type';
+ import { AppTTSConfigType, AppFileSelectConfigType, AppWhisperConfigType } from './type';

export enum AppTypeEnum {
  folder = 'folder',

@@ -23,3 +23,9 @@ export const defaultChatInputGuideConfig = {
  textList: [],
  customUrl: ''
};

+ export const defaultAppSelectFileConfig: AppFileSelectConfigType = {
+   canSelectFile: false,
+   canSelectImg: false,
+   maxFiles: 10
+ };
@@ -1,7 +1,7 @@
import type { FlowNodeTemplateType, StoreNodeItemType } from '../workflow/type/node';
import { AppTypeEnum } from './constants';
import { PermissionTypeEnum } from '../../support/permission/constant';
- import { VariableInputEnum } from '../workflow/constants';
+ import { NodeInputKeyEnum, VariableInputEnum } from '../workflow/constants';
import { SelectedDatasetType } from '../workflow/api';
import { DatasetSearchModeEnum } from '../dataset/constants';
import { TeamTagSchema as TeamTagsSchemaType } from '@fastgpt/global/support/user/team/type.d';

@@ -91,6 +91,7 @@ export type AppChatConfigType = {
  whisperConfig?: AppWhisperConfigType;
  scheduledTriggerConfig?: AppScheduledTriggerConfigType;
  chatInputGuide?: ChatInputGuideConfigType;
+ fileSelectConfig?: AppFileSelectConfigType;
};
export type SettingAIDataType = {
  model: string;

@@ -98,6 +99,7 @@ export type SettingAIDataType = {
  maxToken: number;
  isResponseAnswerText?: boolean;
  maxHistories?: number;
+ [NodeInputKeyEnum.aiChatVision]?: boolean; // Is vision mode enabled
};

// variable

@@ -134,3 +136,9 @@ export type AppScheduledTriggerConfigType = {
  timezone: string;
  defaultPrompt: string;
};

+ // File
+ export type AppFileSelectConfigType = {
+   canSelectFile: boolean;
+   canSelectImg: boolean;
+   maxFiles: number;
+ };
@@ -56,16 +56,21 @@ export const chats2GPTMessages = ({
          text: item.text?.content || ''
        };
      }
-     if (
-       item.type === ChatItemValueTypeEnum.file &&
-       item.file?.type === ChatFileTypeEnum.image
-     ) {
-       return {
-         type: 'image_url',
-         image_url: {
+     if (item.type === ChatItemValueTypeEnum.file) {
+       if (item.file?.type === ChatFileTypeEnum.image) {
+         return {
+           type: 'image_url',
+           image_url: {
              url: item.file?.url || ''
            }
          };
+       } else if (item.file?.type === ChatFileTypeEnum.file) {
+         return {
+           type: 'file_url',
+           name: item.file?.name || '',
+           url: item.file?.url || ''
+         };
+       }
      }
    })
    .filter(Boolean) as ChatCompletionContentPart[];

@@ -175,6 +180,16 @@ export const GPTMessages2Chats = (
            url: item.image_url.url
          }
        });
+     } else if (item.type === 'file_url') {
+       value.push({
+         // @ts-ignore
+         type: ChatItemValueTypeEnum.file,
+         file: {
+           type: ChatFileTypeEnum.file,
+           name: item.name,
+           url: item.url
+         }
+       });
      }
    });
  }
@@ -117,6 +117,7 @@ export type ChatItemType = (UserChatItemType | SystemChatItemType | AIChatItemTy
  dataId?: string;
} & ResponseTagItemType;

// Frontend type
export type ChatSiteItemType = (UserChatItemType | SystemChatItemType | AIChatItemType) & {
  dataId: string;
  status: `${ChatStatusEnum}`;
@@ -2,6 +2,7 @@ import { DispatchNodeResponseType } from '../workflow/runtime/type';
import { FlowNodeTypeEnum } from '../workflow/node/constant';
import { ChatItemValueTypeEnum, ChatRoleEnum } from './constants';
import { ChatHistoryItemResType, ChatItemType, UserChatItemValueItemType } from './type.d';
+ import { sliceStrStartEnd } from '../../common/string/tools';

// Concat 2 -> 1, and sort by role
export const concatHistories = (histories1: ChatItemType[], histories2: ChatItemType[]) => {

@@ -25,6 +26,7 @@ export const getChatTitleFromChatMessage = (message?: ChatItemType, defaultValue
  return defaultValue;
};

+ // Keep the first n and last n characters
export const getHistoryPreview = (
  completeMessages: ChatItemType[]
): {

@@ -32,30 +34,44 @@ export const getHistoryPreview = (
  value: string;
}[] => {
  return completeMessages.map((item, i) => {
-   if (item.obj === ChatRoleEnum.System || i >= completeMessages.length - 2) {
-     return {
-       obj: item.obj,
-       value: item.value?.[0]?.text?.content || ''
-     };
-   }
-
-   const content = item.value
-     .map((item) => {
-       if (item.text?.content) {
-         const content =
-           item.text.content.length > 20
-             ? `${item.text.content.slice(0, 20)}...`
-             : item.text.content;
-         return content;
-       }
-       return '';
-     })
-     .filter(Boolean)
-     .join('\n');
+   const n = item.obj === ChatRoleEnum.System || i >= completeMessages.length - 2 ? 80 : 40;
+
+   // Get message text content
+   const rawText = (() => {
+     if (item.obj === ChatRoleEnum.System) {
+       return item.value?.map((item) => item.text?.content).join('') || '';
+     } else if (item.obj === ChatRoleEnum.Human) {
+       return (
+         item.value
+           ?.map((item) => {
+             if (item?.text?.content) return item?.text?.content;
+             if (item.file?.type === 'image') return 'Input an image';
+             return '';
+           })
+           .filter(Boolean)
+           .join('\n') || ''
+       );
+     } else if (item.obj === ChatRoleEnum.AI) {
+       return (
+         item.value
+           ?.map((item) => {
+             return (
+               item.text?.content || item?.tools?.map((item) => item.toolName).join(',') || ''
+             );
+           })
+           .join('') || ''
+       );
+     }
+     return '';
+   })();
+
+   const startContent = rawText.slice(0, n);
+   const endContent = rawText.length > 2 * n ? rawText.slice(-n) : '';
+   const content = startContent + (rawText.length > n ? ` ...... ` : '') + endContent;

    return {
      obj: item.obj,
-     value: content
+     value: sliceStrStartEnd(content, 80, 80)
    };
  });
};
@@ -75,6 +75,8 @@ export enum NodeInputKeyEnum {
  aiChatQuoteTemplate = 'quoteTemplate',
  aiChatQuotePrompt = 'quotePrompt',
  aiChatDatasetQuote = 'quoteQA',
+ aiChatVision = 'aiChatVision',
+ stringQuoteText = 'stringQuoteText',

  // dataset
  datasetSelectList = 'datasets',

@@ -118,7 +120,10 @@ export enum NodeInputKeyEnum {

  // code
  code = 'code',
- codeType = 'codeType' // js|py
+ codeType = 'codeType', // js|py
+
+ // read files
+ fileUrlList = 'fileUrlList'
}

export enum NodeOutputKeyEnum {

@@ -133,6 +138,9 @@ export enum NodeOutputKeyEnum {
  addOutputParam = 'system_addOutputParam',
  rawResponse = 'system_rawResponse',

+ // start
+ userFiles = 'userFiles',
+
  // dataset
  datasetQuoteQA = 'quoteQA',
@@ -117,7 +117,8 @@ export enum FlowNodeTypeEnum {
  variableUpdate = 'variableUpdate',
  code = 'code',
  textEditor = 'textEditor',
- customFeedback = 'customFeedback'
+ customFeedback = 'customFeedback',
+ readFiles = 'readFiles'
}

// node IO value type
@@ -16,10 +16,12 @@ import { UserModelSchema } from '../../../support/user/type';
import { AppDetailType, AppSchema } from '../../app/type';
import { RuntimeNodeItemType } from '../runtime/type';
import { RuntimeEdgeItemType } from './edge';
+ import { ReadFileNodeResponse } from '../template/system/readFiles/type';

/* workflow props */
export type ChatDispatchProps = {
  res?: NextApiResponse;
+ requestOrigin?: string;
  mode: 'test' | 'chat' | 'debug';
  teamId: string;
  tmbId: string;

@@ -30,6 +32,7 @@ export type ChatDispatchProps = {
  histories: ChatItemType[];
  variables: Record<string, any>; // global variable
  query: UserChatItemValueItemType[]; // trigger query
+ chatConfig: AppSchema['chatConfig'];
  stream: boolean;
  detail: boolean; // response detail
  maxRunTimes: number;

@@ -146,6 +149,10 @@ export type DispatchNodeResponseType = {

  // plugin
  pluginOutput?: Record<string, any>;

+ // read files
+ readFilesResult?: string;
+ readFiles?: ReadFileNodeResponse;
};

export type DispatchNodeResultType<T> = {

@@ -166,4 +173,6 @@ export type AIChatNodeProps = {
  [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
  [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
  [NodeInputKeyEnum.aiChatQuotePrompt]?: string;
+ [NodeInputKeyEnum.aiChatVision]?: boolean;
+ [NodeInputKeyEnum.stringQuoteText]?: string;
};
@@ -25,6 +25,7 @@ import { VariableUpdateNode } from './system/variableUpdate';
import { CodeNode } from './system/sandbox';
import { TextEditorNode } from './system/textEditor';
import { CustomFeedbackNode } from './system/customFeedback';
+ import { ReadFilesNodes } from './system/readFiles';

const systemNodes: FlowNodeTemplateType[] = [
  AiChatModule,

@@ -36,6 +37,7 @@ const systemNodes: FlowNodeTemplateType[] = [
  StopToolNode,
  ClassifyQuestionModule,
  ContextExtractModule,
+ ReadFilesNodes,
  HttpNode468,
  AiQueryExtension,
  LafModule,
@@ -3,6 +3,7 @@ import { FlowNodeInputTypeEnum } from '../node/constant';
import { WorkflowIOValueTypeEnum } from '../constants';
import { chatNodeSystemPromptTip } from './tip';
import { FlowNodeInputItemType } from '../type/io';
+ import { i18nT } from '../../../../web/i18n/utils';

export const Input_Template_History: FlowNodeInputItemType = {
  key: NodeInputKeyEnum.history,

@@ -64,3 +65,11 @@ export const Input_Template_Dataset_Quote: FlowNodeInputItemType = {
  description: '',
  valueType: WorkflowIOValueTypeEnum.datasetQuote
};

+ export const Input_Template_Text_Quote: FlowNodeInputItemType = {
+   key: NodeInputKeyEnum.stringQuoteText,
+   renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.textarea],
+   label: i18nT('app:document_quote'),
+   debugLabel: i18nT('app:document_quote'),
+   description: i18nT('app:document_quote_tip'),
+   valueType: WorkflowIOValueTypeEnum.string
+ };
@@ -15,10 +15,12 @@ import {
  Input_Template_Dataset_Quote,
  Input_Template_History,
  Input_Template_System_Prompt,
- Input_Template_UserChatInput
+ Input_Template_UserChatInput,
+ Input_Template_Text_Quote
} from '../input';
import { chatNodeSystemPromptTip } from '../tip';
import { getHandleConfig } from '../utils';
+ import { i18nT } from '../../../../../web/i18n/utils';

export const AiChatModule: FlowNodeTemplateType = {
  id: FlowNodeTypeEnum.chatNode,

@@ -27,8 +29,8 @@ export const AiChatModule: FlowNodeTemplateType = {
  sourceHandle: getHandleConfig(true, true, true, true),
  targetHandle: getHandleConfig(true, true, true, true),
  avatar: 'core/workflow/template/aiChat',
- name: 'AI 对话',
- intro: 'AI 大模型对话',
+ name: i18nT('workflow:template.ai_chat'),
+ intro: i18nT('workflow:template.ai_chat_intro'),
  showStatus: true,
  isTool: true,
  version: '481',

@@ -40,20 +42,14 @@ export const AiChatModule: FlowNodeTemplateType = {
      renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
      label: '',
      value: 0,
-     valueType: WorkflowIOValueTypeEnum.number,
-     min: 0,
-     max: 10,
-     step: 1
+     valueType: WorkflowIOValueTypeEnum.number
    },
    {
      key: NodeInputKeyEnum.aiChatMaxToken,
      renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
      label: '',
      value: 2000,
-     valueType: WorkflowIOValueTypeEnum.number,
-     min: 100,
-     max: 4000,
-     step: 50
+     valueType: WorkflowIOValueTypeEnum.number
    },
    {
      key: NodeInputKeyEnum.aiChatIsResponseText,

@@ -74,6 +70,13 @@ export const AiChatModule: FlowNodeTemplateType = {
      label: '',
      valueType: WorkflowIOValueTypeEnum.string
    },
+   {
+     key: NodeInputKeyEnum.aiChatVision,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.boolean,
+     value: true
+   },
    // settings modal ---
    {
      ...Input_Template_System_Prompt,

@@ -82,8 +85,9 @@ export const AiChatModule: FlowNodeTemplateType = {
      placeholder: chatNodeSystemPromptTip
    },
    Input_Template_History,
-   { ...Input_Template_UserChatInput, toolDescription: '用户问题' },
-   Input_Template_Dataset_Quote
+   Input_Template_Dataset_Quote,
+   Input_Template_Text_Quote,
+   { ...Input_Template_UserChatInput, toolDescription: '用户问题' }
  ],
  outputs: [
    {
@@ -13,9 +13,9 @@ import {
import { Input_Template_UserChatInput } from '../input';
import { DatasetSearchModeEnum } from '../../../dataset/constants';
import { getHandleConfig } from '../utils';
+ import { i18nT } from '../../../../../web/i18n/utils';

- export const Dataset_SEARCH_DESC =
-   '调用“语义检索”和“全文检索”能力,从“知识库”中查找可能与问题相关的参考内容';
+ export const Dataset_SEARCH_DESC = i18nT('workflow:template.dataset_search_intro');

export const DatasetSearchModule: FlowNodeTemplateType = {
  id: FlowNodeTypeEnum.datasetSearchNode,

@@ -24,7 +24,7 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
  sourceHandle: getHandleConfig(true, true, true, true),
  targetHandle: getHandleConfig(true, true, true, true),
  avatar: 'core/workflow/template/datasetSearch',
- name: '知识库搜索',
+ name: i18nT('workflow:template.dataset_search'),
  intro: Dataset_SEARCH_DESC,
  showStatus: true,
  isTool: true,
@@ -0,0 +1,48 @@
import { i18nT } from '../../../../../../web/i18n/utils';
import {
  FlowNodeTemplateTypeEnum,
  NodeInputKeyEnum,
  NodeOutputKeyEnum,
  WorkflowIOValueTypeEnum
} from '../../../constants';
import {
  FlowNodeInputTypeEnum,
  FlowNodeOutputTypeEnum,
  FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { getHandleConfig } from '../../utils';

export const ReadFilesNodes: FlowNodeTemplateType = {
  id: FlowNodeTypeEnum.readFiles,
  templateType: FlowNodeTemplateTypeEnum.tools,
  flowNodeType: FlowNodeTypeEnum.readFiles,
  sourceHandle: getHandleConfig(true, true, true, true),
  targetHandle: getHandleConfig(true, true, true, true),
  avatar: 'core/app/simpleMode/file',
  name: i18nT('app:workflow.read_files'),
  intro: i18nT('app:workflow.read_files_tip'),
  showStatus: true,
  version: '489',
  isTool: true,
  inputs: [
    {
      key: NodeInputKeyEnum.fileUrlList,
      renderTypeList: [FlowNodeInputTypeEnum.reference],
      valueType: WorkflowIOValueTypeEnum.arrayString,
      label: i18nT('app:workflow.file_url'),
      required: true,
      value: []
    }
  ],
  outputs: [
    {
      id: NodeOutputKeyEnum.text,
      key: NodeOutputKeyEnum.text,
      label: i18nT('app:workflow.read_files_result'),
      description: i18nT('app:workflow.read_files_result_desc'),
      valueType: WorkflowIOValueTypeEnum.string,
      type: FlowNodeOutputTypeEnum.static
    }
  ]
};
@@ -0,0 +1,4 @@
export type ReadFileNodeResponse = {
  url: string;
  name: string;
}[];
@@ -2,6 +2,7 @@ import { FlowNodeTypeEnum } from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node.d';
import { FlowNodeTemplateTypeEnum } from '../../constants';
import { getHandleConfig } from '../utils';
+ import { i18nT } from '../../../../../web/i18n/utils';

export const SystemConfigNode: FlowNodeTemplateType = {
  id: FlowNodeTypeEnum.systemConfig,

@@ -10,8 +11,8 @@ export const SystemConfigNode: FlowNodeTemplateType = {
  sourceHandle: getHandleConfig(false, false, false, false),
  targetHandle: getHandleConfig(false, false, false, false),
  avatar: 'core/workflow/template/systemConfig',
- name: '系统配置',
- intro: '可以配置应用的系统参数。',
+ name: i18nT('workflow:template.system_config'),
+ intro: '',
  unique: true,
  forbidDelete: true,
  version: '481',
@@ -19,6 +19,7 @@ import {
import { chatNodeSystemPromptTip } from '../tip';
import { LLMModelTypeEnum } from '../../../ai/constants';
import { getHandleConfig } from '../utils';
+ import { i18nT } from '../../../../../web/i18n/utils';

export const ToolModule: FlowNodeTemplateType = {
  id: FlowNodeTypeEnum.tools,

@@ -27,8 +28,8 @@ export const ToolModule: FlowNodeTemplateType = {
  sourceHandle: getHandleConfig(true, true, false, true),
  targetHandle: getHandleConfig(true, true, false, true),
  avatar: 'core/workflow/template/toolCall',
- name: '工具调用',
- intro: '通过AI模型自动选择一个或多个功能块进行调用,也可以对插件进行调用。',
+ name: i18nT('workflow:template.tool_call'),
+ intro: i18nT('workflow:template.tool_call_intro'),
  showStatus: true,
  version: '481',
  inputs: [

@@ -41,21 +42,23 @@ export const ToolModule: FlowNodeTemplateType = {
      renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
      label: '',
      value: 0,
-     valueType: WorkflowIOValueTypeEnum.number,
-     min: 0,
-     max: 10,
-     step: 1
+     valueType: WorkflowIOValueTypeEnum.number
    },
    {
      key: NodeInputKeyEnum.aiChatMaxToken,
      renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
      label: '',
      value: 2000,
-     valueType: WorkflowIOValueTypeEnum.number,
-     min: 100,
-     max: 4000,
-     step: 50
+     valueType: WorkflowIOValueTypeEnum.number
    },
+   {
+     key: NodeInputKeyEnum.aiChatVision,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.boolean,
+     value: true
+   },

    {
      ...Input_Template_System_Prompt,
      label: 'core.ai.Prompt',
@@ -7,6 +7,17 @@ import {
} from '../../constants';
import { getHandleConfig } from '../utils';
import { Input_Template_UserChatInput } from '../input';
+ import { i18nT } from '../../../../../web/i18n/utils';
+ import { FlowNodeOutputItemType } from '../../type/io';
+
+ export const userFilesInput: FlowNodeOutputItemType = {
+   id: NodeOutputKeyEnum.userFiles,
+   key: NodeOutputKeyEnum.userFiles,
+   label: i18nT('app:workflow.user_file_input'),
+   description: i18nT('app:workflow.user_file_input_desc'),
+   type: FlowNodeOutputTypeEnum.static,
+   valueType: WorkflowIOValueTypeEnum.arrayString
+ };

export const WorkflowStart: FlowNodeTemplateType = {
  id: FlowNodeTypeEnum.workflowStart,

@@ -15,7 +26,7 @@ export const WorkflowStart: FlowNodeTemplateType = {
  sourceHandle: getHandleConfig(false, true, false, false),
  targetHandle: getHandleConfig(false, false, false, false),
  avatar: 'core/workflow/template/workflowStart',
- name: '流程开始',
+ name: i18nT('workflow:template.workflow_start'),
  intro: '',
  forbidDelete: true,
  unique: true,

@@ -25,7 +36,7 @@ export const WorkflowStart: FlowNodeTemplateType = {
    {
      id: NodeOutputKeyEnum.userChatInput,
      key: NodeOutputKeyEnum.userChatInput,
-     label: 'core.module.input.label.user question',
+     label: i18nT('common:core.module.input.label.user question'),
      type: FlowNodeOutputTypeEnum.static,
      valueType: WorkflowIOValueTypeEnum.string
    }
@@ -82,6 +82,8 @@ export const splitGuideModule = (guideModules?: StoreNodeItemType) => {
    chatInputGuide
  };
};

+ // Get app chat config: db > nodes
export const getAppChatConfig = ({
  chatConfig,
  systemConfigNode,
@@ -1,6 +1,7 @@
import { search, SafeSearchType } from 'duck-duck-scrape';
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '@fastgpt/service/common/system/log';
+ import { getErrText } from '@fastgpt/global/common/error/utils';

type Props = {
  query: string;

@@ -35,7 +36,7 @@ const main = async (props: Props, retry = 3): Response => {
    if (retry <= 0) {
      addLog.warn('DuckDuckGo error', { error });
      return {
-       result: 'Failed to fetch data'
+       result: getErrText(error, 'Failed to fetch data from DuckDuckGo')
      };
    }
@@ -1,6 +1,7 @@
import { searchImages, SafeSearchType } from 'duck-duck-scrape';
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '@fastgpt/service/common/system/log';
+ import { getErrText } from '@fastgpt/global/common/error/utils';

type Props = {
  query: string;

@@ -33,7 +34,7 @@ const main = async (props: Props, retry = 3): Response => {
    if (retry <= 0) {
      addLog.warn('DuckDuckGo error', { error });
      return {
-       result: 'Failed to fetch data'
+       result: getErrText(error, 'Failed to fetch data from DuckDuckGo')
      };
    }
@@ -1,6 +1,7 @@
import { searchNews, SafeSearchType } from 'duck-duck-scrape';
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '@fastgpt/service/common/system/log';
+ import { getErrText } from '@fastgpt/global/common/error/utils';

type Props = {
  query: string;

@@ -34,7 +35,7 @@ const main = async (props: Props, retry = 3): Response => {
    if (retry <= 0) {
      addLog.warn('DuckDuckGo error', { error });
      return {
-       result: 'Failed to fetch data'
+       result: getErrText(error, 'Failed to fetch data from DuckDuckGo')
      };
    }
@@ -1,6 +1,7 @@
import { searchVideos, SafeSearchType } from 'duck-duck-scrape';
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '@fastgpt/service/common/system/log';
+ import { getErrText } from '@fastgpt/global/common/error/utils';

type Props = {
  query: string;

@@ -34,7 +35,7 @@ const main = async (props: Props, retry = 3): Response => {
    if (retry <= 0) {
      addLog.warn('DuckDuckGo error', { error });
      return {
-       result: 'Failed to fetch data'
+       result: getErrText(error, 'Failed to fetch data from DuckDuckGo')
      };
    }
@@ -1,6 +1,6 @@
{
  "author": "",
- "version": "486",
+ "version": "489",
  "name": "文本加工",
  "avatar": "/imgs/workflow/textEditor.svg",
  "intro": "可对固定或传入的文本进行加工后输出,非字符串类型数据最终会转成字符串类型。",
@@ -1,5 +1,5 @@
- import { connectionMongo, getMongoModel, type Model } from '../../mongo';
- const { Schema, model, models } = connectionMongo;
+ import { connectionMongo, getMongoModel } from '../../mongo';
+ const { Schema } = connectionMongo;
import { RawTextBufferSchemaType } from './type';

export const collectionName = 'buffer_rawtexts';
@@ -3,16 +3,19 @@ import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import fsp from 'fs/promises';
import fs from 'fs';
import { DatasetFileSchema } from '@fastgpt/global/core/dataset/type';
- import { MongoFileSchema } from './schema';
+ import { MongoChatFileSchema, MongoDatasetFileSchema } from './schema';
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
import { MongoRawTextBuffer } from '../../buffer/rawText/schema';
import { readRawContentByFileBuffer } from '../read/utils';
import { gridFsStream2Buffer, stream2Encoding } from './utils';
import { addLog } from '../../system/log';
+ import { readFromSecondary } from '../../mongo/utils';

export function getGFSCollection(bucket: `${BucketNameEnum}`) {
- MongoFileSchema;
+ MongoDatasetFileSchema;
+ MongoChatFileSchema;

  return connectionMongo.connection.db.collection(`${bucket}.files`);
}
export function getGridBucket(bucket: `${BucketNameEnum}`) {

@@ -49,6 +52,7 @@ export async function uploadFile({

  const { stream: readStream, encoding } = await stream2Encoding(fs.createReadStream(path));

+ // Add default metadata
  metadata.teamId = teamId;
  metadata.tmbId = tmbId;
  metadata.encoding = encoding;

@@ -103,7 +107,9 @@ export async function delFileByFileIdList({
  try {
    const bucket = getGridBucket(bucketName);

-   await Promise.all(fileIdList.map((id) => bucket.delete(new Types.ObjectId(id))));
+   for await (const fileId of fileIdList) {
+     await bucket.delete(new Types.ObjectId(fileId));
+   }
  } catch (error) {
    if (retry > 0) {
      return delFileByFileIdList({ bucketName, fileIdList, retry: retry - 1 });

@@ -138,7 +144,9 @@ export const readFileContentFromMongo = async ({
  filename: string;
}> => {
  // read buffer
- const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: fileId }).lean();
+ const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: fileId }, undefined, {
+   ...readFromSecondary
+ }).lean();
  if (fileBuffer) {
    return {
      rawText: fileBuffer.rawText,
@@ -1,13 +1,17 @@
import { connectionMongo, getMongoModel, type Model } from '../../mongo';
- const { Schema, model, models } = connectionMongo;
+ const { Schema } = connectionMongo;

- const FileSchema = new Schema({});
+ const DatasetFileSchema = new Schema({});
+ const ChatFileSchema = new Schema({});

try {
-   FileSchema.index({ 'metadata.teamId': 1 });
-   FileSchema.index({ 'metadata.uploadDate': -1 });
+   DatasetFileSchema.index({ uploadDate: -1 });
+
+   ChatFileSchema.index({ uploadDate: -1 });
+   ChatFileSchema.index({ 'metadata.chatId': 1 });
} catch (error) {
  console.log(error);
}

- export const MongoFileSchema = getMongoModel('dataset.files', FileSchema);
+ export const MongoDatasetFileSchema = getMongoModel('dataset.files', DatasetFileSchema);
+ export const MongoChatFileSchema = getMongoModel('chat.files', ChatFileSchema);
@@ -8,28 +8,6 @@ import fs from 'fs';
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
import type { ReadFileResponse } from '../../../worker/readFile/type';

- // match md img text and upload to db
- export const matchMdImgTextAndUpload = ({
-   teamId,
-   md,
-   metadata
- }: {
-   md: string;
-   teamId: string;
-   metadata?: Record<string, any>;
- }) =>
-   markdownProcess({
-     rawText: md,
-     uploadImgController: (base64Img) =>
-       uploadMongoImg({
-         type: MongoImageTypeEnum.collectionImage,
-         base64Img,
-         teamId,
-         metadata,
-         expiredTime: addHours(new Date(), 2)
-       })
-   });

export type readRawTextByLocalFileParams = {
  teamId: string;
  path: string;

@@ -72,6 +50,28 @@ export const readRawContentByFileBuffer = async ({
  encoding: string;
  metadata?: Record<string, any>;
}) => {
+ // Upload image in markdown
+ const matchMdImgTextAndUpload = ({
+   teamId,
+   md,
+   metadata
+ }: {
+   md: string;
+   teamId: string;
+   metadata?: Record<string, any>;
+ }) =>
+   markdownProcess({
+     rawText: md,
+     uploadImgController: (base64Img) =>
+       uploadMongoImg({
+         type: MongoImageTypeEnum.collectionImage,
+         base64Img,
+         teamId,
+         metadata,
+         expiredTime: addHours(new Date(), 1)
+       })
+   });

  let { rawText, formatText } = await runWorker<ReadFileResponse>(WorkerNameEnum.readFile, {
    extension,
    encoding,
@@ -18,7 +18,17 @@ export const guessBase64ImageType = (str: string) => {
    i: 'image/png',
    R: 'image/gif',
    U: 'image/webp',
-   Q: 'image/bmp'
+   Q: 'image/bmp',
+   P: 'image/svg+xml',
+   T: 'image/tiff',
+   J: 'image/jp2',
+   S: 'image/x-tga',
+   I: 'image/ief',
+   V: 'image/vnd.microsoft.icon',
+   W: 'image/vnd.wap.wbmp',
+   X: 'image/x-xbitmap',
+   Z: 'image/x-xpixmap',
+   Y: 'image/x-xwindowdump'
  };

  const defaultType = 'image/jpeg';

@@ -30,6 +40,11 @@ export const guessBase64ImageType = (str: string) => {
  return imageTypeMap[firstChar] || defaultType;
};

+ export const getFileContentTypeFromHeader = (header: string): string | undefined => {
+   const contentType = header.split(';')[0];
+   return contentType;
+ };

export const clearDirFiles = (dirPath: string) => {
  if (!fs.existsSync(dirPath)) {
    return;
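The header helper simply drops any parameters after the media type; a standalone check (function copied from the diff above):

```ts
const getFileContentTypeFromHeader = (header: string): string | undefined => {
  const contentType = header.split(';')[0];
  return contentType;
};

console.log(getFileContentTypeFromHeader('image/png'));                // "image/png"
console.log(getFileContentTypeFromHeader('text/html; charset=utf-8')); // "text/html"
```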
@@ -1,6 +1,7 @@
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d';
import { getAIApi } from '../config';
import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
+ import { loadRequestMessages } from '../../chat/utils';

export const Prompt_QuestionGuide = `你是一个AI智能助手,可以回答和解决我的问题。请结合前面的对话记录,帮我生成 3 个问题,引导我继续提问。问题的长度应小于20个字符,按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;

@@ -25,7 +26,10 @@ export async function createQuestionGuide({
  model: model,
  temperature: 0.1,
  max_tokens: 200,
- messages: concatMessages,
+ messages: await loadRequestMessages({
+   messages: concatMessages,
+   useVision: false
+ }),
  stream: false
});
@@ -0,0 +1,39 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { countGptMessagesTokens } from '../../common/string/tiktoken';

export const computedMaxToken = async ({
  maxToken,
  model,
  filterMessages = []
}: {
  maxToken: number;
  model: LLMModelItemType;
  filterMessages: ChatCompletionMessageParam[];
}) => {
  maxToken = Math.min(maxToken, model.maxResponse);
  const tokensLimit = model.maxContext;

  /* count response max token */
  const promptsToken = await countGptMessagesTokens(filterMessages);
  maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;

  if (maxToken <= 0) {
    maxToken = 200;
  }
  return maxToken;
};

// FastGPT temperature range is [0,10]; model temperature ranges vary, e.g. [0,2] or (0,1]
export const computedTemperature = ({
  model,
  temperature
}: {
  model: LLMModelItemType;
  temperature: number;
}) => {
  temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
  temperature = Math.max(temperature, 0.01);

  return temperature;
};
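Worked example of the temperature mapping, assuming a model with `maxTemperature = 2`: a FastGPT temperature of 5 (mid-scale) becomes 1, and 0 is clamped to 0.01 so the API never receives an exact zero.

```ts
// Standalone sketch of computedTemperature's arithmetic.
const maxTemperature = 2; // assumed model config
for (const t of [0, 5, 10]) {
  const scaled = Math.max(+(maxTemperature * (t / 10)).toFixed(2), 0.01);
  console.log(t, '->', scaled); // 0 -> 0.01, 5 -> 1, 10 -> 2
}
```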
@@ -17,7 +17,8 @@ export const chatConfigType = {
  ttsConfig: Object,
  whisperConfig: Object,
  scheduledTriggerConfig: Object,
- chatInputGuide: Object
+ chatInputGuide: Object,
+ fileSelectConfig: Object
};

// schema
@@ -2,6 +2,9 @@ import type { ChatItemType, ChatItemValueItemType } from '@fastgpt/global/core/c
import { MongoChatItem } from './chatItemSchema';
import { addLog } from '../../common/system/log';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
+ import { delFileByFileIdList, getGFSCollection } from '../../common/file/gridfs/controller';
+ import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
+ import { MongoChat } from './chatSchema';

export async function getChatItems({
  appId,

@@ -75,3 +78,40 @@ export const addCustomFeedbacks = async ({
    addLog.error('addCustomFeedbacks error', error);
  }
};

+ /*
+   Delete chat files
+   1. chatIdList: delete one chat's files
+   2. appId: delete all of the app's chat files
+ */
+ export const deleteChatFiles = async ({
+   chatIdList,
+   appId
+ }: {
+   chatIdList?: string[];
+   appId?: string;
+ }) => {
+   if (!appId && !chatIdList) return Promise.reject('appId or chatIdList is required');
+
+   const appChatIdList = await (async () => {
+     if (appId) {
+       const appChatIdList = await MongoChat.find({ appId }, { chatId: 1 });
+       return appChatIdList.map((item) => String(item.chatId));
+     } else if (chatIdList) {
+       return chatIdList;
+     }
+     return [];
+   })();
+
+   const collection = getGFSCollection(BucketNameEnum.chat);
+   const where = {
+     'metadata.chatId': { $in: appChatIdList }
+   };
+
+   const files = await collection.find(where, { projection: { _id: 1 } }).toArray();
+
+   await delFileByFileIdList({
+     bucketName: BucketNameEnum.chat,
+     fileIdList: files.map((item) => String(item._id))
+   });
+ };
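Callers pass either a chatIdList or an appId; a usage sketch (IDs and import path are hypothetical):

```ts
import { deleteChatFiles } from '@fastgpt/service/core/chat/controller'; // path assumed

// Delete the uploaded files of two specific conversations.
await deleteChatFiles({ chatIdList: ['chat-id-1', 'chat-id-2'] });

// Or wipe every chat file that belongs to one app.
await deleteChatFiles({ appId: 'my-app-id' });
```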
@@ -1,13 +1,13 @@
import { countGptMessagesTokens } from '../../common/string/tiktoken/index';
import type {
  ChatCompletionContentPart,
- ChatCompletionMessageParam
+ ChatCompletionMessageParam,
+ SdkChatCompletionMessageParam
} from '@fastgpt/global/core/ai/type.d';
import axios from 'axios';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
- import { guessBase64ImageType } from '../../common/file/utils';
+ import { getFileContentTypeFromHeader, guessBase64ImageType } from '../../common/file/utils';
import { serverRequestBaseUrl } from '../../common/api/serverRequest';
import { cloneDeep } from 'lodash';

/* slice chat context by tokens */
const filterEmptyMessages = (messages: ChatCompletionMessageParam[]) => {

@@ -96,89 +96,183 @@ export const filterGPTMessageByMaxTokens = async ({
  return filterEmptyMessages([...systemPrompts, ...chats]);
};

- export const formatGPTMessagesInRequestBefore = (messages: ChatCompletionMessageParam[]) => {
-   return messages
-     .map((item) => {
-       if (!item.content) return;
-       if (typeof item.content === 'string') {
-         return {
-           ...item,
-           content: item.content.trim()
-         };
-       }
-
-       // array
-       if (item.content.length === 0) return;
-       if (item.content.length === 1 && item.content[0].type === 'text') {
-         return {
-           ...item,
-           content: item.content[0].text
-         };
-       }
-
-       return item;
-     })
-     .filter(Boolean) as ChatCompletionMessageParam[];
- };
-
- /* Load user chat content.
-   Img: to base 64
- */
- export const loadChatImgToBase64 = async (content: string | ChatCompletionContentPart[]) => {
-   if (typeof content === 'string') {
-     return content;
-   }
-
-   return Promise.all(
-     content.map(async (item) => {
-       if (item.type === 'text') return item;
-
-       if (!item.image_url.url) return item;
-
-       /*
-         1. From db: Get it from db
-         2. From web: Not update
-       */
-       if (item.image_url.url.startsWith('/')) {
-         const response = await axios.get(item.image_url.url, {
-           baseURL: serverRequestBaseUrl,
-           responseType: 'arraybuffer'
-         });
-         const base64 = Buffer.from(response.data).toString('base64');
-         let imageType = response.headers['content-type'];
-         if (imageType === undefined) {
-           imageType = guessBase64ImageType(base64);
-         }
-         return {
-           ...item,
-           image_url: {
-             ...item.image_url,
-             url: `data:${imageType};base64,${base64}`
-           }
-         };
-       }
-
-       return item;
-     })
-   );
- };
- export const loadRequestMessages = async (messages: ChatCompletionMessageParam[]) => {
-   if (messages.length === 0) {
-     return Promise.reject('core.chat.error.Messages empty');
-   }
-
-   const loadMessages = await Promise.all(
-     messages.map(async (item) => {
-       if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
-         return {
-           ...item,
-           content: await loadChatImgToBase64(item.content)
-         };
-       } else {
-         return item;
-       }
-     })
-   );
-
-   return loadMessages;
- };
+ /*
+   Format requested messages
+   1. If not useVision, only retain text.
+   2. Remove file_url
+   3. If useVision, parse url from question, and load image from url (local url)
+ */
+ export const loadRequestMessages = async ({
+   messages,
+   useVision = true,
+   origin
+ }: {
+   messages: ChatCompletionMessageParam[];
+   useVision?: boolean;
+   origin?: string;
+ }) => {
+   // Split question text and image
+   function parseStringWithImages(input: string): ChatCompletionContentPart[] {
+     if (!useVision) {
+       return [{ type: 'text', text: input || '' }];
+     }
+
+     // Regex that matches image URLs
+     const imageRegex = /(https?:\/\/.*\.(?:png|jpe?g|gif|webp|bmp|tiff?|svg|ico|heic|avif))/i;
+
+     const result: { type: 'text' | 'image'; value: string }[] = [];
+     let lastIndex = 0;
+     let match;
+
+     // Find all matches with the regex
+     while ((match = imageRegex.exec(input.slice(lastIndex))) !== null) {
+       const textBefore = input.slice(lastIndex, lastIndex + match.index);
+
+       // If there is text before the image URL, add it as a text part
+       if (textBefore) {
+         result.push({ type: 'text', value: textBefore });
+       }
+
+       // Add the image URL
+       result.push({ type: 'image', value: match[0] });
+
+       lastIndex += match.index + match[0].length;
+     }
+
+     // Append any remaining text
+     if (lastIndex < input.length) {
+       result.push({ type: 'text', value: input.slice(lastIndex) });
+     }
+
+     return result
+       .map((item) => {
+         if (item.type === 'text') {
+           return { type: 'text', text: item.value };
+         }
+         if (item.type === 'image') {
+           return {
+             type: 'image_url',
+             image_url: {
+               url: item.value
+             }
+           };
+         }
+         return { type: 'text', text: item.value };
+       })
+       .filter(Boolean) as ChatCompletionContentPart[];
+   }
+   // Load image
+   const parseUserContent = async (content: string | ChatCompletionContentPart[]) => {
+     if (typeof content === 'string') {
+       return parseStringWithImages(content);
+     }
+
+     const result = await Promise.all(
+       content.map(async (item) => {
+         if (item.type === 'text') return parseStringWithImages(item.text);
+         if (item.type === 'file_url') return;
+
+         if (!item.image_url.url) return item;
+
+         // Remove url origin
+         const imgUrl = (() => {
+           if (origin && item.image_url.url.startsWith(origin)) {
+             return item.image_url.url.replace(origin, '');
+           }
+           return item.image_url.url;
+         })();
+
+         /* Load local image */
+         if (imgUrl.startsWith('/')) {
+           const response = await axios.get(imgUrl, {
+             baseURL: serverRequestBaseUrl,
+             responseType: 'arraybuffer'
+           });
+           const base64 = Buffer.from(response.data, 'binary').toString('base64');
+           const imageType =
+             getFileContentTypeFromHeader(response.headers['content-type']) ||
+             guessBase64ImageType(base64);
+
+           return {
+             ...item,
+             image_url: {
+               ...item.image_url,
+               url: `data:${imageType};base64,${base64}`
+             }
+           };
+         }
+
+         return item;
+       })
+     );
+
+     return result.flat().filter(Boolean);
+   };
+   // format GPT messages, concat text messages
+   const clearInvalidMessages = (messages: ChatCompletionMessageParam[]) => {
+     return messages
+       .map((item) => {
+         if (item.role === ChatCompletionRequestMessageRoleEnum.System && !item.content) {
+           return;
+         }
+         if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
+           if (!item.content) return;
+
+           if (typeof item.content === 'string') {
+             return {
+               ...item,
+               content: item.content.trim()
+             };
+           }
+
+           // array
+           if (item.content.length === 0) return;
+           if (item.content.length === 1 && item.content[0].type === 'text') {
+             return {
+               ...item,
+               content: item.content[0].text
+             };
+           }
+         }
+
+         return item;
+       })
+       .filter(Boolean) as ChatCompletionMessageParam[];
+   };
+
+   if (messages.length === 0) {
+     return Promise.reject('core.chat.error.Messages empty');
+   }
+
+   // filter messages file
+   const filterMessages = messages.map((item) => {
+     // If useVision=false, only retain text.
+     if (
+       item.role === ChatCompletionRequestMessageRoleEnum.User &&
+       Array.isArray(item.content) &&
+       !useVision
+     ) {
+       return {
+         ...item,
+         content: item.content.filter((item) => item.type === 'text')
+       };
+     }
+
+     return item;
+   });
+
+   const loadMessages = (await Promise.all(
+     filterMessages.map(async (item) => {
+       if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
+         return {
+           ...item,
+           content: await parseUserContent(item.content)
+         };
+       } else {
+         return item;
+       }
+     })
+   )) as ChatCompletionMessageParam[];
+
+   return clearInvalidMessages(loadMessages) as SdkChatCompletionMessageParam[];
+ };
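For instance, the image-splitting regex used by `parseStringWithImages` turns a mixed question into text and image parts (standalone sketch with a hypothetical URL):

```ts
const imageRegex = /(https?:\/\/.*\.(?:png|jpe?g|gif|webp|bmp|tiff?|svg|ico|heic|avif))/i;

const input = 'What is shown in https://example.com/photo.png please?';
const match = imageRegex.exec(input);
if (match) {
  console.log(input.slice(0, match.index));                 // "What is shown in "
  console.log(match[0]);                                    // "https://example.com/photo.png"
  console.log(input.slice(match.index + match[0].length));  // " please?"
}
```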
@@ -493,7 +493,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
    getForbidData(),
    filterCollectionByMetadata()
  ]);
+ console.log(filterCollectionIdList, '===');

  await Promise.all(
    queries.map(async (query) => {
      const [{ tokens, embeddingRecallResults }, { fullTextRecallResults }] = await Promise.all([
@@ -16,6 +16,7 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';
+ import { loadRequestMessages } from '../../../chat/utils';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.aiModel]: string;

@@ -113,6 +114,10 @@ const completions = async ({
    ]
  }
];
+ const requestMessages = await loadRequestMessages({
+   messages: chats2GPTMessages({ messages, reserveId: false }),
+   useVision: false
+ });

const ai = getAIApi({
  userKey: user.openaiAccount,

@@ -122,7 +127,7 @@ const completions = async ({
const data = await ai.chat.completions.create({
  model: cqModel.model,
  temperature: 0.01,
- messages: chats2GPTMessages({ messages, reserveId: false }),
+ messages: requestMessages,
  stream: false
});
const answer = data.choices?.[0].message?.content || '';
@@ -1,5 +1,5 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
- import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
+ import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
  countMessagesTokens,

@@ -173,6 +173,10 @@ ${description ? `- ${description}` : ''}
  messages: adaptMessages,
  maxTokens: extractModel.maxContext
});
+ const requestMessages = await loadRequestMessages({
+   messages: filterMessages,
+   useVision: false
+ });

const properties: Record<
  string,

@@ -200,7 +204,7 @@ ${description ? `- ${description}` : ''}
};

return {
- filterMessages,
+ filterMessages: requestMessages,
  agentFunction
};
};

@@ -338,6 +342,10 @@ Human: ${content}`
    ]
  }
];
+ const requestMessages = await loadRequestMessages({
+   messages: chats2GPTMessages({ messages, reserveId: false }),
+   useVision: false
+ });

const ai = getAIApi({
  userKey: user.openaiAccount,

@@ -346,7 +354,7 @@ Human: ${content}`
const data = await ai.chat.completions.create({
  model: extractModel.model,
  temperature: 0.01,
- messages: chats2GPTMessages({ messages, reserveId: false }),
+ messages: requestMessages,
  stream: false
});
const answer = data.choices?.[0].message?.content || '';
@@ -1,3 +1,5 @@
+ import { replaceVariable } from '@fastgpt/global/common/string/tools';
+
export const Prompt_Tool_Call = `<Instruction>
你是一个智能机器人,除了可以回答用户问题外,你还掌握工具的使用能力。有时候,你可以依赖工具的运行结果,来更准确的回答用户。

@@ -32,6 +34,8 @@ TOOL_RESPONSE: """
ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地玩。
</Instruction>

+ ------
+
现在,我们开始吧!下面是你本次可以使用的工具:

"""

@@ -42,3 +46,16 @@ ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地

USER: {{question}}
ANSWER: `;

+ export const getMultiplePrompt = (obj: {
+   fileCount: number;
+   imgCount: number;
+   question: string;
+ }) => {
+   const prompt = `Number of session file inputs:
+ Document:{{fileCount}}
+ Image:{{imgCount}}
+ ------
+ {{question}}`;
+   return replaceVariable(prompt, obj);
+ };
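After substitution via `replaceVariable`, the wrapped question passed to the tool-call model looks like this (an equivalent standalone sketch; counts and question are hypothetical):

```ts
const getMultiplePrompt = (obj: { fileCount: number; imgCount: number; question: string }) =>
  `Number of session file inputs:
Document:${obj.fileCount}
Image:${obj.imgCount}
------
${obj.question}`;

console.log(getMultiplePrompt({ fileCount: 1, imgCount: 2, question: 'Summarize the report' }));
// Number of session file inputs:
// Document:1
// Image:2
// ------
// Summarize the report
```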
@ -9,7 +9,7 @@ import {
|
|||
ChatCompletionMessageFunctionCall,
|
||||
ChatCompletionFunctionMessageParam,
|
||||
ChatCompletionAssistantMessageParam
|
||||
} from '@fastgpt/global/core/ai/type';
|
||||
} from '@fastgpt/global/core/ai/type.d';
|
||||
import { NextApiResponse } from 'next';
|
||||
import {
|
||||
responseWrite,
|
||||
|
|
@ -24,10 +24,11 @@ import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './ty
|
|||
import json5 from 'json5';
|
||||
import { DispatchFlowResponse } from '../../type';
|
||||
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
|
||||
import { getNanoid } from '@fastgpt/global/common/string/tools';
|
||||
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
|
||||
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
|
||||
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
|
||||
import { updateToolInputValue } from './utils';
|
||||
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
|
||||
|
||||
type FunctionRunResponseType = {
|
||||
toolRunResponse: DispatchFlowResponse;
|
||||
|
|
@ -42,7 +43,18 @@ export const runToolWithFunctionCall = async (
|
|||
},
|
||||
response?: RunToolResponse
|
||||
): Promise<RunToolResponse> => {
|
||||
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
|
||||
const {
|
||||
toolModel,
|
||||
toolNodes,
|
||||
messages,
|
||||
res,
|
||||
requestOrigin,
|
||||
runtimeNodes,
|
||||
detail = false,
|
||||
node,
|
||||
stream,
|
||||
params: { temperature = 0, maxToken = 4000, aiChatVision }
|
||||
} = props;
|
||||
const assistantResponses = response?.assistantResponses || [];
|
||||
|
||||
const functions: ChatCompletionCreateParams.Function[] = toolNodes.map((item) => {
|
||||
|
|
@ -72,44 +84,60 @@ export const runToolWithFunctionCall = async (
|
|||
};
|
||||
});
|
||||
|
||||
const filterMessages = await filterGPTMessageByMaxTokens({
|
||||
messages,
|
||||
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
|
||||
});
|
||||
const formativeMessages = filterMessages.map((item) => {
|
||||
const filterMessages = (
|
||||
await filterGPTMessageByMaxTokens({
|
||||
messages,
|
||||
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
|
||||
})
|
||||
).map((item) => {
|
||||
if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant && item.function_call) {
|
||||
return {
|
||||
...item,
|
||||
function_call: {
|
||||
name: item.function_call?.name,
|
||||
arguments: item.function_call?.arguments
|
||||
}
|
||||
},
|
||||
content: ''
|
||||
};
|
||||
}
|
||||
return item;
|
||||
});
|
||||
const requestMessages = await loadRequestMessages(formativeMessages);
|
||||
const [requestMessages, max_tokens] = await Promise.all([
|
||||
loadRequestMessages({
|
||||
messages: filterMessages,
|
||||
useVision: toolModel.vision && aiChatVision,
|
||||
origin: requestOrigin
|
||||
}),
|
||||
computedMaxToken({
|
||||
model: toolModel,
|
||||
maxToken,
|
||||
filterMessages
|
||||
})
|
||||
]);
|
||||
const requestBody: any = {
|
||||
...toolModel?.defaultConfig,
|
||||
model: toolModel.model,
|
||||
temperature: computedTemperature({
|
||||
model: toolModel,
|
||||
temperature
|
||||
}),
|
||||
max_tokens,
|
||||
stream,
|
||||
messages: requestMessages,
|
||||
functions,
|
||||
function_call: 'auto'
|
||||
};
|
||||
|
||||
// console.log(JSON.stringify(requestBody, null, 2));
|
||||
/* Run llm */
|
||||
const ai = getAIApi({
|
||||
timeout: 480000
|
||||
});
|
||||
const aiResponse = await ai.chat.completions.create(
|
||||
{
|
||||
...toolModel?.defaultConfig,
|
||||
model: toolModel.model,
|
||||
temperature: 0,
|
||||
stream,
|
||||
messages: requestMessages,
|
||||
functions,
|
||||
function_call: 'auto'
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
Accept: 'application/json, text/plain, */*'
|
||||
}
|
||||
const aiResponse = await ai.chat.completions.create(requestBody, {
|
||||
headers: {
|
||||
Accept: 'application/json, text/plain, */*'
|
||||
}
|
||||
);
|
||||
});
|
||||
|
||||
const { answer, functionCalls } = await (async () => {
|
||||
if (res && stream) {
|
||||
|
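
The hunk above drops the hard-coded `temperature: 0`, builds the request body once, and delegates to the shared `computedTemperature`/`computedMaxToken` helpers, alongside the new object-style `loadRequestMessages({ messages, useVision, origin })` that only loads images into the prompt when the model supports vision and the feature is enabled; the same refactor is applied to `promptCall.ts`, `toolChoice.ts`, and `chat/index.ts` below. A sketch of what the helpers plausibly do, assuming they mirror the inline logic they replace (the old `getMaxTokens` and temperature scaling are shown being deleted later in this diff); the real `computedMaxToken` takes `filterMessages` and counts tokens itself, so the precomputed `promptTokens` parameter here is a simplification:

```ts
type LLMModel = { maxContext: number; maxResponse: number; maxTemperature: number };

// FastGPT exposes temperature as 0-10 in the UI; rescale it into the model's own range.
const computedTemperature = ({ model, temperature }: { model: LLMModel; temperature: number }) =>
  Math.max(+(model.maxTemperature * (temperature / 10)).toFixed(2), 0.01);

// Cap the response budget so prompt tokens plus response tokens fit the context window.
const computedMaxToken = ({
  model,
  maxToken,
  promptTokens
}: {
  model: LLMModel;
  maxToken: number;
  promptTokens: number;
}) => {
  let max = Math.min(maxToken, model.maxResponse);
  if (promptTokens + max > model.maxContext) max = model.maxContext - promptTokens;
  return max <= 0 ? 200 : max; // never send a non-positive max_tokens
};
```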
|
@ -198,7 +226,7 @@ export const runToolWithFunctionCall = async (
|
|||
toolName: '',
|
||||
toolAvatar: '',
|
||||
params: '',
|
||||
response: stringToolResponse
|
||||
response: sliceStrStartEnd(stringToolResponse, 300, 300)
|
||||
}
|
||||
})
|
||||
});
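
`sliceStrStartEnd(stringToolResponse, 300, 300)` trims what gets streamed back as the tool-response preview, so oversized tool payloads no longer flood the client. Based on the helper's definition (its body is truncated in this diff, so the middle marker below is an assumption):

```ts
// Keep the head and tail of an oversized string; the '...' joiner is assumed.
const sliceStrStartEnd = (str: string, start: number, end: number) => {
  if (str.length <= start + end) return str;
  return `${str.slice(0, start)}\n\n...\n\n${str.slice(-end)}`;
};

sliceStrStartEnd('a'.repeat(5000), 300, 300).length; // 607 instead of 5000
```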
|
||||
|
|
@ -222,7 +250,7 @@ export const runToolWithFunctionCall = async (
|
|||
function_call: functionCall
|
||||
};
|
||||
const concatToolMessages = [
|
||||
...filterMessages,
|
||||
...requestMessages,
|
||||
assistantToolMsgParams
|
||||
] as ChatCompletionMessageParam[];
|
||||
const tokens = await countGptMessagesTokens(concatToolMessages, undefined, functions);
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
|
|||
import { filterToolNodeIdByEdges, getHistories } from '../../utils';
|
||||
import { runToolWithToolChoice } from './toolChoice';
|
||||
import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
|
||||
import { ChatItemType } from '@fastgpt/global/core/chat/type';
|
||||
import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
|
||||
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
|
||||
import {
|
||||
GPTMessages2Chats,
|
||||
|
|
@ -22,12 +22,46 @@ import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
|
|||
import { runToolWithFunctionCall } from './functionCall';
|
||||
import { runToolWithPromptCall } from './promptCall';
|
||||
import { replaceVariable } from '@fastgpt/global/common/string/tools';
|
||||
import { Prompt_Tool_Call } from './constants';
|
||||
import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
|
||||
import { filterToolResponseToPreview } from './utils';
|
||||
|
||||
type Response = DispatchNodeResultType<{
|
||||
[NodeOutputKeyEnum.answerText]: string;
|
||||
}>;
|
||||
|
||||
/*
|
||||
Tool call: automatically add the file prompt to the question.
|
||||
Guide the LLM to call tools.
|
||||
*/
|
||||
export const toolCallMessagesAdapt = ({
|
||||
userInput
|
||||
}: {
|
||||
userInput: UserChatItemValueItemType[];
|
||||
}) => {
|
||||
const files = userInput.filter((item) => item.type === 'file');
|
||||
|
||||
if (files.length > 0) {
|
||||
return userInput.map((item) => {
|
||||
if (item.type === 'text') {
|
||||
const filesCount = files.filter((file) => file.file?.type === 'file').length;
|
||||
const imgCount = files.filter((file) => file.file?.type === 'image').length;
|
||||
const text = item.text?.content || '';
|
||||
|
||||
return {
|
||||
...item,
|
||||
text: {
|
||||
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
return item;
|
||||
});
|
||||
}
|
||||
|
||||
return userInput;
|
||||
};
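
A worked example of the adapter, with the value items reduced to the two variants it inspects (the real `UserChatItemValueItemType` carries more fields, and the URLs are illustrative):

```ts
const userInput = [
  { type: 'file', file: { type: 'file', url: '/api/common/file/read?filename=report.pdf' } },
  { type: 'file', file: { type: 'image', url: 'https://example.com/chart.png' } },
  { type: 'text', text: { content: 'Summarize the attachments.' } }
];

// toolCallMessagesAdapt({ userInput: userInput as any }) rewrites only the text item:
// 'Number of session file inputs:\nDocument:1\nImage:1\n------\nSummarize the attachments.'
```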
|
||||
|
||||
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
|
||||
const {
|
||||
node: { nodeId, name },
|
||||
|
|
@ -62,16 +96,31 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
|
|||
|
||||
const messages: ChatItemType[] = [
|
||||
...getSystemPrompt(systemPrompt),
|
||||
...chatHistories,
|
||||
// Add file input prompt to histories
|
||||
...chatHistories.map((item) => {
|
||||
if (item.obj === ChatRoleEnum.Human) {
|
||||
return {
|
||||
...item,
|
||||
value: toolCallMessagesAdapt({
|
||||
userInput: item.value
|
||||
})
|
||||
};
|
||||
}
|
||||
return item;
|
||||
}),
|
||||
{
|
||||
obj: ChatRoleEnum.Human,
|
||||
value: runtimePrompt2ChatsValue({
|
||||
text: userChatInput,
|
||||
files: chatValue2RuntimePrompt(query).files
|
||||
value: toolCallMessagesAdapt({
|
||||
userInput: runtimePrompt2ChatsValue({
|
||||
text: userChatInput,
|
||||
files: chatValue2RuntimePrompt(query).files
|
||||
})
|
||||
})
|
||||
}
|
||||
];
|
||||
|
||||
// console.log(JSON.stringify(messages, null, 2));
|
||||
|
||||
const {
|
||||
dispatchFlowResponse, // tool flow response
|
||||
totalTokens,
|
||||
|
|
@ -98,14 +147,24 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
|
|||
}
|
||||
|
||||
const lastMessage = adaptMessages[adaptMessages.length - 1];
|
||||
if (typeof lastMessage.content !== 'string') {
|
||||
return Promise.reject('暂时只支持纯文本');
|
||||
if (typeof lastMessage.content === 'string') {
|
||||
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
|
||||
question: lastMessage.content
|
||||
});
|
||||
} else if (Array.isArray(lastMessage.content)) {
|
||||
// Array content: replace the trailing text element with the tool-call prompt
|
||||
const lastText = lastMessage.content[lastMessage.content.length - 1];
|
||||
if (lastText.type === 'text') {
|
||||
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
|
||||
question: lastText.text
|
||||
});
|
||||
} else {
|
||||
return Promise.reject('Prompt call invalid input');
|
||||
}
|
||||
} else {
|
||||
return Promise.reject('Prompt call invalid input');
|
||||
}
|
||||
|
||||
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
|
||||
question: userChatInput
|
||||
});
|
||||
|
||||
return runToolWithPromptCall({
|
||||
...props,
|
||||
toolNodes,
|
||||
|
|
@ -132,12 +191,14 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
|
|||
}, 0);
|
||||
const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
|
||||
|
||||
const previewAssistantResponses = filterToolResponseToPreview(assistantResponses);
|
||||
|
||||
return {
|
||||
[NodeOutputKeyEnum.answerText]: assistantResponses
|
||||
[NodeOutputKeyEnum.answerText]: previewAssistantResponses
|
||||
.filter((item) => item.text?.content)
|
||||
.map((item) => item.text?.content || '')
|
||||
.join(''),
|
||||
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
|
||||
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
|
||||
[DispatchNodeResponseKeyEnum.nodeResponse]: {
|
||||
totalPoints: totalPointsUsage,
|
||||
toolCallTokens: totalTokens,
|
||||
|
|
|
|||
|
|
@ -20,10 +20,16 @@ import { dispatchWorkFlow } from '../../index';
|
|||
import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type.d';
|
||||
import json5 from 'json5';
|
||||
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
|
||||
import { getNanoid, replaceVariable, sliceJsonStr } from '@fastgpt/global/common/string/tools';
|
||||
import {
|
||||
getNanoid,
|
||||
replaceVariable,
|
||||
sliceJsonStr,
|
||||
sliceStrStartEnd
|
||||
} from '@fastgpt/global/common/string/tools';
|
||||
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
|
||||
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
|
||||
import { updateToolInputValue } from './utils';
|
||||
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
|
||||
|
||||
type FunctionCallCompletion = {
|
||||
id: string;
|
||||
|
|
@ -43,7 +49,18 @@ export const runToolWithPromptCall = async (
|
|||
},
|
||||
response?: RunToolResponse
|
||||
): Promise<RunToolResponse> => {
|
||||
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
|
||||
const {
|
||||
toolModel,
|
||||
toolNodes,
|
||||
messages,
|
||||
res,
|
||||
requestOrigin,
|
||||
runtimeNodes,
|
||||
detail = false,
|
||||
node,
|
||||
stream,
|
||||
params: { temperature = 0, maxToken = 4000, aiChatVision }
|
||||
} = props;
|
||||
const assistantResponses = response?.assistantResponses || [];
|
||||
|
||||
const toolsPrompt = JSON.stringify(
|
||||
|
|
@ -77,7 +94,7 @@ export const runToolWithPromptCall = async (
|
|||
|
||||
const lastMessage = messages[messages.length - 1];
|
||||
if (typeof lastMessage.content !== 'string') {
|
||||
return Promise.reject('暂时只支持纯文本');
|
||||
return Promise.reject('Prompt call invalid input');
|
||||
}
|
||||
lastMessage.content = replaceVariable(lastMessage.content, {
|
||||
toolsPrompt
|
||||
|
|
@ -87,27 +104,40 @@ export const runToolWithPromptCall = async (
|
|||
messages,
|
||||
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
|
||||
});
|
||||
const requestMessages = await loadRequestMessages(filterMessages);
|
||||
const [requestMessages, max_tokens] = await Promise.all([
|
||||
loadRequestMessages({
|
||||
messages: filterMessages,
|
||||
useVision: toolModel.vision && aiChatVision,
|
||||
origin: requestOrigin
|
||||
}),
|
||||
computedMaxToken({
|
||||
model: toolModel,
|
||||
maxToken,
|
||||
filterMessages
|
||||
})
|
||||
]);
|
||||
const requestBody = {
|
||||
...toolModel?.defaultConfig,
|
||||
model: toolModel.model,
|
||||
temperature: computedTemperature({
|
||||
model: toolModel,
|
||||
temperature
|
||||
}),
|
||||
max_tokens,
|
||||
stream,
|
||||
messages: requestMessages
|
||||
};
|
||||
|
||||
// console.log(JSON.stringify(filterMessages, null, 2));
|
||||
// console.log(JSON.stringify(requestBody, null, 2));
|
||||
/* Run llm */
|
||||
const ai = getAIApi({
|
||||
timeout: 480000
|
||||
});
|
||||
const aiResponse = await ai.chat.completions.create(
|
||||
{
|
||||
...toolModel?.defaultConfig,
|
||||
model: toolModel.model,
|
||||
temperature: 0,
|
||||
stream,
|
||||
messages: requestMessages
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
Accept: 'application/json, text/plain, */*'
|
||||
}
|
||||
const aiResponse = await ai.chat.completions.create(requestBody, {
|
||||
headers: {
|
||||
Accept: 'application/json, text/plain, */*'
|
||||
}
|
||||
);
|
||||
});
|
||||
|
||||
const answer = await (async () => {
|
||||
if (res && stream) {
|
||||
|
|
@ -225,7 +255,7 @@ export const runToolWithPromptCall = async (
|
|||
toolName: '',
|
||||
toolAvatar: '',
|
||||
params: '',
|
||||
response: stringToolResponse
|
||||
response: sliceStrStartEnd(stringToolResponse, 300, 300)
|
||||
}
|
||||
})
|
||||
});
|
||||
|
|
@ -250,7 +280,7 @@ export const runToolWithPromptCall = async (
|
|||
function_call: toolJson
|
||||
};
|
||||
const concatToolMessages = [
|
||||
...filterMessages,
|
||||
...requestMessages,
|
||||
assistantToolMsgParams
|
||||
] as ChatCompletionMessageParam[];
|
||||
const tokens = await countGptMessagesTokens(concatToolMessages, undefined);
|
||||
|
|
|
|||
|
|
@ -28,6 +28,8 @@ import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/in
|
|||
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
|
||||
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
|
||||
import { updateToolInputValue } from './utils';
|
||||
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
|
||||
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
|
||||
|
||||
type ToolRunResponseType = {
|
||||
toolRunResponse: DispatchFlowResponse;
|
||||
|
|
@ -49,7 +51,18 @@ export const runToolWithToolChoice = async (
|
|||
},
|
||||
response?: RunToolResponse
|
||||
): Promise<RunToolResponse> => {
|
||||
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
|
||||
const {
|
||||
toolModel,
|
||||
toolNodes,
|
||||
messages,
|
||||
res,
|
||||
requestOrigin,
|
||||
runtimeNodes,
|
||||
detail = false,
|
||||
node,
|
||||
stream,
|
||||
params: { temperature = 0, maxToken = 4000, aiChatVision }
|
||||
} = props;
|
||||
const assistantResponses = response?.assistantResponses || [];
|
||||
|
||||
const tools: ChatCompletionTool[] = toolNodes.map((item) => {
|
||||
|
|
@ -81,12 +94,13 @@ export const runToolWithToolChoice = async (
|
|||
}
|
||||
};
|
||||
});
|
||||
|
||||
const filterMessages = await filterGPTMessageByMaxTokens({
|
||||
messages,
|
||||
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
|
||||
});
|
||||
const formativeMessages = filterMessages.map((item) => {
|
||||
// Filter histories by maxToken
|
||||
const filterMessages = (
|
||||
await filterGPTMessageByMaxTokens({
|
||||
messages,
|
||||
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
|
||||
})
|
||||
).map((item) => {
|
||||
if (item.role === 'assistant' && item.tool_calls) {
|
||||
return {
|
||||
...item,
|
||||
|
|
@ -99,43 +113,43 @@ export const runToolWithToolChoice = async (
|
|||
}
|
||||
return item;
|
||||
});
|
||||
const requestMessages = await loadRequestMessages(formativeMessages);
|
||||
|
||||
// console.log(
|
||||
// JSON.stringify(
|
||||
// {
|
||||
// ...toolModel?.defaultConfig,
|
||||
// model: toolModel.model,
|
||||
// temperature: 0,
|
||||
// stream,
|
||||
// messages: requestMessages,
|
||||
// tools,
|
||||
// tool_choice: 'auto'
|
||||
// },
|
||||
// null,
|
||||
// 2
|
||||
// )
|
||||
// );
|
||||
const [requestMessages, max_tokens] = await Promise.all([
|
||||
loadRequestMessages({
|
||||
messages: filterMessages,
|
||||
useVision: toolModel.vision && aiChatVision,
|
||||
origin: requestOrigin
|
||||
}),
|
||||
computedMaxToken({
|
||||
model: toolModel,
|
||||
maxToken,
|
||||
filterMessages
|
||||
})
|
||||
]);
|
||||
const requestBody: any = {
|
||||
...toolModel?.defaultConfig,
|
||||
model: toolModel.model,
|
||||
temperature: computedTemperature({
|
||||
model: toolModel,
|
||||
temperature
|
||||
}),
|
||||
max_tokens,
|
||||
stream,
|
||||
messages: requestMessages,
|
||||
tools,
|
||||
tool_choice: 'auto'
|
||||
};
|
||||
|
||||
// console.log(JSON.stringify(requestBody, null, 2));
|
||||
/* Run llm */
|
||||
const ai = getAIApi({
|
||||
timeout: 480000
|
||||
});
|
||||
const aiResponse = await ai.chat.completions.create(
|
||||
{
|
||||
...toolModel?.defaultConfig,
|
||||
model: toolModel.model,
|
||||
temperature: 0,
|
||||
stream,
|
||||
messages: requestMessages,
|
||||
tools,
|
||||
tool_choice: 'auto'
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
Accept: 'application/json, text/plain, */*'
|
||||
}
|
||||
const aiResponse = await ai.chat.completions.create(requestBody, {
|
||||
headers: {
|
||||
Accept: 'application/json, text/plain, */*'
|
||||
}
|
||||
);
|
||||
});
|
||||
|
||||
const { answer, toolCalls } = await (async () => {
|
||||
if (res && stream) {
|
||||
|
|
@ -221,7 +235,7 @@ export const runToolWithToolChoice = async (
|
|||
toolName: '',
|
||||
toolAvatar: '',
|
||||
params: '',
|
||||
response: stringToolResponse
|
||||
response: sliceStrStartEnd(stringToolResponse, 300, 300)
|
||||
}
|
||||
})
|
||||
});
|
||||
|
|
@ -243,7 +257,7 @@ export const runToolWithToolChoice = async (
|
|||
tool_calls: toolCalls
|
||||
};
|
||||
const concatToolMessages = [
|
||||
...filterMessages,
|
||||
...requestMessages,
|
||||
assistantToolMsgParams
|
||||
] as ChatCompletionMessageParam[];
|
||||
const tokens = await countGptMessagesTokens(concatToolMessages, tools);
|
||||
|
|
|
|||
|
|
@ -11,9 +11,13 @@ import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/
|
|||
|
||||
export type DispatchToolModuleProps = ModuleDispatchProps<{
|
||||
[NodeInputKeyEnum.history]?: ChatItemType[];
|
||||
[NodeInputKeyEnum.userChatInput]: string;
|
||||
|
||||
[NodeInputKeyEnum.aiModel]: string;
|
||||
[NodeInputKeyEnum.aiSystemPrompt]: string;
|
||||
[NodeInputKeyEnum.userChatInput]: string;
|
||||
[NodeInputKeyEnum.aiChatTemperature]: number;
|
||||
[NodeInputKeyEnum.aiChatMaxToken]: number;
|
||||
[NodeInputKeyEnum.aiChatVision]?: boolean;
|
||||
}>;
|
||||
|
||||
export type RunToolResponse = {
|
||||
|
|
|
|||
|
|
@ -1,3 +1,6 @@
|
|||
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
|
||||
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
|
||||
import { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
|
||||
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
|
||||
|
||||
export const updateToolInputValue = ({
|
||||
|
|
@ -12,3 +15,22 @@ export const updateToolInputValue = ({
|
|||
value: params[input.key] ?? input.value
|
||||
}));
|
||||
};
|
||||
|
||||
export const filterToolResponseToPreview = (response: AIChatItemValueItemType[]) => {
|
||||
return response.map((item) => {
|
||||
if (item.type === ChatItemValueTypeEnum.tool) {
|
||||
const formatTools = item.tools?.map((tool) => {
|
||||
return {
|
||||
...tool,
|
||||
response: sliceStrStartEnd(tool.response, 500, 500)
|
||||
};
|
||||
});
|
||||
return {
|
||||
...item,
|
||||
tools: formatTools
|
||||
};
|
||||
}
|
||||
|
||||
return item;
|
||||
});
|
||||
};
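
The preview filter keeps stored assistant responses compact: tool payloads are clipped to a 500-character head and tail, while non-tool items pass through untouched. For example (types simplified, hence the cast):

```ts
const preview = filterToolResponseToPreview([
  {
    type: ChatItemValueTypeEnum.tool,
    tools: [{ id: '1', toolName: 'search', toolAvatar: '', params: '{}', response: 'x'.repeat(5000) }]
  }
] as any);
// preview[0].tools[0].response is now roughly 1000 characters plus the truncation marker
```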
|
||||
|
|
|
|||
|
|
@ -1,9 +1,5 @@
|
|||
import type { NextApiResponse } from 'next';
|
||||
import {
|
||||
filterGPTMessageByMaxTokens,
|
||||
formatGPTMessagesInRequestBefore,
|
||||
loadRequestMessages
|
||||
} from '../../../chat/utils';
|
||||
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
|
||||
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
|
||||
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
|
||||
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
|
||||
|
|
@ -19,10 +15,7 @@ import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
|
|||
import { postTextCensor } from '../../../../common/api/requestPlusApi';
|
||||
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
|
||||
import type { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
|
||||
import {
|
||||
countGptMessagesTokens,
|
||||
countMessagesTokens
|
||||
} from '../../../../common/string/tiktoken/index';
|
||||
import { countMessagesTokens } from '../../../../common/string/tiktoken/index';
|
||||
import {
|
||||
chats2GPTMessages,
|
||||
chatValue2RuntimePrompt,
|
||||
|
|
@ -31,6 +24,7 @@ import {
|
|||
runtimePrompt2ChatsValue
|
||||
} from '@fastgpt/global/core/chat/adapt';
|
||||
import {
|
||||
Prompt_DocumentQuote,
|
||||
Prompt_QuotePromptList,
|
||||
Prompt_QuoteTemplateList
|
||||
} from '@fastgpt/global/core/ai/prompt/AIChat';
|
||||
|
|
@ -46,6 +40,7 @@ import { getHistories } from '../utils';
|
|||
import { filterSearchResultsByMaxChars } from '../../utils';
|
||||
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
|
||||
import { addLog } from '../../../../common/system/log';
|
||||
import { computedMaxToken, computedTemperature } from '../../../ai/utils';
|
||||
|
||||
export type ChatProps = ModuleDispatchProps<
|
||||
AIChatNodeProps & {
|
||||
|
|
@ -63,6 +58,7 @@ export type ChatResponse = DispatchNodeResultType<{
|
|||
export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResponse> => {
|
||||
let {
|
||||
res,
|
||||
requestOrigin,
|
||||
stream = false,
|
||||
detail = false,
|
||||
user,
|
||||
|
|
@ -79,7 +75,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
|
|||
isResponseAnswerText = true,
|
||||
systemPrompt = '',
|
||||
quoteTemplate,
|
||||
quotePrompt
|
||||
quotePrompt,
|
||||
aiChatVision,
|
||||
stringQuoteText
|
||||
}
|
||||
} = props;
|
||||
const { files: inputFiles } = chatValue2RuntimePrompt(query);
|
||||
|
|
@ -91,54 +89,43 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
|
|||
|
||||
const chatHistories = getHistories(history, histories);
|
||||
|
||||
// temperature adapt
|
||||
const modelConstantsData = getLLMModel(model);
|
||||
|
||||
if (!modelConstantsData) {
|
||||
return Promise.reject('The chat model is undefined, you need to select a chat model.');
|
||||
}
|
||||
|
||||
const { quoteText } = await filterQuote({
|
||||
const { datasetQuoteText } = await filterDatasetQuote({
|
||||
quoteQA,
|
||||
model: modelConstantsData,
|
||||
quoteTemplate
|
||||
});
|
||||
|
||||
// censor model and system key
|
||||
if (modelConstantsData.censor && !user.openaiAccount?.key) {
|
||||
await postTextCensor({
|
||||
text: `${systemPrompt}
|
||||
${quoteText}
|
||||
${userChatInput}
|
||||
`
|
||||
});
|
||||
}
|
||||
|
||||
const { filterMessages } = await getChatMessages({
|
||||
model: modelConstantsData,
|
||||
histories: chatHistories,
|
||||
quoteQA,
|
||||
quoteText,
|
||||
quotePrompt,
|
||||
userChatInput,
|
||||
inputFiles,
|
||||
systemPrompt
|
||||
});
|
||||
|
||||
const { max_tokens } = await getMaxTokens({
|
||||
model: modelConstantsData,
|
||||
maxToken,
|
||||
filterMessages
|
||||
});
|
||||
|
||||
// FastGPT temperature range: 1~10
|
||||
temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
|
||||
temperature = Math.max(temperature, 0.01);
|
||||
const ai = getAIApi({
|
||||
userKey: user.openaiAccount,
|
||||
timeout: 480000
|
||||
});
|
||||
const [{ filterMessages }] = await Promise.all([
|
||||
getChatMessages({
|
||||
model: modelConstantsData,
|
||||
histories: chatHistories,
|
||||
useDatasetQuote: quoteQA !== undefined,
|
||||
datasetQuoteText,
|
||||
datasetQuotePrompt: quotePrompt,
|
||||
userChatInput,
|
||||
inputFiles,
|
||||
systemPrompt,
|
||||
stringQuoteText
|
||||
}),
|
||||
async () => {
|
||||
// censor model and system key
|
||||
if (modelConstantsData.censor && !user.openaiAccount?.key) {
|
||||
await postTextCensor({
|
||||
text: `${systemPrompt}
|
||||
${datasetQuoteText}
|
||||
${userChatInput}
|
||||
`
|
||||
});
|
||||
}
|
||||
}
|
||||
]);
|
||||
|
||||
// Get the request messages
|
||||
const concatMessages = [
|
||||
...(modelConstantsData.defaultSystemChatPrompt
|
||||
? [
|
||||
|
|
@ -148,20 +135,39 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
|
|||
}
|
||||
]
|
||||
: []),
|
||||
...formatGPTMessagesInRequestBefore(filterMessages)
|
||||
...filterMessages
|
||||
] as ChatCompletionMessageParam[];
|
||||
|
||||
const requestMessages = await loadRequestMessages(concatMessages);
|
||||
const [requestMessages, max_tokens] = await Promise.all([
|
||||
loadRequestMessages({
|
||||
messages: concatMessages,
|
||||
useVision: modelConstantsData.vision && aiChatVision,
|
||||
origin: requestOrigin
|
||||
}),
|
||||
computedMaxToken({
|
||||
model: modelConstantsData,
|
||||
maxToken,
|
||||
filterMessages
|
||||
})
|
||||
]);
|
||||
|
||||
const requestBody = {
|
||||
...modelConstantsData?.defaultConfig,
|
||||
model: modelConstantsData.model,
|
||||
temperature,
|
||||
temperature: computedTemperature({
|
||||
model: modelConstantsData,
|
||||
temperature
|
||||
}),
|
||||
max_tokens,
|
||||
stream,
|
||||
messages: requestMessages
|
||||
};
|
||||
// console.log(JSON.stringify(requestBody, null, 2), '===');
|
||||
try {
|
||||
const ai = getAIApi({
|
||||
userKey: user.openaiAccount,
|
||||
timeout: 480000
|
||||
});
|
||||
const response = await ai.chat.completions.create(requestBody, {
|
||||
headers: {
|
||||
Accept: 'application/json, text/plain, */*'
|
||||
|
|
@ -194,7 +200,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
|
|||
}
|
||||
})();
|
||||
|
||||
const completeMessages = filterMessages.concat({
|
||||
const completeMessages = requestMessages.concat({
|
||||
role: ChatCompletionRequestMessageRoleEnum.Assistant,
|
||||
content: answerText
|
||||
});
|
||||
|
|
@ -243,7 +249,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
|
|||
}
|
||||
};
|
||||
|
||||
async function filterQuote({
|
||||
async function filterDatasetQuote({
|
||||
quoteQA = [],
|
||||
model,
|
||||
quoteTemplate
|
||||
|
|
@ -265,44 +271,52 @@ async function filterQuote({
|
|||
// slice filterSearch
|
||||
const filterQuoteQA = await filterSearchResultsByMaxChars(quoteQA, model.quoteMaxToken);
|
||||
|
||||
const quoteText =
|
||||
const datasetQuoteText =
|
||||
filterQuoteQA.length > 0
|
||||
? `${filterQuoteQA.map((item, index) => getValue(item, index).trim()).join('\n------\n')}`
|
||||
: '';
|
||||
|
||||
return {
|
||||
quoteText
|
||||
datasetQuoteText
|
||||
};
|
||||
}
|
||||
async function getChatMessages({
|
||||
quotePrompt,
|
||||
quoteText,
|
||||
quoteQA,
|
||||
datasetQuotePrompt,
|
||||
datasetQuoteText,
|
||||
useDatasetQuote,
|
||||
histories = [],
|
||||
systemPrompt,
|
||||
userChatInput,
|
||||
inputFiles,
|
||||
model
|
||||
model,
|
||||
stringQuoteText
|
||||
}: {
|
||||
quotePrompt?: string;
|
||||
quoteText: string;
|
||||
quoteQA: ChatProps['params']['quoteQA'];
|
||||
datasetQuotePrompt?: string;
|
||||
datasetQuoteText: string;
|
||||
useDatasetQuote: boolean;
|
||||
histories: ChatItemType[];
|
||||
systemPrompt: string;
|
||||
userChatInput: string;
|
||||
inputFiles: UserChatItemValueItemType['file'][];
|
||||
model: LLMModelItemType;
|
||||
stringQuoteText?: string;
|
||||
}) {
|
||||
const replaceInputValue =
|
||||
quoteQA !== undefined
|
||||
? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
|
||||
quote: quoteText,
|
||||
question: userChatInput
|
||||
})
|
||||
: userChatInput;
|
||||
const replaceInputValue = useDatasetQuote
|
||||
? replaceVariable(datasetQuotePrompt || Prompt_QuotePromptList[0].value, {
|
||||
quote: datasetQuoteText,
|
||||
question: userChatInput
|
||||
})
|
||||
: userChatInput;
|
||||
|
||||
const messages: ChatItemType[] = [
|
||||
...getSystemPrompt(systemPrompt),
|
||||
...(stringQuoteText
|
||||
? getSystemPrompt(
|
||||
replaceVariable(Prompt_DocumentQuote, {
|
||||
quote: stringQuoteText
|
||||
})
|
||||
)
|
||||
: []),
|
||||
...histories,
|
||||
{
|
||||
obj: ChatRoleEnum.Human,
|
||||
|
|
@ -323,29 +337,6 @@ async function getChatMessages({
|
|||
filterMessages
|
||||
};
|
||||
}
|
||||
async function getMaxTokens({
|
||||
maxToken,
|
||||
model,
|
||||
filterMessages = []
|
||||
}: {
|
||||
maxToken: number;
|
||||
model: LLMModelItemType;
|
||||
filterMessages: ChatCompletionMessageParam[];
|
||||
}) {
|
||||
maxToken = Math.min(maxToken, model.maxResponse);
|
||||
const tokensLimit = model.maxContext;
|
||||
|
||||
/* count response max token */
|
||||
const promptsToken = await countGptMessagesTokens(filterMessages);
|
||||
maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
|
||||
|
||||
if (maxToken <= 0) {
|
||||
maxToken = 200;
|
||||
}
|
||||
return {
|
||||
max_tokens: maxToken
|
||||
};
|
||||
}
|
||||
|
||||
async function streamResponse({
|
||||
res,
|
||||
|
|
|
|||
|
|
@ -55,6 +55,7 @@ import { surrenderProcess } from '../../../common/system/tools';
|
|||
import { dispatchRunCode } from './code/run';
|
||||
import { dispatchTextEditor } from './tools/textEditor';
|
||||
import { dispatchCustomFeedback } from './tools/customFeedback';
|
||||
import { dispatchReadFiles } from './tools/readFiles';
|
||||
|
||||
const callbackMap: Record<FlowNodeTypeEnum, Function> = {
|
||||
[FlowNodeTypeEnum.workflowStart]: dispatchWorkflowStart,
|
||||
|
|
@ -78,6 +79,7 @@ const callbackMap: Record<FlowNodeTypeEnum, Function> = {
|
|||
[FlowNodeTypeEnum.code]: dispatchRunCode,
|
||||
[FlowNodeTypeEnum.textEditor]: dispatchTextEditor,
|
||||
[FlowNodeTypeEnum.customFeedback]: dispatchCustomFeedback,
|
||||
[FlowNodeTypeEnum.readFiles]: dispatchReadFiles,
|
||||
|
||||
// none
|
||||
[FlowNodeTypeEnum.systemConfig]: dispatchSystemConfig,
|
||||
|
|
|
|||
|
|
@ -1,13 +1,16 @@
|
|||
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
|
||||
import { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
|
||||
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
|
||||
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
|
||||
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
|
||||
|
||||
export type UserChatInputProps = ModuleDispatchProps<{
|
||||
[NodeInputKeyEnum.userChatInput]: string;
|
||||
[NodeInputKeyEnum.inputFiles]: UserChatItemValueItemType['file'][];
|
||||
}>;
|
||||
type Response = {
|
||||
[NodeOutputKeyEnum.userChatInput]: string;
|
||||
[NodeOutputKeyEnum.userFiles]: string[];
|
||||
};
|
||||
|
||||
export const dispatchWorkflowStart = (props: Record<string, any>) => {
|
||||
export const dispatchWorkflowStart = (props: Record<string, any>): Response => {
|
||||
const {
|
||||
query,
|
||||
params: { userChatInput }
|
||||
|
|
@ -17,6 +20,11 @@ export const dispatchWorkflowStart = (props: Record<string, any>) => {
|
|||
|
||||
return {
|
||||
[NodeInputKeyEnum.userChatInput]: text || userChatInput,
|
||||
[NodeInputKeyEnum.inputFiles]: files
|
||||
[NodeOutputKeyEnum.userFiles]: files
|
||||
.map((item) => {
|
||||
return item?.url ?? '';
|
||||
})
|
||||
.filter(Boolean)
|
||||
// [NodeInputKeyEnum.inputFiles]: files
|
||||
};
|
||||
};
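
The start node therefore now outputs plain URL strings under `userFiles` rather than raw file objects (the commented-out line preserves the old output for reference). The mapping in isolation, with an illustrative input:

```ts
const files: { type: 'file' | 'image'; url?: string }[] = [
  { type: 'file', url: '/api/common/file/read?filename=a.pdf' },
  { type: 'image' } // an item without a url is dropped by the filter
];
const userFiles = files.map((item) => item?.url ?? '').filter(Boolean);
// -> ['/api/common/file/read?filename=a.pdf']
```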
|
||||
|
|
|
|||
|
|
@ -0,0 +1,196 @@
|
|||
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
|
||||
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
|
||||
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
|
||||
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
|
||||
import { documentFileType } from '@fastgpt/global/common/file/constants';
|
||||
import axios from 'axios';
|
||||
import { serverRequestBaseUrl } from '../../../../common/api/serverRequest';
|
||||
import { MongoRawTextBuffer } from '../../../../common/buffer/rawText/schema';
|
||||
import { readFromSecondary } from '../../../../common/mongo/utils';
|
||||
import { getErrText } from '@fastgpt/global/common/error/utils';
|
||||
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
|
||||
import { readRawContentByFileBuffer } from '../../../../common/file/read/utils';
|
||||
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
|
||||
import { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
|
||||
|
||||
type Props = ModuleDispatchProps<{
|
||||
[NodeInputKeyEnum.fileUrlList]: string[];
|
||||
}>;
|
||||
type Response = DispatchNodeResultType<{
|
||||
[NodeOutputKeyEnum.text]: string;
|
||||
}>;
|
||||
|
||||
const formatResponseObject = ({
|
||||
filename,
|
||||
url,
|
||||
content
|
||||
}: {
|
||||
filename: string;
|
||||
url: string;
|
||||
content: string;
|
||||
}) => ({
|
||||
filename,
|
||||
url,
|
||||
text: `File: ${filename}
|
||||
<Content>
|
||||
${content}
|
||||
</Content>`,
|
||||
nodeResponsePreviewText: `File: ${filename}
|
||||
<Content>
|
||||
${content.slice(0, 100)}${content.length > 100 ? '......' : ''}
|
||||
</Content>`
|
||||
});
|
||||
|
||||
export const dispatchReadFiles = async (props: Props): Promise<Response> => {
|
||||
const {
|
||||
requestOrigin,
|
||||
teamId,
|
||||
histories,
|
||||
chatConfig,
|
||||
params: { fileUrlList = [] }
|
||||
} = props;
|
||||
const maxFiles = chatConfig?.fileSelectConfig?.maxFiles || 0;
|
||||
|
||||
// Get files from histories
|
||||
const filesFromHistories = histories
|
||||
.filter((item) => {
|
||||
if (item.obj === ChatRoleEnum.Human) {
|
||||
return item.value.some((value) => value.type === 'file');
|
||||
}
|
||||
return false;
|
||||
})
|
||||
.map((item) => {
|
||||
const value = item.value as UserChatItemValueItemType[];
|
||||
const files = value
|
||||
.map((item) => {
|
||||
return item.file?.url;
|
||||
})
|
||||
.filter(Boolean) as string[];
|
||||
return files;
|
||||
})
|
||||
.flat();
|
||||
|
||||
const parseUrlList = [...fileUrlList, ...filesFromHistories].slice(0, maxFiles);
|
||||
|
||||
const readFilesResult = await Promise.all(
|
||||
parseUrlList
|
||||
.map(async (url) => {
|
||||
// System file
|
||||
if (url.startsWith('/') || (requestOrigin && url.startsWith(requestOrigin))) {
|
||||
// Parse url, get filename query. Keep only documents that can be parsed
|
||||
const parseUrl = new URL(url);
|
||||
const filenameQuery = parseUrl.searchParams.get('filename');
|
||||
if (filenameQuery) {
|
||||
const extensionQuery = filenameQuery.split('.').pop()?.toLowerCase() || '';
|
||||
if (!documentFileType.includes(extensionQuery)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the origin so intranet requests are made directly
|
||||
if (requestOrigin && url.startsWith(requestOrigin)) {
|
||||
url = url.replace(requestOrigin, '');
|
||||
}
|
||||
}
|
||||
|
||||
// Get from buffer
|
||||
const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: url }, undefined, {
|
||||
...readFromSecondary
|
||||
}).lean();
|
||||
if (fileBuffer) {
|
||||
return formatResponseObject({
|
||||
filename: fileBuffer.metadata?.filename || url,
|
||||
url,
|
||||
content: fileBuffer.rawText
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
// Get file buffer
|
||||
const response = await axios.get(url, {
|
||||
baseURL: serverRequestBaseUrl,
|
||||
responseType: 'arraybuffer'
|
||||
});
|
||||
|
||||
const buffer = Buffer.from(response.data, 'binary');
|
||||
|
||||
// Get file name
|
||||
const filename = (() => {
|
||||
const contentDisposition = response.headers['content-disposition'];
|
||||
if (contentDisposition) {
|
||||
const filenameRegex = /filename[^;=\n]*=((['"]).*?\2|[^;\n]*)/;
|
||||
const matches = filenameRegex.exec(contentDisposition);
|
||||
if (matches != null && matches[1]) {
|
||||
return decodeURIComponent(matches[1].replace(/['"]/g, ''));
|
||||
}
|
||||
}
|
||||
|
||||
return url;
|
||||
})();
|
||||
// Extension
|
||||
const extension = filename.split('.').pop()?.toLowerCase() || '';
|
||||
// Get encoding
|
||||
const encoding = (() => {
|
||||
const contentType = response.headers['content-type'];
|
||||
if (contentType) {
|
||||
const charsetRegex = /charset=([^;]*)/;
|
||||
const matches = charsetRegex.exec(contentType);
|
||||
if (matches != null && matches[1]) {
|
||||
return matches[1];
|
||||
}
|
||||
}
|
||||
|
||||
return detectFileEncoding(buffer);
|
||||
})();
|
||||
|
||||
// Read file
|
||||
const { rawText } = await readRawContentByFileBuffer({
|
||||
extension,
|
||||
isQAImport: false,
|
||||
teamId,
|
||||
buffer,
|
||||
encoding
|
||||
});
|
||||
|
||||
// Add to buffer
|
||||
try {
|
||||
if (buffer.length < 14 * 1024 * 1024 && rawText.trim()) {
|
||||
MongoRawTextBuffer.create({
|
||||
sourceId: url,
|
||||
rawText,
|
||||
metadata: {
|
||||
filename: filename
|
||||
}
|
||||
});
|
||||
}
|
||||
} catch (error) {}
|
||||
|
||||
return formatResponseObject({ filename, url, content: rawText });
|
||||
} catch (error) {
|
||||
return formatResponseObject({
|
||||
filename: '',
|
||||
url,
|
||||
content: getErrText(error, 'Load file error')
|
||||
});
|
||||
}
|
||||
})
|
||||
.filter(Boolean)
|
||||
);
|
||||
const text = readFilesResult.map((item) => item?.text ?? '').join('\n******\n');
|
||||
|
||||
return {
|
||||
[NodeOutputKeyEnum.text]: text,
|
||||
[DispatchNodeResponseKeyEnum.nodeResponse]: {
|
||||
readFiles: readFilesResult.map((item) => ({
|
||||
name: item?.filename || '',
|
||||
url: item?.url || ''
|
||||
})),
|
||||
readFilesResult: readFilesResult
|
||||
.map((item) => item?.nodeResponsePreviewText ?? '')
|
||||
.join('\n******\n')
|
||||
},
|
||||
[DispatchNodeResponseKeyEnum.toolResponses]: {
|
||||
fileContent: text
|
||||
}
|
||||
};
|
||||
};
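
Two details of the loader are easy to miss: the filename is taken from the `Content-Disposition` header when present (falling back to the raw URL), and parsed text under 14 MB is cached in `MongoRawTextBuffer` so repeat reads skip the download. The header parsing in isolation (the sample header value is illustrative):

```ts
const contentDisposition = 'attachment; filename="%E6%8A%A5%E5%91%8A.pdf"';
const matches = /filename[^;=\n]*=((['"]).*?\2|[^;\n]*)/.exec(contentDisposition);
const filename = matches?.[1]
  ? decodeURIComponent(matches[1].replace(/['"]/g, ''))
  : 'https://example.com/file.pdf'; // fallback: the node uses the request URL itself
// filename === '报告.pdf'
```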
|
||||
|
|
@ -1,10 +1,6 @@
|
|||
// @ts-nocheck
|
||||
import type { NextApiResponse } from 'next';
|
||||
import {
|
||||
filterGPTMessageByMaxTokens,
|
||||
formatGPTMessagesInRequestBefore,
|
||||
loadChatImgToBase64
|
||||
} from '../../../chat/utils';
|
||||
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
|
||||
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
|
||||
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
|
||||
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
|
||||
|
|
@ -146,25 +142,17 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
|
|||
}
|
||||
]
|
||||
: []),
|
||||
...formatGPTMessagesInRequestBefore(filterMessages)
|
||||
...filterMessages
|
||||
] as ChatCompletionMessageParam[];
|
||||
|
||||
if (concatMessages.length === 0) {
|
||||
return Promise.reject('core.chat.error.Messages empty');
|
||||
}
|
||||
|
||||
const loadMessages = await Promise.all(
|
||||
concatMessages.map(async (item) => {
|
||||
if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
|
||||
return {
|
||||
...item,
|
||||
content: await loadChatImgToBase64(item.content)
|
||||
};
|
||||
} else {
|
||||
return item;
|
||||
}
|
||||
})
|
||||
);
|
||||
const loadMessages = await loadRequestMessages({
|
||||
messages: concatMessages,
|
||||
useVision: false
|
||||
});
|
||||
|
||||
const response = await ai.chat.completions.create(
|
||||
{
|
||||
|
|
|
|||
|
|
@ -250,11 +250,13 @@ export const clearCookie = (res: NextApiResponse) => {
|
|||
};
|
||||
|
||||
/* file permission */
|
||||
export const createFileToken = (data: FileTokenQuery) => {
|
||||
export const createFileToken = ({
|
||||
expiredTime = Math.floor(Date.now() / 1000) + 60 * 30,
|
||||
...data
|
||||
}: FileTokenQuery) => {
|
||||
if (!process.env.FILE_TOKEN_KEY) {
|
||||
return Promise.reject('System unset FILE_TOKEN_KEY');
|
||||
}
|
||||
const expiredTime = Math.floor(Date.now() / 1000) + 60 * 30;
|
||||
|
||||
const key = process.env.FILE_TOKEN_KEY as string;
|
||||
const token = jwt.sign(
|
||||
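
The token expiry is now an optional `expiredTime` on `FileTokenQuery`, defaulted to 30 minutes via destructuring instead of a constant inside the body. A sketch of the resulting behavior; the field list and JWT payload shape are assumptions, since the signing call is truncated in this diff:

```ts
import jwt from 'jsonwebtoken';

// Fields simplified for the sketch; the real FileTokenQuery may carry more.
type FileTokenQuery = {
  teamId: string;
  tmbId: string;
  fileId: string;
  expiredTime?: number;
};

const createFileToken = ({
  expiredTime = Math.floor(Date.now() / 1000) + 60 * 30, // default: 30 minutes from now
  ...data
}: FileTokenQuery) => {
  if (!process.env.FILE_TOKEN_KEY) {
    return Promise.reject('System unset FILE_TOKEN_KEY');
  }
  // Payload and claims are an assumption for illustration.
  return Promise.resolve(jwt.sign({ ...data, exp: expiredTime }, process.env.FILE_TOKEN_KEY));
};
```

Callers that need a longer window, such as the multi-day file retention mentioned in the release notes, can presumably pass their own `expiredTime`.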
|
|
|
|||
|
|
@ -80,6 +80,7 @@ export const iconPaths = {
|
|||
'core/app/simpleMode/ai': () => import('./icons/core/app/simpleMode/ai.svg'),
|
||||
'core/app/simpleMode/chat': () => import('./icons/core/app/simpleMode/chat.svg'),
|
||||
'core/app/simpleMode/dataset': () => import('./icons/core/app/simpleMode/dataset.svg'),
|
||||
'core/app/simpleMode/file': () => import('./icons/core/app/simpleMode/file.svg'),
|
||||
'core/app/simpleMode/template': () => import('./icons/core/app/simpleMode/template.svg'),
|
||||
'core/app/simpleMode/tts': () => import('./icons/core/app/simpleMode/tts.svg'),
|
||||
'core/app/simpleMode/variable': () => import('./icons/core/app/simpleMode/variable.svg'),
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 21" fill="none">
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M12.3137 2.1798C12.6304 2.36279 12.9051 2.62076 13.4545 3.13669L15.8777 5.41226C16.4766 5.97467 16.7761 6.25591 16.9893 6.59022C17.1705 6.87433 17.3052 7.18555 17.3883 7.51211C17.486 7.89638 17.486 8.30718 17.486 9.12879V13.9567C17.486 15.76 17.486 16.6617 17.1295 17.3481C16.8291 17.9264 16.3575 18.398 15.7791 18.6985C15.0928 19.055 14.1911 19.055 12.3877 19.055H7.61249C5.80911 19.055 4.90741 19.055 4.22107 18.6985C3.6427 18.398 3.17112 17.9264 2.87068 17.3481C2.51416 16.6617 2.51416 15.76 2.51416 13.9567V6.85322C2.51416 5.04983 2.51416 4.14814 2.87068 3.4618C3.17112 2.88343 3.6427 2.41185 4.22107 2.11141C4.90741 1.75488 5.80911 1.75488 7.61249 1.75488H9.96446C10.7181 1.75488 11.095 1.75488 11.4511 1.83825C11.7538 1.90913 12.0445 2.02421 12.3137 2.1798ZM11.9441 4.28211C11.4498 3.80811 11.2026 3.57111 10.9904 3.56252C10.8219 3.55569 10.6593 3.62507 10.5476 3.75142C10.407 3.9105 10.407 4.25295 10.407 4.93784V5.71981C10.407 6.72166 10.407 7.22259 10.602 7.60524C10.7735 7.94183 11.0471 8.21549 11.3837 8.387C11.7664 8.58197 12.2673 8.58197 13.2691 8.58197H14.1686C14.8896 8.58197 15.2501 8.58197 15.4105 8.43647C15.5377 8.32102 15.605 8.15364 15.593 7.98227C15.578 7.76628 15.3178 7.51678 14.7974 7.0178L11.9441 4.28211ZM5.8374 11.8018C5.8374 11.3416 6.21049 10.9685 6.67073 10.9685H13.3331C13.7934 10.9685 14.1665 11.3416 14.1665 11.8018C14.1665 12.262 13.7934 12.6351 13.3331 12.6351H6.67073C6.21049 12.6351 5.8374 12.262 5.8374 11.8018ZM5.82763 14.9253C5.82763 14.4651 6.20073 14.092 6.66096 14.092H11.6623C12.1226 14.092 12.4956 14.4651 12.4956 14.9253C12.4956 15.3855 12.1225 15.7586 11.6623 15.7586H6.66096C6.20073 15.7586 5.82763 15.3855 5.82763 14.9253Z" fill="#00A9A6"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 1.8 KiB |
|
|
@ -1,10 +1,10 @@
|
|||
import React, { useMemo, useRef } from 'react';
|
||||
import MyMenu, { type Props as MyMenuProps } from '../../common/MyMenu';
|
||||
import MyMenu from '../../common/MyMenu';
|
||||
import {
|
||||
FlowNodeInputMap,
|
||||
FlowNodeInputTypeEnum
|
||||
} from '@fastgpt/global/core/workflow/node/constant';
|
||||
import { Box, Button, Flex, useTheme } from '@chakra-ui/react';
|
||||
import { Box, Button, useTheme } from '@chakra-ui/react';
|
||||
import MyIcon from '../../common/Icon';
|
||||
import { useTranslation } from 'next-i18next';
|
||||
import { useConfirm } from '../../../hooks/useConfirm';
|
||||
|
|
@ -142,11 +142,13 @@ const NodeInputSelect = ({
|
|||
|
||||
return (
|
||||
<MyMenu
|
||||
offset={[-0.5, -0.5]}
|
||||
offset={[-0.5, 0.5]}
|
||||
trigger="click"
|
||||
Button={
|
||||
<Button
|
||||
size={'xs'}
|
||||
leftIcon={<MyIcon name={renderTypeData.icon as any} w={'14px'} />}
|
||||
leftIcon={<MyIcon name={renderTypeData.icon as any} w={'0.8rem'} />}
|
||||
rightIcon={<MyIcon name={'common/select'} w={'0.8rem'} color={'myGray.500'} />}
|
||||
variant={'grayBase'}
|
||||
border={theme.borders.base}
|
||||
borderRadius={'xs'}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,22 @@
|
|||
import { useMemo } from 'react';
|
||||
|
||||
export const useWidthVariable = <T = any>({
|
||||
width,
|
||||
widthList = [900, 1200, 1500, 1800, 2100],
|
||||
list
|
||||
}: {
|
||||
width: number;
|
||||
widthList?: number[];
|
||||
list: T[];
|
||||
}) => {
|
||||
const value = useMemo(() => {
|
||||
// Find the last breakpoint that width exceeds; fall back to the first list item
|
||||
const index = widthList.findLastIndex((item) => width > item);
|
||||
if (index === -1) {
|
||||
return list[0];
|
||||
}
|
||||
return list[index];
|
||||
}, [list, width, widthList]);
|
||||
|
||||
return value;
|
||||
};
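
Usage sketch, assuming the hook is imported from this file: pick one of several layout values from a measured width. Because the lookup uses `findLastIndex` (ES2023), `list[0]` covers everything up to the second breakpoint, and each later entry covers one interval above it:

```tsx
import React from 'react';

const ColumnDemo = ({ width }: { width: number }) => {
  // width <= 1200 -> 1 column, 1200 < width <= 1500 -> 2 columns, width > 1500 -> 3 columns
  const columns = useWidthVariable({
    width,
    widthList: [900, 1200, 1500],
    list: [1, 2, 3]
  });
  return <div style={{ columnCount: columns }} />;
};
```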
|
||||
|
|
@ -12,6 +12,7 @@
|
|||
"chat_debug": "Chat Debug",
|
||||
"chat_logs": "Chat Logs",
|
||||
"chat_logs_tips": "Logs will record online, shared and API (chatId required) conversation records for this app",
|
||||
"config_file_upload": "Click to configure file upload rules",
|
||||
"confirm_copy_app_tip": "The system will create an application with the same configuration for you, but the permission will not be copied, please confirm!",
|
||||
"confirm_del_app_tip": "Confirm to delete this app and all its chat records?",
|
||||
"confirm_delete_folder_tip": "Are you sure to delete this folder? All the following applications and corresponding chat records will be deleted, please confirm!",
|
||||
|
|
@ -25,14 +26,21 @@
|
|||
},
|
||||
"current_settings": "Current settings",
|
||||
"day": "day",
|
||||
"document_quote": "Document quote",
|
||||
"document_quote_tip": "It is usually used to accept document content uploaded by users (which requires document parsing), and can also be used to reference other string data.",
|
||||
"document_upload": "Document upload",
|
||||
"edit_app": "Edit app",
|
||||
"edit_info": "Edit info",
|
||||
"execute_time": "execution time",
|
||||
"export_config_successful": "Config copied, please check for important data",
|
||||
"export_configs": "Export Configs",
|
||||
"feedback_count": "User Feedback",
|
||||
"file_upload": "file_upload",
|
||||
"file_upload_tip": "After it is enabled, you can upload documents/pictures. Documents are kept for 7 days and pictures for 15 days. Use of this feature may incur additional charges. To ensure the user experience, select an AI model with a large context length when using this function.",
|
||||
"go_to_chat": "To chat",
|
||||
"go_to_run": "Run",
|
||||
"image_upload": "Image upload",
|
||||
"image_upload_tip": "Be sure to select a visual model that can handle the picture",
|
||||
"import_configs": "Import Configs",
|
||||
"import_configs_failed": "Failed to import configs, please ensure configs are valid!",
|
||||
"interval": {
|
||||
|
|
@ -44,6 +52,9 @@
|
|||
"per_hour": "per hour"
|
||||
},
|
||||
"intro": "It is a large model application orchestration system that provides out-of-the-box data processing, model calling and other capabilities. It can quickly build a knowledge base and perform workflow orchestration through Flow visualization to realize complex knowledge base scenarios!",
|
||||
"llm_not_support_vision": "This model does not support image recognition",
|
||||
"llm_use_vision": "Enable vision",
|
||||
"llm_use_vision_tip": "When image recognition is enabled, the model automatically receives images from Dialog Uploads, as well as image links from User Questions.",
|
||||
"logs_empty": "No logs yet~",
|
||||
"logs_message_total": "Total Messages",
|
||||
"logs_title": "Title",
|
||||
|
|
@ -92,6 +103,7 @@
|
|||
"Simple bot": "Simple bot",
|
||||
"Workflow bot": "Workflow"
|
||||
},
|
||||
"upload_file_max_amount": "Maximum number of files to be uploaded in a single round",
|
||||
"version": {
|
||||
"Revert success": "Revert success"
|
||||
},
|
||||
|
|
@ -106,8 +118,15 @@
|
|||
},
|
||||
"workflow": {
|
||||
"Input guide": "Input guide",
|
||||
"file_url": "Url",
|
||||
"read_files": "Documents parse",
|
||||
"read_files_result": "Document parsing results",
|
||||
"read_files_result_desc": "The original text of the document consists of the file name and the document content. Multiple files are separated by horizontal lines.",
|
||||
"read_files_tip": "Parse the document link passed in the conversation and return the corresponding document content",
|
||||
"template": {
|
||||
"communication": "Communication"
|
||||
}
|
||||
},
|
||||
"user_file_input": "Files url",
|
||||
"user_file_input_desc": "Links to documents and images uploaded by users"
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,20 +15,23 @@
|
|||
"custom_input_guide_url": "Custom thesaurus address",
|
||||
"empty_directory": "There is nothing left to choose from in this directory~",
|
||||
"in_progress": "in progress",
|
||||
"input_guide": "Enter boot",
|
||||
"input_guide_lexicon": "vocabulary",
|
||||
"input_guide_tip": "Some preset questions can be configured. \nWhen the user enters a question, relevant questions will be obtained from these preset questions for prompts.",
|
||||
"insert_input_guide,_some_data_already_exists": "There is duplicate data, which has been automatically filtered. A total of {{len}} pieces of data have been inserted.",
|
||||
"is_chatting": "Chatting...please wait for the end",
|
||||
"items": "strip",
|
||||
"module_runtime_and": "module run time and",
|
||||
"multiple_AI_conversations": "Multiple AI conversations",
|
||||
"new_chat": "new conversation",
|
||||
"new_input_guide_lexicon": "New vocabulary",
|
||||
"plugins_output": "Plugin output",
|
||||
"question_tip": "From left to right, the response order of each module",
|
||||
"rearrangement": "Search results rearranged",
|
||||
"stream_output": "stream output",
|
||||
"view_citations": "View citations",
|
||||
"web_site_sync": "Web site synchronization"
|
||||
"web_site_sync": "Web site synchronization",
|
||||
"file_amount_over": "Exceed maximum number of files {{max}}",
|
||||
"input_guide": "Input guide",
|
||||
"input_guide_lexicon": "Lexicon",
|
||||
"input_guide_tip": "You can configure some preset questions. When the user enters a question, the relevant question is retrieved from these preset questions for prompt.",
|
||||
"insert_input_guide,_some_data_already_exists": "Duplicate data, automatically filtered, insert: {{len}} data",
|
||||
"new_input_guide_lexicon": "New lexicon",
|
||||
"select_file": "Select file",
|
||||
"select_img": "Select images"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -508,7 +508,7 @@
|
|||
"module cq result": "Classification result",
|
||||
"module extract description": "Extract requirement description",
|
||||
"module extract result": "Extraction result",
|
||||
"module historyPreview": "Complete record",
|
||||
"module historyPreview": "Record preview (only part of the content is displayed)",
|
||||
"module http result": "response body",
|
||||
"module if else Result": "Determinator result",
|
||||
"module limit": "Single search limit",
|
||||
|
|
@ -670,7 +670,7 @@
|
|||
"Total files": "A total of {{total}} files",
|
||||
"Training mode": "Training mode",
|
||||
"Upload data": "Upload data",
|
||||
"Upload file progress": "File upload progress",
|
||||
"Upload file progress": "file_upload progress",
|
||||
"Upload status": "Status",
|
||||
"Web link": "Web link",
|
||||
"Web link desc": "Read static web page content as a dataset"
|
||||
|
|
@ -1330,6 +1330,7 @@
|
|||
"minute": "minute"
|
||||
},
|
||||
"unusable_variable": "no usable variable",
|
||||
"upload_file_error": "Upload file error",
|
||||
"user": {
|
||||
"Account": "Account",
|
||||
"Amount of earnings": "Earnings (¥)",
|
||||
|
|
|
|||
|
|
@ -1,4 +1,6 @@
|
|||
{
|
||||
"bucket_chat": "Chat file",
|
||||
"bucket_file": "Dataset file",
|
||||
"click_to_view_raw_source": "View source",
|
||||
"file_name": "File Name",
|
||||
"file_size": "File Size",
|
||||
|
|
|
|||
|
|
@ -27,7 +27,19 @@
|
|||
"Code log": "Log",
|
||||
"Custom inputs": "Custom inputs",
|
||||
"Custom outputs": "Custom outputs",
|
||||
"Error": "Error"
|
||||
"Error": "Error",
|
||||
"Read file result": "Document parsing result preview",
|
||||
"read files": "parsed document"
|
||||
},
|
||||
"template": {
|
||||
"ai_chat": "LLM chat",
|
||||
"ai_chat_intro": "Call the AI model for a conversation",
|
||||
"dataset_search": "Dataset search",
|
||||
"dataset_search_intro": "Call the \"Semantic Search\" and \"Full-text Search\" capabilities to find reference content that may be related to the problem from the \"Knowledge Base\"",
|
||||
"system_config": "System Configuration",
|
||||
"tool_call": "Tool call",
|
||||
"tool_call_intro": "One or more function blocks are automatically selected for calling through the AI model, and plug-ins can also be called.",
|
||||
"workflow_start": "Process starts"
|
||||
},
|
||||
"tool_input": "Tool",
|
||||
"update_link_error": "Update link exception",
|
||||
|
|
|
|||
|
|
@ -13,21 +13,30 @@
|
|||
"chat_debug": "调试预览",
|
||||
"chat_logs": "对话日志",
|
||||
"chat_logs_tips": "日志会记录该应用的在线、分享和 API(需填写 chatId)对话记录",
|
||||
"config_file_upload": "点击配置文件上传规则",
|
||||
"confirm_copy_app_tip": "系统将为您创建一个相同配置应用,但权限不会进行复制,请确认!",
|
||||
"confirm_del_app_tip": "确认删除该应用及其所有聊天记录?",
|
||||
"confirm_delete_folder_tip": "确认删除该文件夹?将会删除它下面所有应用及对应的聊天记录,请确认!",
|
||||
"copy_one_app": "创建副本",
|
||||
"create_copy_success": "创建副本成功",
|
||||
"current_settings": "当前配置",
|
||||
"document_upload": "文档上传",
|
||||
"edit_app": "编辑应用",
|
||||
"edit_info": "编辑信息",
|
||||
"export_config_successful": "已复制配置,自动过滤部分敏感信息,请注意检查是否仍有敏感数据",
|
||||
"export_configs": "导出配置",
|
||||
"feedback_count": "用户反馈",
|
||||
"file_upload": "文件上传",
|
||||
"file_upload_tip": "开启后,可以上传文档/图片。文档保留7天,图片保留15天。使用该功能可能产生较多额外费用。为保证使用体验,使用该功能时,请选择上下文长度较大的AI模型。",
|
||||
"go_to_chat": "去对话",
|
||||
"go_to_run": "去运行",
|
||||
"image_upload": "图片上传",
|
||||
"image_upload_tip": "请确保选择可处理图片的视觉模型",
|
||||
"import_configs": "导入配置",
|
||||
"import_configs_failed": "导入配置失败,请确保配置正常!",
|
||||
"llm_not_support_vision": "该模型不支持图片识别",
|
||||
"llm_use_vision": "启用图片识别",
|
||||
"llm_use_vision_tip": "启用图片识别后,该模型会自动接收来自“对话框上传”的图片,以及“用户问题”中的图片链接。",
|
||||
"logs_empty": "还没有日志噢~",
|
||||
"logs_message_total": "消息总数",
|
||||
"logs_title": "标题",
|
||||
|
|
@ -72,14 +81,22 @@
|
|||
"Simple bot": "简易应用",
|
||||
"Workflow bot": "工作流"
|
||||
},
|
||||
"upload_file_max_amount": "单轮最大文件上传数量",
|
||||
"version": {
|
||||
"Revert success": "回滚成功"
|
||||
},
|
||||
"workflow": {
|
||||
"Input guide": "填写说明",
|
||||
"file_url": "文档链接",
|
||||
"read_files": "文档解析",
|
||||
"read_files_result": "文档解析结果",
|
||||
"read_files_result_desc": "文档原文,由文件名和文档内容组成,多个文件之间通过横线隔开。",
|
||||
"read_files_tip": "解析对话中上传的文档,返回对应文档内容",
|
||||
"template": {
|
||||
"communication": "通信"
|
||||
}
|
||||
},
|
||||
"user_file_input": "文件链接",
|
||||
"user_file_input_desc": "用户上传的文档和图片链接"
|
||||
},
|
||||
"interval": {
|
||||
"per_hour": "每小时",
|
||||
|
|
@ -109,5 +126,7 @@
|
|||
},
|
||||
"day": "日",
|
||||
"execute_time": "执行时间",
|
||||
"time_zone": "时区"
|
||||
"time_zone": "时区",
|
||||
"document_quote": "文档引用",
|
||||
"document_quote_tip": "通常用于接受用户上传的文档内容(这需要文档解析),也可以用于引用其他字符串数据。"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
"csv_input_lexicon_tip": "仅支持 CSV 批量导入,点击下载模板",
|
||||
"custom_input_guide_url": "自定义词库地址",
|
||||
"delete_all_input_guide_confirm": "确定要清空输入引导词库吗?",
|
||||
"file_amount_over": "超出最大文件数量 {{max}}",
|
||||
"input_guide": "输入引导",
|
||||
"input_guide_lexicon": "词库",
|
||||
"input_guide_tip": "可以配置一些预设的问题。在用户输入问题时,会从这些预设问题中获取相关问题进行提示。",
|
||||
|
|
@ -30,5 +31,7 @@
|
|||
"question_tip": "从上到下,为各个模块的响应顺序",
|
||||
"rearrangement": "检索结果重排",
|
||||
"web_site_sync": "Web站点同步",
|
||||
"new_chat": "新对话"
|
||||
"new_chat": "新对话",
|
||||
"select_file": "选择文件",
|
||||
"select_img": "选择图片"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -559,7 +559,7 @@
|
|||
"module cq result": "分类结果",
|
||||
"module extract description": "提取背景描述",
|
||||
"module extract result": "提取结果",
|
||||
"module historyPreview": "完整记录",
|
||||
"module historyPreview": "记录预览(仅展示部分内容)",
|
||||
"module http result": "响应体",
|
||||
"module if else Result": "判断器结果",
|
||||
"module limit": "单次搜索上限",
|
||||
|
|
@ -646,7 +646,8 @@
|
|||
"success": "开始同步"
|
||||
}
|
||||
},
|
||||
"training": {}
|
||||
"training": {
|
||||
}
|
||||
},
|
||||
"data": {
|
||||
"Auxiliary Data": "辅助数据",
|
||||
|
|
@ -857,7 +858,7 @@
|
|||
"AppSecret": "AppSecret",
|
||||
"ChatId": "当前对话 ID",
|
||||
"Current time": "当前时间",
|
||||
"Histories": "最近 10 条聊天记录",
|
||||
"Histories": "历史记录",
|
||||
"Key already exists": "Key 已经存在",
|
||||
"Key cannot be empty": "参数名不能为空",
|
||||
"Props name": "参数名",
|
||||
|
|
@ -1331,6 +1332,7 @@
|
|||
},
|
||||
"textarea_variable_picker_tip": "输入 / 可选择变量",
|
||||
"unusable_variable": "无可用变量",
|
||||
"upload_file_error": "上传文件失败",
|
||||
"user": {
|
||||
"Account": "账号",
|
||||
"Amount of earnings": "收益(¥)",
|
||||
|
|
|
|||
|
|
@ -1,9 +1,10 @@
|
|||
{
|
||||
"bucket_chat": "对话文件",
|
||||
"bucket_file": "知识库文件",
|
||||
"click_to_view_raw_source": "点击查看来源",
|
||||
"release_the_mouse_to_upload_the_file": "松开鼠标上传文件",
|
||||
"upload_error_description": "单次只支持上传多个文件或者一个文件夹",
|
||||
"file_name": "文件名",
|
||||
"file_size": "文件大小",
|
||||
"release_the_mouse_to_upload_the_file": "松开鼠标上传文件",
|
||||
"select_and_drag_file_tip": "点击或拖动文件到此处上传",
|
||||
"select_file_amount_limit": "最多选择 {{max}} 个文件",
|
||||
"some_file_count_exceeds_limit": "超出 {{maxCount}} 个文件,已自动截取",
|
||||
|
|
@ -12,5 +13,6 @@
|
|||
"support_max_count": "最多支持 {{maxCount}} 个文件",
|
||||
"support_max_size": "单个文件最大 {{maxSize}}",
|
||||
"upload_failed": "上传异常",
|
||||
"reached_max_file_count": "已达到最大文件数量"
|
||||
"reached_max_file_count": "已达到最大文件数量",
|
||||
"upload_error_description": "单次只支持上传多个文件或者一个文件夹"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,7 +25,19 @@
|
|||
"Code log": "Log 日志",
|
||||
"Custom inputs": "自定义输入",
|
||||
"Custom outputs": "自定义输出",
|
||||
"Error": "错误信息"
|
||||
"Error": "错误信息",
|
||||
"Read file result": "文档解析结果预览",
|
||||
"read files": "解析的文档"
|
||||
},
|
||||
"template": {
|
||||
"ai_chat": "AI 对话",
|
||||
"ai_chat_intro": "AI 大模型对话",
|
||||
"dataset_search": "知识库搜索",
|
||||
"dataset_search_intro": "调用“语义检索”和“全文检索”能力,从“知识库”中查找可能与问题相关的参考内容",
|
||||
"system_config": "系统配置",
|
||||
"tool_call": "工具调用",
|
||||
"tool_call_intro": "通过AI模型自动选择一个或多个功能块进行调用,也可以对插件进行调用。",
|
||||
"workflow_start": "流程开始"
|
||||
},
|
||||
"tool_input": "工具参数",
|
||||
"variable_picker_tips": "可输入节点名或变量名搜索",
|
||||
|
|
|
|||
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 1.7 MiB |
|
|
@@ -1,9 +1,9 @@
 import React, { useState } from 'react';
-import { Skeleton } from '@chakra-ui/react';
+import { ImageProps, Skeleton } from '@chakra-ui/react';
 import MyPhotoView from '@fastgpt/web/components/common/Image/PhotoView';
 import { useBoolean } from 'ahooks';
 
-const MdImage = ({ src }: { src?: string }) => {
+const MdImage = ({ src, ...props }: { src?: string } & ImageProps) => {
   const [isLoaded, { setTrue }] = useBoolean(false);
 
   const [renderSrc, setRenderSrc] = useState(src);
@@ -31,6 +31,7 @@ const MdImage = ({ src }: { src?: string }) => {
           setRenderSrc('/imgs/errImg.png');
           setTrue();
         }}
+        {...props}
       />
     </Skeleton>
   );

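Reviewer note: because `MdImage` now spreads `ImageProps` onto the underlying Chakra `<Image>`, callers can pass layout props straight through, which the reworked `FilesBlock` later in this diff relies on. A minimal usage sketch:

```tsx
import MdImage from '@/components/Markdown/img/Image';

// Any Chakra ImageProps (minH, my, borderRadius, ...) now reach the inner <Image>.
const Thumb = ({ src }: { src?: string }) => <MdImage src={src} minH={'100px'} my={0} />;
```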
@@ -6,8 +6,8 @@ import { useTranslation } from 'next-i18next';
 const VariableTip = (props: StackProps) => {
   const { t } = useTranslation();
   return (
-    <HStack fontSize={'xs'} spacing={1} {...props}>
-      <MyIcon name={'common/info'} w={'0.9rem'} transform={'translateY(1px)'} />
+    <HStack fontSize={'11px'} spacing={1} {...props}>
+      <MyIcon name={'common/info'} w={'0.8rem'} />
       <Box>{t('common:textarea_variable_picker_tip')}</Box>
     </HStack>
   );

@@ -41,8 +41,11 @@ const AIChatSettingsModal = ({
   });
   const model = watch('model');
   const showResponseAnswerText = watch(NodeInputKeyEnum.aiChatIsResponseText) !== undefined;
+  const showVisionSwitch = watch(NodeInputKeyEnum.aiChatVision) !== undefined;
   const showMaxHistoriesSlider = watch('maxHistories') !== undefined;
+  const useVision = watch('aiChatVision');
   const selectedModel = llmModelList.find((item) => item.model === model) || llmModelList[0];
+  const llmSupportVision = !!selectedModel?.vision;
 
   const tokenLimit = useMemo(() => {
     return llmModelList.find((item) => item.model === model)?.maxResponse || 4096;
@@ -65,7 +68,7 @@ const AIChatSettingsModal = ({
     alignItems: 'center',
     fontSize: 'sm',
     color: 'myGray.900',
-    width: ['80px', '90px']
+    width: ['6rem', '8rem']
   };
 
   return (
@@ -110,26 +113,24 @@ const AIChatSettingsModal = ({
         </Box>
       </Flex>
       {feConfigs && (
-        <Flex mt={8}>
+        <Flex mt={6}>
           <Box {...LabelStyles} mr={2}>
             {t('common:core.ai.Ai point price')}
           </Box>
-          <Box flex={1} ml={'10px'}>
-            {t('support.wallet.Ai point every thousand tokens', {
+          <Box flex={1}>
+            {t('common:support.wallet.Ai point every thousand tokens', {
               points: selectedModel?.charsPointsPrice || 0
             })}
           </Box>
         </Flex>
       )}
-      <Flex mt={8}>
+      <Flex mt={6}>
         <Box {...LabelStyles} mr={2}>
           {t('common:core.ai.Max context')}
         </Box>
-        <Box flex={1} ml={'10px'}>
-          {selectedModel?.maxContext || 4096}Tokens
-        </Box>
+        <Box flex={1}>{selectedModel?.maxContext || 4096}Tokens</Box>
       </Flex>
-      <Flex mt={8}>
+      <Flex mt={6}>
         <Box {...LabelStyles} mr={2}>
           {t('common:core.ai.Support tool')}
           <QuestionTip ml={1} label={t('common:core.module.template.AI support tool tip')} />
@@ -140,11 +141,11 @@ const AIChatSettingsModal = ({
             : t('common:common.not_support')}
         </Box>
       </Flex>
-      <Flex mt={8}>
+      <Flex mt={6}>
         <Box {...LabelStyles} mr={2}>
           {t('common:core.app.Temperature')}
         </Box>
-        <Box flex={1} ml={'10px'}>
+        <Box flex={1} ml={1}>
           <MySlider
             markList={[
               { label: t('common:core.app.deterministic'), value: 0 },
@@ -161,11 +162,11 @@ const AIChatSettingsModal = ({
           />
         </Box>
       </Flex>
-      <Flex mt={8}>
+      <Flex mt={6}>
         <Box {...LabelStyles} mr={2}>
           {t('common:core.app.Max tokens')}
         </Box>
-        <Box flex={1} ml={'10px'}>
+        <Box flex={1}>
           <MySlider
             markList={[
               { label: '100', value: 100 },
@@ -184,11 +185,11 @@ const AIChatSettingsModal = ({
         </Box>
       </Flex>
       {showMaxHistoriesSlider && (
-        <Flex mt={8}>
+        <Flex mt={6}>
           <Box {...LabelStyles} mr={2}>
             {t('common:core.app.Max histories')}
           </Box>
-          <Box flex={1} ml={'10px'}>
+          <Box flex={1}>
             <MySlider
               markList={[
                 { label: 0, value: 0 },
@@ -207,7 +208,7 @@ const AIChatSettingsModal = ({
         </Flex>
       )}
       {showResponseAnswerText && (
-        <Flex mt={8} alignItems={'center'}>
+        <Flex mt={6} alignItems={'center'}>
           <Box {...LabelStyles}>
             {t('common:core.app.Ai response')}
             <QuestionTip
@@ -215,7 +216,7 @@ const AIChatSettingsModal = ({
               label={t('common:core.module.template.AI response switch tip')}
             ></QuestionTip>
           </Box>
-          <Box flex={1} ml={'10px'}>
+          <Box flex={1}>
             <Switch
               isChecked={getValues(NodeInputKeyEnum.aiChatIsResponseText)}
               onChange={(e) => {
@@ -227,6 +228,29 @@ const AIChatSettingsModal = ({
           </Box>
         </Flex>
       )}
+      {showVisionSwitch && (
+        <Flex mt={6} alignItems={'center'}>
+          <Box {...LabelStyles}>
+            {t('app:llm_use_vision')}
+            <QuestionTip ml={1} label={t('app:llm_use_vision_tip')}></QuestionTip>
+          </Box>
+          <Box flex={1}>
+            {llmSupportVision ? (
+              <Switch
+                isChecked={useVision}
+                onChange={(e) => {
+                  const value = e.target.checked;
+                  setValue(NodeInputKeyEnum.aiChatVision, value);
+                }}
+              />
+            ) : (
+              <Box fontSize={'sm'} color={'myGray.500'}>
+                {t('app:llm_not_support_vision')}
+              </Box>
+            )}
+          </Box>
+        </Flex>
+      )}
     </ModalBody>
     <ModalFooter>
       <Button variant={'whiteBase'} onClick={onClose}>

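Reviewer note: two separate gates are at work in this modal. `watch(key) !== undefined` asks whether the node exposes the input at all, while `selectedModel?.vision` asks whether the chosen model can actually use it. A sketch of the pattern, assuming the usual react-hook-form `watch` semantics:

```ts
import type { UseFormWatch } from 'react-hook-form';

// watch() returns undefined when the key was never registered on the node's form,
// so the comparison doubles as a capability check for that node type.
const showVisionSwitch = (watch: UseFormWatch<Record<string, any>>) =>
  watch('aiChatVision') !== undefined;

// The switch is rendered but replaced with a hint when the model lacks vision support.
const canEnableVision = (modelSupportsVision?: boolean) => !!modelSupportsVision;
```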
@@ -1,13 +1,15 @@
-import React, { useEffect } from 'react';
+import React from 'react';
 import { useSystemStore } from '@/web/common/system/useSystemStore';
 import { LLMModelTypeEnum, llmModelTypeFilterMap } from '@fastgpt/global/core/ai/constants';
-import { Box, Button, Flex, css, useDisclosure } from '@chakra-ui/react';
+import { Box, Button, css, useDisclosure } from '@chakra-ui/react';
 import type { SettingAIDataType } from '@fastgpt/global/core/app/type.d';
 import AISettingModal from '@/components/core/ai/AISettingModal';
 import Avatar from '@fastgpt/web/components/common/Avatar';
 import { HUGGING_FACE_ICON } from '@fastgpt/global/common/system/constants';
 import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
 import { useTranslation } from 'next-i18next';
+import MyIcon from '@fastgpt/web/components/common/Icon';
+import { useMount } from 'ahooks';
 
 type Props = {
   llmModelType?: `${LLMModelTypeEnum}`;
@@ -37,14 +39,15 @@ const SettingLLMModel = ({ llmModelType = LLMModelTypeEnum.all, defaultData, onC
     onClose: onCloseAIChatSetting
   } = useDisclosure();
 
-  useEffect(() => {
+  // Set default model
+  useMount(() => {
     if (!model && modelList.length > 0) {
       onChange({
         ...defaultData,
         model: modelList[0].model
       });
     }
-  }, []);
+  });
 
   return (
     <Box
@@ -71,10 +74,13 @@ const SettingLLMModel = ({ llmModelType = LLMModelTypeEnum.all, defaultData, onC
               w={'18px'}
             />
           }
+          rightIcon={<MyIcon name={'common/select'} w={'1rem'} />}
+          pl={4}
           onClick={onOpenAIChatSetting}
         >
-          {selectedModel?.name}
+          <Box flex={1} textAlign={'left'}>
+            {selectedModel?.name}
+          </Box>
         </Button>
       </MyTooltip>
       {isOpenAIChatSetting && (

@@ -0,0 +1,147 @@
+import MyIcon from '@fastgpt/web/components/common/Icon';
+import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
+import {
+  Box,
+  Button,
+  Flex,
+  ModalBody,
+  useDisclosure,
+  Image,
+  HStack,
+  Switch,
+  ModalFooter
+} from '@chakra-ui/react';
+import React, { useMemo } from 'react';
+import { useTranslation } from 'next-i18next';
+import type { AppFileSelectConfigType } from '@fastgpt/global/core/app/type.d';
+import MyModal from '@fastgpt/web/components/common/MyModal';
+import MySlider from '@/components/Slider';
+import { defaultAppSelectFileConfig } from '@fastgpt/global/core/app/constants';
+import ChatFunctionTip from './Tip';
+import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
+import { useMount } from 'ahooks';
+
+const FileSelect = ({
+  forbidVision = false,
+  value = defaultAppSelectFileConfig,
+  onChange
+}: {
+  forbidVision?: boolean;
+  value?: AppFileSelectConfigType;
+  onChange: (e: AppFileSelectConfigType) => void;
+}) => {
+  const { t } = useTranslation();
+  const { isOpen, onOpen, onClose } = useDisclosure();
+
+  const formLabel = useMemo(
+    () =>
+      value.canSelectFile || value.canSelectImg
+        ? t('common:core.app.whisper.Open')
+        : t('common:core.app.whisper.Close'),
+    [t, value.canSelectFile, value.canSelectImg]
+  );
+
+  // Close select img switch when vision is forbidden
+  useMount(() => {
+    if (forbidVision) {
+      onChange({
+        ...value,
+        canSelectImg: false
+      });
+    }
+  });
+
+  return (
+    <Flex alignItems={'center'}>
+      <MyIcon name={'core/app/simpleMode/file'} mr={2} w={'20px'} />
+      <FormLabel>{t('app:file_upload')}</FormLabel>
+      <ChatFunctionTip type={'file'} />
+      <Box flex={1} />
+      <MyTooltip label={t('app:config_file_upload')}>
+        <Button
+          variant={'transparentBase'}
+          iconSpacing={1}
+          size={'sm'}
+          mr={'-5px'}
+          onClick={onOpen}
+        >
+          {formLabel}
+        </Button>
+      </MyTooltip>
+      <MyModal
+        iconSrc="core/app/simpleMode/file"
+        title={t('app:file_upload')}
+        isOpen={isOpen}
+        onClose={onClose}
+      >
+        <ModalBody>
+          <HStack>
+            <FormLabel flex={'1 0 0'}>{t('app:document_upload')}</FormLabel>
+            <Switch
+              isChecked={value.canSelectFile}
+              onChange={(e) => {
+                onChange({
+                  ...value,
+                  canSelectFile: e.target.checked
+                });
+              }}
+            />
+          </HStack>
+          <HStack mt={6}>
+            <FormLabel flex={'1 0 0'}>{t('app:image_upload')}</FormLabel>
+            {forbidVision ? (
+              <Box fontSize={'sm'} color={'myGray.500'}>
+                {t('app:llm_not_support_vision')}
+              </Box>
+            ) : (
+              <Switch
+                isChecked={value.canSelectImg}
+                onChange={(e) => {
+                  onChange({
+                    ...value,
+                    canSelectImg: e.target.checked
+                  });
+                }}
+              />
+            )}
+          </HStack>
+          {!forbidVision && (
+            <Box mt={2} color={'myGray.500'} fontSize={'xs'}>
+              {t('app:image_upload_tip')}
+            </Box>
+          )}
+
+          <Box mt={6}>
+            <FormLabel>{t('app:upload_file_max_amount')}</FormLabel>
+            <Box mt={5}>
+              <MySlider
+                markList={[
+                  { label: '1', value: 1 },
+                  { label: '20', value: 20 }
+                ]}
+                width={'100%'}
+                min={1}
+                max={20}
+                step={1}
+                value={value.maxFiles ?? 5}
+                onChange={(e) => {
+                  onChange({
+                    ...value,
+                    maxFiles: e
+                  });
+                }}
+              />
+            </Box>
+          </Box>
+        </ModalBody>
+        <ModalFooter>
+          <Button onClick={onClose} px={8}>
+            {t('common:common.Confirm')}
+          </Button>
+        </ModalFooter>
+      </MyModal>
+    </Flex>
+  );
+};
+
+export default FileSelect;

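Reviewer note: a minimal usage sketch for the new `FileSelect` component. The `onConfigChange` callback is hypothetical; the simple-mode `EditForm` further down wires `onChange` to `setAppForm` instead:

```tsx
import FileSelect from '@/components/core/app/FileSelect';
import type { AppFileSelectConfigType } from '@fastgpt/global/core/app/type.d';

const Example = ({
  config,
  onConfigChange
}: {
  config?: AppFileSelectConfigType;
  onConfigChange: (e: AppFileSelectConfigType) => void;
}) => <FileSelect forbidVision={false} value={config} onChange={onConfigChange} />;
```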
@@ -9,7 +9,8 @@ enum FnTypeEnum {
   nextQuestion = 'nextQuestion',
   tts = 'tts',
   variable = 'variable',
-  welcome = 'welcome'
+  welcome = 'welcome',
+  file = 'file'
 }
 
 const ChatFunctionTip = ({ type }: { type: `${FnTypeEnum}` }) => {
@@ -46,6 +47,12 @@ const ChatFunctionTip = ({ type }: { type: `${FnTypeEnum}` }) => {
       title: t('common:core.app.Welcome Text'),
       desc: t('common:core.app.tip.welcomeTextTip'),
       imgUrl: '/imgs/app/welcome.svg'
+    },
+    [FnTypeEnum.file]: {
+      icon: '/imgs/app/welcome-icon.svg',
+      title: t('app:file_upload'),
+      desc: t('app:file_upload_tip'),
+      imgUrl: '/imgs/app/fileUploadPlaceholder.svg'
     }
   });
   const data = map.current[type];

@@ -1,16 +1,14 @@
 import { useSpeech } from '@/web/common/hooks/useSpeech';
 import { useSystemStore } from '@/web/common/system/useSystemStore';
-import { Box, Flex, Image, Spinner, Textarea } from '@chakra-ui/react';
-import React, { useRef, useEffect, useCallback } from 'react';
+import { Box, Flex, HStack, Image, Spinner, Textarea } from '@chakra-ui/react';
+import React, { useRef, useEffect, useCallback, useMemo } from 'react';
 import { useTranslation } from 'next-i18next';
 import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
 import MyIcon from '@fastgpt/web/components/common/Icon';
 import { useSelectFile } from '@/web/common/file/hooks/useSelectFile';
-import { compressImgFileAndUpload } from '@/web/common/file/controller';
+import { uploadFile2DB } from '@/web/common/file/controller';
 import { ChatFileTypeEnum } from '@fastgpt/global/core/chat/constants';
-import { addDays } from 'date-fns';
-import { useRequest } from '@fastgpt/web/hooks/useRequest';
-import { MongoImageTypeEnum } from '@fastgpt/global/common/file/image/constants';
+import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
 import { ChatBoxInputFormType, ChatBoxInputType, UserInputFileItemType } from '../type';
 import { textareaMinH } from '../constants';
 import { UseFormReturn, useFieldArray } from 'react-hook-form';
@@ -19,103 +17,167 @@ import dynamic from 'next/dynamic';
 import { useContextSelector } from 'use-context-selector';
 import { getNanoid } from '@fastgpt/global/common/string/tools';
 import { useSystem } from '@fastgpt/web/hooks/useSystem';
+import { documentFileType } from '@fastgpt/global/common/file/constants';
+import { getFileIcon } from '@fastgpt/global/common/file/icon';
+import { useToast } from '@fastgpt/web/hooks/useToast';
+import { clone } from 'lodash';
+import { formatFileSize } from '@fastgpt/global/common/file/tools';
 
 const InputGuideBox = dynamic(() => import('./InputGuideBox'));
 
+const fileTypeFilter = (file: File) => {
+  return (
+    file.type.includes('image') ||
+    documentFileType.split(',').some((type) => file.name.endsWith(type.trim()))
+  );
+};
+
 const ChatInput = ({
   onSendMessage,
   onStop,
   TextareaDom,
-  showFileSelector = false,
   resetInputVal,
   chatForm,
   appId
 }: {
   onSendMessage: (val: ChatBoxInputType & { autoTTSResponse?: boolean }) => void;
   onStop: () => void;
-  showFileSelector?: boolean;
   TextareaDom: React.MutableRefObject<HTMLTextAreaElement | null>;
   resetInputVal: (val: ChatBoxInputType) => void;
   chatForm: UseFormReturn<ChatBoxInputFormType>;
   appId: string;
 }) => {
+  const { isPc } = useSystem();
+  const { toast } = useToast();
+  const { t } = useTranslation();
   const { feConfigs } = useSystemStore();
 
   const { setValue, watch, control } = chatForm;
   const inputValue = watch('input');
+
   const {
-    update: updateFile,
-    remove: removeFile,
+    update: updateFiles,
+    remove: removeFiles,
     fields: fileList,
     append: appendFile,
-    replace: replaceFile
+    replace: replaceFiles
   } = useFieldArray({
     control,
     name: 'files'
   });
 
-  const { isChatting, whisperConfig, autoTTSResponse, chatInputGuide, outLinkAuthData } =
-    useContextSelector(ChatBoxContext, (v) => v);
-  const { whisperModel } = useSystemStore();
-  const { isPc } = useSystem();
-
-  const canvasRef = useRef<HTMLCanvasElement>(null);
-  const { t } = useTranslation();
+  const {
+    chatId,
+    isChatting,
+    whisperConfig,
+    autoTTSResponse,
+    chatInputGuide,
+    outLinkAuthData,
+    fileSelectConfig
+  } = useContextSelector(ChatBoxContext, (v) => v);
 
   const havInput = !!inputValue || fileList.length > 0;
   const hasFileUploading = fileList.some((item) => !item.url);
   const canSendMessage = havInput && !hasFileUploading;
 
+  const showSelectFile = fileSelectConfig.canSelectFile;
+  const showSelectImg = fileSelectConfig.canSelectImg;
+  const maxSelectFiles = fileSelectConfig.maxFiles ?? 10;
+  const maxSize = (feConfigs?.uploadFileMaxSize || 1024) * 1024 * 1024; // nkb
+  const { icon: selectFileIcon, tooltip: selectFileTip } = useMemo(() => {
+    if (showSelectFile) {
+      return {
+        icon: 'core/chat/fileSelect',
+        tooltip: t('chat:select_file')
+      };
+    } else if (showSelectImg) {
+      return {
+        icon: 'core/chat/fileSelect',
+        tooltip: t('chat:select_img')
+      };
+    }
+    return {};
+  }, [showSelectFile, showSelectImg, t]);
+
   /* file selector and upload */
   const { File, onOpen: onOpenSelectFile } = useSelectFile({
-    fileType: 'image/*',
+    fileType: `${showSelectImg ? 'image/*,' : ''} ${showSelectFile ? documentFileType : ''}`,
     multiple: true,
-    maxCount: 10
+    maxCount: maxSelectFiles
   });
-  const { mutate: uploadFile } = useRequest({
-    mutationFn: async ({ file, fileIndex }: { file: UserInputFileItemType; fileIndex: number }) => {
-      if (file.type === ChatFileTypeEnum.image && file.rawFile) {
+  useRequest2(
+    async () => {
+      const filterFiles = fileList.filter((item) => item.status === 0);
+
+      if (filterFiles.length === 0) return;
+
+      replaceFiles(fileList.map((item) => ({ ...item, status: 1 })));
+
+      for (const file of filterFiles) {
+        if (!file.rawFile) continue;
+
         try {
-          const url = await compressImgFileAndUpload({
-            type: MongoImageTypeEnum.chatImage,
+          const { fileId, previewUrl } = await uploadFile2DB({
             file: file.rawFile,
-            maxW: 4320,
-            maxH: 4320,
-            maxSize: 1024 * 1024 * 16,
-            // 7 day expired.
-            expiredTime: addDays(new Date(), 7),
-            ...outLinkAuthData
+            bucketName: 'chat',
+            metadata: {
+              chatId
+            }
           });
-          updateFile(fileIndex, {
+
+          updateFiles(fileList.findIndex((item) => item.id === file.id)!, {
             ...file,
-            url
+            status: 1,
+            url: `${location.origin}${previewUrl}`
          });
        } catch (error) {
-          removeFile(fileIndex);
+          removeFiles(fileList.findIndex((item) => item.id === file.id)!);
          console.log(error);
          return Promise.reject(error);
        }
+      }
    },
-    errorToast: t('common:common.Upload File Failed')
-  });
+    {
+      manual: false,
+      errorToast: t('common:upload_file_error'),
+      refreshDeps: [fileList]
+    }
+  );
   const onSelectFile = useCallback(
     async (files: File[]) => {
       if (!files || files.length === 0) {
         return;
       }
+      // filter max files
+      if (fileList.length + files.length > maxSelectFiles) {
+        files = files.slice(0, maxSelectFiles - fileList.length);
+        toast({
+          status: 'warning',
+          title: t('chat:file_amount_over', { max: maxSelectFiles })
+        });
+      }
+
+      const filterFilesByMaxSize = files.filter((file) => file.size <= maxSize);
+      if (filterFilesByMaxSize.length < files.length) {
+        toast({
+          status: 'warning',
+          title: t('file:some_file_size_exceeds_limit', { maxSize: formatFileSize(maxSize) })
+        });
+      }
+
       const loadFiles = await Promise.all(
-        files.map(
+        filterFilesByMaxSize.map(
          (file) =>
            new Promise<UserInputFileItemType>((resolve, reject) => {
              if (file.type.includes('image')) {
                const reader = new FileReader();
                reader.readAsDataURL(file);
                reader.onload = () => {
-                  const item = {
+                  const item: UserInputFileItemType = {
                    id: getNanoid(6),
                    rawFile: file,
                    type: ChatFileTypeEnum.image,
                    name: file.name,
-                    icon: reader.result as string
+                    icon: reader.result as string,
+                    status: 0
                  };
                  resolve(item);
                };
@@ -128,22 +190,28 @@ const ChatInput = ({
                  rawFile: file,
                  type: ChatFileTypeEnum.file,
                  name: file.name,
-                  icon: 'file/pdf'
+                  icon: getFileIcon(file.name),
+                  status: 0
                });
              }
            })
        )
      );
-      appendFile(loadFiles);
-
-      loadFiles.forEach((file, i) =>
-        uploadFile({
-          file,
-          fileIndex: i + fileList.length
+      // Document, image
+      const concatFileList = clone(
+        fileList.concat(loadFiles).sort((a, b) => {
+          if (a.type === ChatFileTypeEnum.image && b.type === ChatFileTypeEnum.file) {
+            return 1;
+          } else if (a.type === ChatFileTypeEnum.file && b.type === ChatFileTypeEnum.image) {
+            return -1;
+          }
+          return 0;
        })
      );
+      replaceFiles(concatFileList);
    },
-    [appendFile, fileList.length, uploadFile]
+    [fileList, maxSelectFiles, replaceFiles, toast, t]
   );
 
   /* on send */
@@ -155,10 +223,12 @@ const ChatInput = ({
       text: textareaValue.trim(),
       files: fileList
     });
-    replaceFile([]);
+    replaceFiles([]);
   };
 
   /* whisper init */
+  const { whisperModel } = useSystemStore();
+  const canvasRef = useRef<HTMLCanvasElement>(null);
   const {
     isSpeaking,
     isTransCription,
@@ -194,12 +264,12 @@ const ChatInput = ({
           files: fileList,
           autoTTSResponse
         });
-        replaceFile([]);
+        replaceFiles([]);
       } else {
         resetInputVal({ text });
       }
     },
-    [autoTTSResponse, fileList, onSendMessage, replaceFile, resetInputVal, whisperConfig?.autoSend]
+    [autoTTSResponse, fileList, onSendMessage, replaceFiles, resetInputVal, whisperConfig?.autoSend]
   );
   const onWhisperRecord = useCallback(() => {
     if (isSpeaking) {
@@ -261,13 +331,20 @@ const ChatInput = ({
       </Flex>
 
       {/* file preview */}
-      <Flex wrap={'wrap'} px={[2, 4]} userSelect={'none'}>
+      <Flex
+        wrap={'wrap'}
+        px={[2, 4]}
+        userSelect={'none'}
+        gap={2}
+        mb={fileList.length > 0 ? 2 : 0}
+      >
         {fileList.map((item, index) => (
           <Box
             key={item.id}
-            border={'1px solid rgba(0,0,0,0.12)'}
-            mr={2}
-            mb={2}
+            border={'1px solid #E8EBF0'}
+            boxShadow={
+              '0px 2.571px 6.429px 0px rgba(19, 51, 107, 0.08), 0px 0px 0.643px 0px rgba(19, 51, 107, 0.08)'
+            }
             rounded={'md'}
             position={'relative'}
             _hover={{
@@ -297,13 +374,13 @@ const ChatInput = ({
                 h={'16px'}
                 color={'myGray.700'}
                 cursor={'pointer'}
-                _hover={{ color: 'primary.500' }}
+                _hover={{ color: 'red.500' }}
                 position={'absolute'}
                 bg={'white'}
                 right={'-8px'}
                 top={'-8px'}
                 onClick={() => {
-                  removeFile(index);
+                  removeFiles(index);
                 }}
                 className="close-icon"
                 display={['', 'none']}
@@ -312,19 +389,27 @@ const ChatInput = ({
               <Image
                 alt={'img'}
                 src={item.icon}
-                w={['50px', '70px']}
-                h={['50px', '70px']}
+                w={['2rem', '3rem']}
+                h={['2rem', '3rem']}
                 borderRadius={'md'}
                 objectFit={'contain'}
               />
             )}
+            {item.type === ChatFileTypeEnum.file && (
+              <HStack minW={['100px', '150px']} maxW={'250px'} p={2}>
+                <MyIcon name={item.icon as any} w={['1.5rem', '2rem']} h={['1.5rem', '2rem']} />
+                <Box flex={'1 0 0'} className="textEllipsis" fontSize={'xs'}>
+                  {item.name}
+                </Box>
+              </HStack>
+            )}
           </Box>
         ))}
       </Flex>
 
       <Flex alignItems={'flex-end'} mt={fileList.length > 0 ? 1 : 0} pl={[2, 4]}>
         {/* file selector */}
-        {showFileSelector && (
+        {(showSelectFile || showSelectImg) && (
           <Flex
             h={'22px'}
             alignItems={'center'}
@@ -336,8 +421,8 @@ const ChatInput = ({
               onOpenSelectFile();
             }}
           >
-            <MyTooltip label={t('common:core.chat.Select Image')}>
-              <MyIcon name={'core/chat/fileSelect'} w={'18px'} color={'myGray.600'} />
+            <MyTooltip label={selectFileTip}>
+              <MyIcon name={selectFileIcon as any} w={'18px'} color={'myGray.600'} />
             </MyTooltip>
             <File onSelect={onSelectFile} />
           </Flex>
@@ -404,12 +489,19 @@ const ChatInput = ({
             }}
             onPaste={(e) => {
               const clipboardData = e.clipboardData;
-              if (clipboardData && showFileSelector) {
+              if (clipboardData && (showSelectFile || showSelectImg)) {
                 const items = clipboardData.items;
                 const files = Array.from(items)
                   .map((item) => (item.kind === 'file' ? item.getAsFile() : undefined))
-                  .filter(Boolean) as File[];
+                  .filter((file) => {
+                    console.log(file);
+                    return file && fileTypeFilter(file);
+                  }) as File[];
                 onSelectFile(files);
+
+                if (files.length > 0) {
+                  e.stopPropagation();
+                }
               }
             }}
           />

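Reviewer note: the upload rewrite replaces the per-file mutation with a queue keyed off the new `status` field. Files enter the field array at `status: 0`; the auto-running `useRequest2` effect marks the pending batch as in flight, uploads each file to the `chat` bucket and patches in the preview URL; sending stays disabled while any file still lacks a `url`. The invariants, restated as a sketch:

```ts
type QueuedFile = { id: string; status: 0 | 1; url?: string };

// status 0 = waiting to upload; status 1 = in flight (done once url is set).
const pendingBatch = (files: QueuedFile[]) => files.filter((f) => f.status === 0);
const markInFlight = (files: QueuedFile[]) =>
  files.map((f) => ({ ...f, status: 1 as const }));
const canSend = (files: QueuedFile[], hasText: boolean) =>
  (hasText || files.length > 0) && files.every((f) => !!f.url);
```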
@@ -3,6 +3,7 @@ import { useAudioPlay } from '@/web/common/utils/voice';
 import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
 import {
   AppChatConfigType,
+  AppFileSelectConfigType,
   AppTTSConfigType,
   AppWhisperConfigType,
   ChatInputGuideConfigType,
@@ -10,6 +11,7 @@ import {
 } from '@fastgpt/global/core/app/type';
 import { ChatHistoryItemResType, ChatSiteItemType } from '@fastgpt/global/core/chat/type';
 import {
+  defaultAppSelectFileConfig,
   defaultChatInputGuideConfig,
   defaultTTSConfig,
   defaultWhisperConfig
@@ -64,6 +66,7 @@ type useChatStoreType = OutLinkChatAuthProps &
   chatInputGuide: ChatInputGuideConfigType;
   outLinkAuthData: OutLinkChatAuthProps;
   getHistoryResponseData: ({ dataId }: { dataId: string }) => Promise<ChatHistoryItemResType[]>;
+  fileSelectConfig: AppFileSelectConfigType;
 };
 
 export const ChatBoxContext = createContext<useChatStoreType>({
@@ -146,7 +149,8 @@ const Provider = ({
     questionGuide = false,
     ttsConfig = defaultTTSConfig,
     whisperConfig = defaultWhisperConfig,
-    chatInputGuide = defaultChatInputGuideConfig
+    chatInputGuide = defaultChatInputGuideConfig,
+    fileSelectConfig = defaultAppSelectFileConfig
   } = useMemo(() => chatConfig, [chatConfig]);
 
   const outLinkAuthData = useMemo(
@@ -215,6 +219,7 @@ const Provider = ({
     allVariableList: variables,
     questionGuide,
     ttsConfig,
+    fileSelectConfig,
     whisperConfig,
     autoTTSResponse,
     startSegmentedAudio,

@@ -73,12 +73,11 @@
   const ContentCard = useMemo(() => {
     if (type === 'Human') {
       const { text, files = [] } = formatChatValue2InputType(chat.value);
 
       return (
-        <>
+        <Flex flexDirection={'column'} gap={4}>
           {files.length > 0 && <FilesBlock files={files} />}
-          <Markdown source={text} />
-        </>
+          {text && <Markdown source={text} />}
+        </Flex>
       );
     }
 

@@ -1,22 +1,89 @@
-import { Box, Flex, Grid } from '@chakra-ui/react';
+import { Box, Flex, Grid, Text } from '@chakra-ui/react';
 import MdImage from '@/components/Markdown/img/Image';
 import { UserInputFileItemType } from '@/components/core/chat/ChatContainer/ChatBox/type';
+import MyIcon from '@fastgpt/web/components/common/Icon';
+import React, { useCallback, useLayoutEffect, useMemo, useRef, useState } from 'react';
+import { clone } from 'lodash';
+import { ChatFileTypeEnum } from '@fastgpt/global/core/chat/constants';
+import { useSystem } from '@fastgpt/web/hooks/useSystem';
+import { useWidthVariable } from '@fastgpt/web/hooks/useWidthVariable';
 
 const FilesBlock = ({ files }: { files: UserInputFileItemType[] }) => {
+  const chartRef = useRef<HTMLDivElement>(null);
+  const [width, setWidth] = useState(400);
+  const { isPc } = useSystem();
+  const gridColumns = useWidthVariable({
+    width,
+    widthList: [300, 500, 700],
+    list: ['1fr', 'repeat(2, 1fr)', 'repeat(3, 1fr)']
+  });
+
+  // sort files, file->image
+  const sortFiles = useMemo(() => {
+    return clone(files).sort((a, b) => {
+      if (a.type === ChatFileTypeEnum.image && b.type === ChatFileTypeEnum.file) {
+        return 1;
+      } else if (a.type === ChatFileTypeEnum.file && b.type === ChatFileTypeEnum.image) {
+        return -1;
+      }
+      return 0;
+    });
+  }, [files]);
+
+  const computedChatItemWidth = useCallback(() => {
+    if (!chartRef.current) return;
+
+    // 一直找到 parent = markdown 的元素
+    let parent = chartRef.current?.parentElement;
+    while (parent && !parent.className.includes('chat-box-card')) {
+      parent = parent.parentElement;
+    }
+
+    const clientWidth = parent?.clientWidth ?? 400;
+    setWidth(clientWidth);
+    return parent;
+  }, [isPc]);
+
+  useLayoutEffect(() => {
+    computedChatItemWidth();
+  }, [computedChatItemWidth]);
+
   return (
-    <Grid gridTemplateColumns={['1fr', '1fr 1fr']} gap={4}>
-      {files.map(({ id, type, name, url }, i) => {
-        if (type === 'image') {
-          return (
-            <Box key={i} rounded={'md'} flex={'1 0 0'} minW={'120px'}>
-              <MdImage src={url} />
-            </Box>
-          );
-        }
-        return null;
-      })}
+    <Grid ref={chartRef} gridTemplateColumns={gridColumns} gap={4} alignItems={'flex-start'}>
+      {sortFiles.map(({ id, type, name, url, icon }, i) => (
+        <Box key={i} bg={'white'} borderRadius={'md'} overflow="hidden">
+          {type === 'image' && <MdImage src={url} minH={'100px'} my={0} />}
+          {type === 'file' && (
+            <Flex
+              p={2}
+              w={'100%'}
+              alignItems="center"
+              cursor={'pointer'}
+              onClick={() => {
+                window.open(url);
+              }}
+            >
+              <MyIcon
+                name={icon as any}
+                flexShrink={0}
+                w={['1.5rem', '2rem']}
+                h={['1.5rem', '2rem']}
+              />
+              <Text
+                ml={2}
+                fontSize={'xs'}
+                overflow="hidden"
+                textOverflow="ellipsis"
+                whiteSpace="nowrap"
+              >
+                {name || url}
+              </Text>
+            </Flex>
+          )}
+        </Box>
+      ))}
     </Grid>
   );
 };
 
-export default FilesBlock;
+export default React.memo(FilesBlock);

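Reviewer note: `useWidthVariable` is not shown in this diff; from the call site it appears to map the measured container width onto a breakpoint list. A plausible equivalent, stated purely as an assumption:

```ts
// Assumed semantics: with widthList [300, 500, 700], widths up to 300 pick list[0],
// up to 500 pick list[1], and so on, falling back to the last entry.
const pickByWidth = <T>(width: number, widthList: number[], list: T[]): T => {
  const idx = widthList.findIndex((w) => width <= w);
  return idx === -1 ? list[list.length - 1] : list[idx];
};
```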
@@ -75,7 +75,6 @@ type Props = OutLinkChatAuthProps &
   showVoiceIcon?: boolean;
   showEmptyIntro?: boolean;
   userAvatar?: string;
-  showFileSelector?: boolean;
   active?: boolean; // can use
   appId: string;
 
@@ -105,7 +104,6 @@ const ChatBox = (
   showEmptyIntro = false,
   appAvatar,
   userAvatar,
-  showFileSelector,
   active = true,
   appId,
   chatId,
@@ -378,7 +376,9 @@ const ChatBox = (
       return;
     }
 
+    // Abort the previous request
     abortRequest();
+    questionGuideController.current?.abort('stop');
 
     text = text.trim();
 
@@ -390,14 +390,13 @@ const ChatBox = (
      return;
    }
 
-    // delete invalid variables, 只保留在 variableList 中的变量
+    // Only declared variables are kept
    const requestVariables: Record<string, any> = {};
    allVariableList?.forEach((item) => {
      requestVariables[item.key] = variables[item.key] || '';
    });
 
    const responseChatId = getNanoid(24);
-    questionGuideController.current?.abort('stop');
 
    // set auto audio playing
    if (autoTTSResponse) {
@@ -980,7 +979,6 @@ const ChatBox = (
            onStop={() => chatController.current?.abort('stop')}
            TextareaDom={TextareaDom}
            resetInputVal={resetInputVal}
-            showFileSelector={showFileSelector}
            chatForm={chatForm}
            appId={appId}
          />

@@ -13,6 +13,7 @@ export type UserInputFileItemType = {
   type: `${ChatFileTypeEnum}`;
   name: string;
   icon: string; // img is base64
+  status: 0 | 1; // 0: uploading, 1: success
   url?: string;
 };
 

@@ -1,6 +1,7 @@
 import { ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
 import { ChatBoxInputType, UserInputFileItemType } from './type';
 import { getNanoid } from '@fastgpt/global/common/string/tools';
+import { getFileIcon } from '@fastgpt/global/common/file/icon';
 
 export const formatChatValue2InputType = (value?: ChatItemValueItemType[]): ChatBoxInputType => {
   if (!value) {
@@ -15,15 +16,16 @@ export const formatChatValue2InputType = (value?: ChatItemValueItemType[]): Chat
     .filter((item) => item.text?.content)
     .map((item) => item.text?.content || '')
     .join('');
+
   const files =
     (value
-      .map((item) =>
+      ?.map((item) =>
         item.type === 'file' && item.file
           ? {
-              id: getNanoid(),
+              id: item.file.url,
              type: item.file.type,
              name: item.file.name,
-              icon: '',
+              icon: getFileIcon(item.file.name),
              url: item.file.url
            }
          : undefined

@@ -105,19 +105,19 @@ ${JSON.stringify(questionGuides)}`;
           overflowY={'auto'}
         >
           {toolParams && toolParams !== '{}' && (
-            <Markdown
-              source={`~~~json#Input
-${toolParams}`}
-            />
-          )}
-          {toolResponse && (
-            <Box mt={3}>
+            <Box mb={3}>
               <Markdown
-                source={`~~~json#Response
-${toolResponse}`}
+                source={`~~~json#Input
+${toolParams}`}
               />
             </Box>
           )}
+          {toolResponse && (
+            <Markdown
+              source={`~~~json#Response
+${toolResponse}`}
+            />
+          )}
         </AccordionPanel>
       </AccordionItem>
     </Accordion>

@@ -1,5 +1,5 @@
 import React, { useMemo, useState } from 'react';
-import { Box, Flex, BoxProps, useDisclosure } from '@chakra-ui/react';
+import { Box, Flex, BoxProps, useDisclosure, HStack } from '@chakra-ui/react';
 import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type.d';
 import { useTranslation } from 'next-i18next';
 import { moduleTemplatesFlat } from '@fastgpt/global/core/workflow/template/constants';
@@ -16,6 +16,7 @@ import MyIcon from '@fastgpt/web/components/common/Icon';
 import { useContextSelector } from 'use-context-selector';
 import { ChatBoxContext } from '../ChatContainer/ChatBox/Provider';
 import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
+import { getFileIcon } from '@fastgpt/global/common/file/icon';
 
 type sideTabItemType = {
   moduleLogo?: string;
@@ -34,7 +35,7 @@ function RowRender({
 }: { children: React.ReactNode; label: string } & BoxProps) {
   return (
     <Box mb={3}>
-      <Box fontSize={'sm'} mb={mb} flex={'0 0 90px'}>
+      <Box fontSize={'sm'} mb={mb} color={'myGray.800'} flex={'0 0 90px'}>
         {label}:
       </Box>
       <Box borderRadius={'sm'} fontSize={['xs', 'sm']} bg={'myGray.50'} {...props}>
@@ -435,9 +436,50 @@ export const WholeResponseContent = ({
             value={activeModule?.textOutput}
           />
           {/* code */}
-          <Row label={workflowT('response.Custom outputs')} value={activeModule?.customOutputs} />
-          <Row label={workflowT('response.Custom inputs')} value={activeModule?.customInputs} />
-          <Row label={workflowT('response.Code log')} value={activeModule?.codeLog} />
+          <>
+            <Row
+              label={t('workflow:response.Custom outputs')}
+              value={activeModule?.customOutputs}
+            />
+            <Row label={t('workflow:response.Custom inputs')} value={activeModule?.customInputs} />
+            <Row label={t('workflow:response.Code log')} value={activeModule?.codeLog} />
+          </>
+
+          {/* read files */}
+          <>
+            {activeModule?.readFiles && activeModule?.readFiles.length > 0 && (
+              <Row
+                label={t('workflow:response.read files')}
+                rawDom={
+                  <Flex flexWrap={'wrap'} gap={3} px={4} py={2}>
+                    {activeModule?.readFiles.map((file, i) => (
+                      <HStack
+                        key={i}
+                        bg={'white'}
+                        boxShadow={'base'}
+                        borderRadius={'sm'}
+                        py={1}
+                        px={2}
+                        {...(file.url
+                          ? {
+                              cursor: 'pointer',
+                              onClick: () => window.open(file.url)
+                            }
+                          : {})}
+                      >
+                        <MyIcon name={getFileIcon(file.name) as any} w={'1rem'} />
+                        <Box>{file.name}</Box>
+                      </HStack>
+                    ))}
+                  </Flex>
+                }
+              />
+            )}
+            <Row
+              label={t('workflow:response.Read file result')}
+              value={activeModule?.readFilesResult}
+            />
+          </>
         </Box>
       )}
     </>

@@ -1,3 +1,4 @@
+import { AppSchema } from '@fastgpt/global/core/app/type';
 import { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type';
 import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
 import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type';

@@ -38,7 +38,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
   })();
 
   res.setHeader('Content-Type', `${file.contentType}; charset=${encoding}`);
-  res.setHeader('Cache-Control', 'public, max-age=3600');
+  res.setHeader('Cache-Control', 'public, max-age=31536000');
   res.setHeader('Content-Disposition', `inline; filename="${encodeURIComponent(file.filename)}"`);
 
   stream.pipe(res);

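Reviewer note: raising `max-age` from one hour to a year is only safe if the bytes behind a given fileId never change, which holds here since every GridFS upload gets a fresh id. Making that explicit with `immutable` would be a natural follow-up (not part of this commit):

```ts
import type { NextApiResponse } from 'next';

// Sketch: year-long public caching for per-upload, never-rewritten file URLs.
const setLongLivedCache = (res: NextApiResponse) =>
  res.setHeader('Cache-Control', 'public, max-age=31536000, immutable');
```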
@@ -1,12 +1,14 @@
 import type { NextApiRequest, NextApiResponse } from 'next';
 import { jsonRes } from '@fastgpt/service/common/response';
-import { connectToDatabase } from '@/service/mongo';
 import { authCert } from '@fastgpt/service/support/permission/auth/common';
 import { uploadFile } from '@fastgpt/service/common/file/gridfs/controller';
 import { getUploadModel } from '@fastgpt/service/common/file/multer';
 import { removeFilesByPaths } from '@fastgpt/service/common/file/utils';
+import { NextAPI } from '@/service/middleware/entry';
+import { createFileToken } from '@fastgpt/service/support/permission/controller';
+import { ReadFileBaseUrl } from '@fastgpt/global/common/file/constants';
 
-export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
+async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
   /* Creates the multer uploader */
   const upload = getUploadModel({
     maxSize: (global.feConfigs?.uploadFileMaxSize || 500) * 1024 * 1024
@@ -14,11 +16,8 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
   const filePaths: string[] = [];
 
   try {
-    await connectToDatabase();
     const { file, bucketName, metadata } = await upload.doUpload(req, res);
-
     filePaths.push(file.path);
-
     const { teamId, tmbId } = await authCert({ req, authToken: true });
 
     if (!bucketName) {
@@ -35,8 +34,21 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
       metadata: metadata
     });
 
-    jsonRes(res, {
-      data: fileId
+    jsonRes<{
+      fileId: string;
+      previewUrl: string;
+    }>(res, {
+      data: {
+        fileId,
+        previewUrl: `${ReadFileBaseUrl}?filename=${file.originalname}&token=${await createFileToken(
+          {
+            bucketName,
+            teamId,
+            tmbId,
+            fileId
+          }
+        )}`
+      }
     });
   } catch (error) {
     jsonRes(res, {
@@ -48,6 +60,8 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
     removeFilesByPaths(filePaths);
   }
 }
+
+export default NextAPI(handler);
 
 export const config = {
   api: {
     bodyParser: false

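Reviewer note: `createFileToken` itself is outside this hunk; judging from the fields passed in, it signs a scope that the `/api/common/file/read` endpoint later verifies. A JWT-style sketch of what such a helper might look like (secret handling and expiry are assumptions, not the actual implementation):

```ts
import jwt from 'jsonwebtoken';

type FileTokenPayload = {
  bucketName: string;
  teamId: string;
  tmbId: string;
  fileId: string;
};

// Hypothetical equivalent: sign the scope so the read endpoint can verify it later.
const signFileToken = (payload: FileTokenPayload, secret: string) =>
  jwt.sign(payload, secret, { expiresIn: '30m' });
```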
@@ -15,6 +15,7 @@ import {
 import { findAppAndAllChildren } from '@fastgpt/service/core/app/controller';
 import { MongoResourcePermission } from '@fastgpt/service/support/permission/schema';
 import { ClientSession } from '@fastgpt/service/common/mongo';
+import { deleteChatFiles } from '@fastgpt/service/core/chat/controller';
 
 async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
   const { appId } = req.query as { appId: string };
@@ -53,6 +54,7 @@ export const onDelOneApp = async ({
   for await (const app of apps) {
     const appId = app._id;
     // Chats
+    await deleteChatFiles({ appId });
     await MongoChatItem.deleteMany(
       {
         appId

@@ -21,6 +21,7 @@ import {
 import { NextAPI } from '@/service/middleware/entry';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import { AppChatConfigType } from '@fastgpt/global/core/app/type';
 
 export type Props = {
   messages: ChatCompletionMessageParam[];
@@ -29,6 +30,7 @@ export type Props = {
   variables: Record<string, any>;
   appId: string;
   appName: string;
+  chatConfig: AppChatConfigType;
 };
 
 async function handler(req: NextApiRequest, res: NextApiResponse) {
@@ -40,7 +42,15 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
     res.end();
   });
 
-  let { nodes = [], edges = [], messages = [], variables = {}, appName, appId } = req.body as Props;
+  let {
+    nodes = [],
+    edges = [],
+    messages = [],
+    variables = {},
+    appName,
+    appId,
+    chatConfig
+  } = req.body as Props;
   try {
     // [histories, user]
     const chatMessages = GPTMessages2Chats(messages);
@@ -79,6 +89,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
     /* start process */
     const { flowResponses, flowUsages } = await dispatchWorkFlow({
       res,
+      requestOrigin: req.headers.origin,
       mode: 'test',
       teamId,
       tmbId,
@@ -88,6 +99,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
       runtimeEdges: edges,
       variables,
       query: removeEmptyUserInput(userInput),
+      chatConfig,
       histories: chatMessages,
       stream: true,
       detail: true,

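Reviewer note: `chatConfig` is now threaded from the request body into `dispatchWorkFlow`, so runtime nodes (for example the new document-parse node) can see app-level settings such as the file-upload config. A sketch of the forwarded shape, using only field names that appear in this diff (`Partial` because the full type carries more):

```ts
import type { AppChatConfigType } from '@fastgpt/global/core/app/type';

const chatConfig: Partial<AppChatConfigType> = {
  fileSelectConfig: {
    canSelectFile: true,
    canSelectImg: true,
    maxFiles: 5
  }
};
```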
@@ -1,6 +1,5 @@
 import type { NextApiRequest, NextApiResponse } from 'next';
 import { jsonRes } from '@fastgpt/service/common/response';
-import { connectToDatabase } from '@/service/mongo';
 import { authCert } from '@fastgpt/service/support/permission/auth/common';
 import { MongoChat } from '@fastgpt/service/core/chat/chatSchema';
 import { MongoChatItem } from '@fastgpt/service/core/chat/chatItemSchema';
@@ -8,64 +7,71 @@ import { ClearHistoriesProps } from '@/global/core/chat/api';
 import { authOutLink } from '@/service/support/permission/auth/outLink';
 import { ChatSourceEnum } from '@fastgpt/global/core/chat/constants';
 import { authTeamSpaceToken } from '@/service/support/permission/auth/team';
+import { NextAPI } from '@/service/middleware/entry';
+import { deleteChatFiles } from '@fastgpt/service/core/chat/controller';
+import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
 
 /* clear chat history */
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
-  try {
-    await connectToDatabase();
-    const { appId, shareId, outLinkUid, teamId, teamToken } = req.query as ClearHistoriesProps;
+async function handler(req: NextApiRequest, res: NextApiResponse) {
+  const { appId, shareId, outLinkUid, teamId, teamToken } = req.query as ClearHistoriesProps;
 
-    let chatAppId = appId;
+  let chatAppId = appId!;
 
-    const match = await (async () => {
-      if (shareId && outLinkUid) {
-        const { appId, uid } = await authOutLink({ shareId, outLinkUid });
+  const match = await (async () => {
+    if (shareId && outLinkUid) {
+      const { appId, uid } = await authOutLink({ shareId, outLinkUid });
 
-        chatAppId = appId;
-        return {
-          shareId,
-          outLinkUid: uid
-        };
-      }
-      if (teamId && teamToken) {
-        const { uid } = await authTeamSpaceToken({ teamId, teamToken });
-        return {
-          teamId,
-          appId,
-          outLinkUid: uid
-        };
-      }
-      if (appId) {
-        const { tmbId } = await authCert({ req, authToken: true });
+      chatAppId = appId;
+      return {
+        shareId,
+        outLinkUid: uid
+      };
+    }
+    if (teamId && teamToken) {
+      const { uid } = await authTeamSpaceToken({ teamId, teamToken });
+      return {
+        teamId,
+        appId,
+        outLinkUid: uid
+      };
+    }
+    if (appId) {
+      const { tmbId } = await authCert({ req, authToken: true });
 
-        return {
-          tmbId,
-          appId,
-          source: ChatSourceEnum.online
-        };
-      }
+      return {
+        tmbId,
+        appId,
+        source: ChatSourceEnum.online
+      };
+    }
 
-      return Promise.reject('Param are error');
-    })();
+    return Promise.reject('Param are error');
+  })();
 
-    // find chatIds
-    const list = await MongoChat.find(match, 'chatId').lean();
-    const idList = list.map((item) => item.chatId);
+  // find chatIds
+  const list = await MongoChat.find(match, 'chatId').lean();
+  const idList = list.map((item) => item.chatId);
 
-    await MongoChatItem.deleteMany({
-      appId: chatAppId,
-      chatId: { $in: idList }
-    });
-    await MongoChat.deleteMany({
-      appId: chatAppId,
-      chatId: { $in: idList }
-    });
+  await deleteChatFiles({ chatIdList: idList });
 
-    jsonRes(res);
-  } catch (err) {
-    jsonRes(res, {
-      code: 500,
-      error: err
-    });
-  }
+  await mongoSessionRun(async (session) => {
+    await MongoChatItem.deleteMany(
+      {
+        appId: chatAppId,
+        chatId: { $in: idList }
+      },
+      { session }
+    );
+    await MongoChat.deleteMany(
+      {
+        appId: chatAppId,
+        chatId: { $in: idList }
+      },
+      { session }
+    );
+  });
 
+  jsonRes(res);
 }
+
+export default NextAPI(handler);

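Reviewer note: the delete path now runs inside `mongoSessionRun`, so chat items and chat records disappear atomically. The helper is imported rather than defined in this commit; a typical shape for such a wrapper over the mongoose driver would be (an assumption, for orientation only):

```ts
import mongoose from 'mongoose';

async function withMongoSession<T>(
  fn: (session: mongoose.ClientSession) => Promise<T>
): Promise<T> {
  const session = await mongoose.startSession();
  try {
    let result!: T;
    // withTransaction retries transient errors and commits on success.
    await session.withTransaction(async () => {
      result = await fn(session);
    });
    return result;
  } finally {
    await session.endSession();
  }
}
```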
@@ -1,4 +1,4 @@
-import type { NextApiRequest, NextApiResponse } from 'next';
+import type { NextApiResponse } from 'next';
 import { jsonRes } from '@fastgpt/service/common/response';
 import { MongoChat } from '@fastgpt/service/core/chat/chatSchema';
 import { MongoChatItem } from '@fastgpt/service/core/chat/chatItemSchema';
@@ -8,6 +8,7 @@ import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
 import { NextAPI } from '@/service/middleware/entry';
 import { ApiRequestProps } from '@fastgpt/service/type/next';
 import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
+import { deleteChatFiles } from '@fastgpt/service/core/chat/controller';
 
 /* clear chat history */
 async function handler(req: ApiRequestProps<{}, DelHistoryProps>, res: NextApiRe
@@ -20,6 +21,7 @@ async function handler(req: ApiRequestProps<{}, DelHistoryProps>, res: NextApiRe
     per: WritePermissionVal
   });
 
+  await deleteChatFiles({ chatIdList: [chatId] });
   await mongoSessionRun(async (session) => {
     await MongoChatItem.deleteMany(
       {
@@ -28,7 +30,7 @@ async function handler(req: ApiRequestProps<{}, DelHistoryProps>, res: NextApiRe
       },
       { session }
     );
-    await MongoChat.findOneAndRemove(
+    await MongoChat.deleteOne(
       {
         appId,
         chatId

@@ -41,6 +41,7 @@ async function handler(
   /* start process */
   const { flowUsages, flowResponses, debugResponse } = await dispatchWorkFlow({
     res,
+    requestOrigin: req.headers.origin,
     mode: 'debug',
     teamId,
     tmbId,
@@ -50,6 +51,7 @@ async function handler(
     runtimeEdges: edges,
     variables,
     query: [],
+    chatConfig: defaultApp.chatConfig,
     histories: [],
     stream: false,
     detail: true,

@@ -249,6 +249,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
   if (app.version === 'v2') {
     return dispatchWorkFlow({
       res,
+      requestOrigin: req.headers.origin,
       mode: 'chat',
       user,
       teamId: String(teamId),
@@ -260,6 +261,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
       runtimeEdges: initWorkflowEdgeStatus(edges),
       variables: runtimeVariables,
       query: removeEmptyUserInput(userQuestion.value),
+      chatConfig,
       histories: newHistories,
       stream,
       detail,

@@ -27,9 +27,10 @@ const ChatTest = ({ appForm }: { appForm: AppSimpleEditFormType }) => {
   });
 
   useEffect(() => {
-    const { nodes, edges } = form2AppWorkflow(appForm);
+    const { nodes, edges } = form2AppWorkflow(appForm, t);
+    // console.log(form2AppWorkflow(appForm, t));
     setWorkflowData({ nodes, edges });
-  }, [appForm, setWorkflowData, allDatasets]);
+  }, [appForm, setWorkflowData, allDatasets, t]);
 
   const { restartChat, ChatContainer } = useChatTest({
     ...workflowData,

@@ -47,6 +47,7 @@ const ScheduledTriggerConfig = dynamic(
   () => import('@/components/core/app/ScheduledTriggerConfig')
 );
 const WelcomeTextConfig = dynamic(() => import('@/components/core/app/WelcomeTextConfig'));
+const FileSelectConfig = dynamic(() => import('@/components/core/app/FileSelect'));
 
 const BoxStyles: BoxProps = {
   px: [4, 6],
@@ -120,11 +121,11 @@ const EditForm = ({
     [appForm.chatConfig.variables, t]
   );
 
+  const selectedModel =
+    llmModelList.find((item) => item.model === appForm.aiSettings.model) ?? llmModelList[0];
   const tokenLimit = useMemo(() => {
-    return (
-      llmModelList.find((item) => item.model === appForm.aiSettings.model)?.quoteMaxToken || 3000
-    );
-  }, [llmModelList, appForm.aiSettings.model]);
+    return selectedModel.quoteMaxToken || 3000;
+  }, [selectedModel.quoteMaxToken]);
 
   return (
     <>
@@ -338,6 +339,23 @@ const EditForm = ({
         </Grid>
       </Box>
 
+      {/* File select */}
+      <Box {...BoxStyles}>
+        <FileSelectConfig
+          forbidVision={!selectedModel.vision}
+          value={appForm.chatConfig.fileSelectConfig}
+          onChange={(e) => {
+            setAppForm((state) => ({
+              ...state,
+              chatConfig: {
+                ...state.chatConfig,
+                fileSelectConfig: e
+              }
+            }));
+          }}
+        />
+      </Box>
+
       {/* variable */}
       <Box {...BoxStyles}>
         <VariableEdit

@@ -12,7 +12,6 @@ import PopoverConfirm from '@fastgpt/web/components/common/MyPopover/PopoverConf
 import { AppSimpleEditFormType } from '@fastgpt/global/core/app/type';
 import { AppTypeEnum } from '@fastgpt/global/core/app/constants';
 import { form2AppWorkflow } from '@/web/core/app/utils';
-import { useSystemStore } from '@/web/common/system/useSystemStore';
 import { TabEnum } from '../context';
 import PublishHistoriesSlider, { type InitProps } from '../PublishHistoriesSlider';
 import { appWorkflow2Form } from '@fastgpt/global/core/app/utils';
@@ -52,7 +51,7 @@ const Header = ({
   );
 
   const isPublished = useMemo(() => {
-    const data = form2AppWorkflow(appForm);
+    const data = form2AppWorkflow(appForm, t);
 
     return compareWorkflow(
       {
@@ -66,11 +65,11 @@ const Header = ({
         chatConfig: data.chatConfig
       }
     );
-  }, [appDetail.chatConfig, appDetail.modules, appForm]);
+  }, [appDetail.chatConfig, appDetail.modules, appForm, t]);
 
   const onSubmitPublish = useCallback(
     async (data: AppSimpleEditFormType) => {
-      const { nodes, edges } = form2AppWorkflow(data);
+      const { nodes, edges } = form2AppWorkflow(data, t);
       await onPublish({
         nodes,
         edges,
@@ -78,7 +77,7 @@ const Header = ({
         type: AppTypeEnum.simple
       });
     },
-    [onPublish]
+    [onPublish, t]
   );
 
   const [historiesDefaultData, setHistoriesDefaultData] = useState<InitProps>();
@@ -119,9 +118,11 @@ const Header = ({
                 : publishStatusStyle.unPublish.colorSchema
             }
           >
-            {isPublished
-              ? publishStatusStyle.published.text
-              : publishStatusStyle.unPublish.text}
+            {t(
+              isPublished
+                ? publishStatusStyle.published.text
+                : publishStatusStyle.unPublish.text
+            )}
           </MyTag>
         )}
 
@@ -133,7 +134,7 @@ const Header = ({
           w={'30px'}
           variant={'whitePrimary'}
           onClick={() => {
-            const { nodes, edges } = form2AppWorkflow(appForm);
+            const { nodes, edges } = form2AppWorkflow(appForm, t);
             setHistoriesDefaultData({
               nodes,
               edges,

@@ -190,9 +190,11 @@ const AppCard = ({ showSaveStatus }: { showSaveStatus: boolean }) => {
               : publishStatusStyle.unPublish.colorSchema
           }
         >
-          {isPublished
-            ? publishStatusStyle.published.text
-            : publishStatusStyle.unPublish.text}
+          {t(
+            isPublished
+              ? publishStatusStyle.published.text
+              : publishStatusStyle.unPublish.text
+          )}
         </MyTag>
       </Flex>
     </MyTooltip>

@@ -36,6 +36,7 @@ const nodeTypes: Record<FlowNodeTypeEnum, any> = {
   [FlowNodeTypeEnum.systemConfig]: dynamic(() => import('./nodes/NodeSystemConfig')),
   [FlowNodeTypeEnum.workflowStart]: dynamic(() => import('./nodes/NodeWorkflowStart')),
   [FlowNodeTypeEnum.chatNode]: NodeSimple,
+  [FlowNodeTypeEnum.readFiles]: NodeSimple,
   [FlowNodeTypeEnum.datasetSearchNode]: NodeSimple,
   [FlowNodeTypeEnum.datasetConcatNode]: dynamic(() => import('./nodes/NodeDatasetConcat')),
   [FlowNodeTypeEnum.answerNode]: dynamic(() => import('./nodes/NodeAnswer')),

Some files were not shown because too many files have changed in this diff.