Mirror of https://github.com/labring/FastGPT.git (synced 2025-12-26 04:32:50 +00:00)

feat: plan ask (#5650)

* feat: plan ask
* fix
* fix: unit test
* fix: build
* refactor: plan llm call

parent aeaad52f46
commit ac6b235280
@@ -1,6 +1,6 @@
import type { NodeOutputItemType } from '../../../../chat/type';
import type { FlowNodeInputTypeEnum } from 'core/workflow/node/constant';
import type { WorkflowIOValueTypeEnum } from 'core/workflow/constants';
import type { FlowNodeInputTypeEnum } from '../../../../../core/workflow/node/constant';
import type { WorkflowIOValueTypeEnum } from '../../../../../core/workflow/constants';
import type { ChatCompletionMessageParam } from '../../../../ai/type';
import type { RuntimeEdgeItemType } from '../../../type/edge';
@@ -6,7 +6,10 @@ import type {
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemType, AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type {
InteractiveNodeResponseType,
WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { CreateLLMResponseProps, ResponseEvents } from './request';
import { createLLMResponse } from './request';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
@@ -39,13 +42,14 @@ type RunAgentCallProps = {
response: string;
usages: ChatNodeUsageType[];
isEnd: boolean;
interactive?: InteractiveNodeResponseType;
}>;
} & ResponseEvents;

type RunAgentResponse = {
completeMessages: ChatCompletionMessageParam[];
assistantResponses: AIChatItemValueItemType[];
interactiveResponse?: WorkflowInteractiveResponseType;
interactiveResponse?: InteractiveNodeResponseType;

// Usage
inputTokens: number;
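The hunk above adds an optional `interactive` field to the per-tool result and replaces the agent response's `WorkflowInteractiveResponseType` with `InteractiveNodeResponseType`. A minimal, self-contained sketch of how such a value can be propagated out of the tool loop; the types and the `runToolLoop` helper here are local stand-ins, not FastGPT's actual implementation:

```ts
// Local stand-in types: simplified shapes, not FastGPT's real definitions.
type InteractiveResponse = { type: string; params: Record<string, unknown> };

type ToolResult = {
  response: string;
  isEnd: boolean;
  interactive?: InteractiveResponse;
};

// Sketch: capture an interactive request from any tool handler and use it to
// stop the agent loop early, surfacing it to the caller instead of feeding it
// back into the LLM.
const runToolLoop = async (
  calls: string[],
  handle: (call: string) => Promise<ToolResult>
) => {
  let interactiveResponse: InteractiveResponse | undefined;
  let isEndSign = false;

  for (const call of calls) {
    const { response, isEnd, interactive } = await handle(call);
    console.log(`tool ${call} ->`, response);

    if (isEnd) isEndSign = true;
    if (interactive) {
      interactiveResponse = interactive;
      isEndSign = true;
    }
  }

  return { isEndSign, interactiveResponse };
};
```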
@@ -71,7 +75,7 @@ export const runAgentCall = async ({
let runTimes = 0;

const assistantResponses: AIChatItemValueItemType[] = [];
let interactiveResponse: WorkflowInteractiveResponseType | undefined;
let interactiveResponse: InteractiveNodeResponseType | undefined;

let requestMessages = messages;
@@ -127,7 +131,7 @@ export const runAgentCall = async ({
let isEndSign = false;
for await (const tool of toolCalls) {
// TODO: 加入交互节点处理
const { response, usages, isEnd } = await handleToolResponse({
const { response, usages, isEnd, interactive } = await handleToolResponse({
call: tool,
messages: requestMessages.slice(0, requestMessagesLength) // 取原来 request 的上下文
});
@@ -142,8 +146,12 @@ export const runAgentCall = async ({
content: response
});
subAppUsages.push(...usages);
}

if (interactive) {
interactiveResponse = interactive;
isEndSign = true;
}
}
// TODO: 移动到工作流里 assistantResponses concat
const currentAssistantResponses = GPTMessages2Chats({
messages: requestMessages.slice(requestMessagesLength),
@@ -168,6 +176,7 @@ export const runAgentCall = async ({
outputTokens,
completeMessages: requestMessages,
assistantResponses,
subAppUsages
subAppUsages,
interactiveResponse
};
};
@@ -1,18 +1,30 @@
import { SubAppIds } from './sub/constants';

export const getMasterAgentDefaultPrompt = () => {
return `## 角色
你是一个智能的任务协调者和监督者,负责分析用户需求并合理调度各种工具和子应用来完成复杂任务。
return `## 角色定位
你是一个高级任务调度器(Task Orchestrator),负责分析用户需求、制定执行策略、协调多个专业Agent协同工作,确保任务高质量完成。

## 工作内容
1. 判断用户问题是否复杂,如果任务复杂,则先使用 "${SubAppIds.plan}" 制定计划,然后根据计划逐步完成任务
2. 按需调用工具来辅助完成任务
3. 整合各个工具的执行结果,形成完整的回答
## 核心职责
### 任务分析与规划
- **复杂度评估**:判断任务是否需要分解(涉及多个领域、多个步骤、需要不同专业能力)
- 简单任务:直接调用相关工具或Agent完成
- 复杂任务:先调用 "${SubAppIds.plan}" 制定详细计划,再按计划执行

## 注意事项
- 优先使用最相关和最可靠的工具
- 如果某个工具执行失败,要有备选方案
- 保持回答的逻辑性和连贯性
- 对于不确定的信息要明确标注
- 始终以用户需求为中心,提供有价值的帮助`;
### 粒度控制
- **原子任务原则**:每个子任务应该是单一、明确、可独立完成的
- **专业匹配原则**:每个子任务分配给最适合的专业Agent
- **避免任务过载**:不要将多个复杂任务打包给单个Agent

### 结果验证
- 检查每个子任务是否达到预期目标
- 验证输出格式和内容完整性
- 确保各部分结果的一致性和连贯性

## 关键原则
1. **分而治之**:复杂任务必须分解,不能一次性丢给单个Agent
2. **专业对口**:根据任务特性选择最合适的Agent
3. **循序渐进**:按照逻辑顺序执行,确保前后连贯
4. **结果导向**:关注最终用户需求的满足度
5. **灵活调整**:根据执行情况动态调整策略
`;
};
@@ -4,6 +4,7 @@ import {
WorkflowIOValueTypeEnum
} from '@fastgpt/global/core/workflow/constants';
import {
ConfirmPlanAgentText,
DispatchNodeResponseKeyEnum,
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
@@ -56,6 +57,8 @@ import { addFilePrompt2Input, getFileInputPrompt } from './sub/file/utils';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { dispatchFileRead } from './sub/file';
import { dispatchApp, dispatchPlugin } from './sub/app';
import type { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { getNanoid } from '@fastgpt/global/common/string/tools';

export type DispatchAgentModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
@@ -183,7 +186,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise

// Get master request messages
const systemMessages = chats2GPTMessages({
messages: getSystemPrompt_ChatItemType(systemPrompt || getMasterAgentDefaultPrompt()),
messages: getSystemPrompt_ChatItemType(getMasterAgentDefaultPrompt()),
reserveId: false
});
const historyMessages: ChatCompletionMessageParam[] = (() => {
@@ -193,6 +196,10 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise

return chats2GPTMessages({ messages: chatHistories, reserveId: false });
})();

if (lastInteractive?.type !== 'userSelect' && lastInteractive?.type !== 'userInput') {
userChatInput = query[0].text?.content ?? userChatInput;
}
const userMessages = chats2GPTMessages({
messages: [
{
@@ -205,38 +212,56 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
],
reserveId: false
});

const requestMessages = [...systemMessages, ...historyMessages, ...userMessages];
let planMessages: ChatCompletionMessageParam[] = [];

// TODO: 执行 plan function(只有lastInteractive userselect/userInput 时候,才不需要进入 plan)
if (lastInteractive?.type !== 'userSelect' && lastInteractive?.type !== 'userInput') {
// const planResponse = xxxx
// requestMessages.push(一组 toolcall)
if (
lastInteractive?.type !== 'userSelect' &&
lastInteractive?.type !== 'userInput' &&
userChatInput !== ConfirmPlanAgentText
) {
const { completeMessages, toolMessages, usages, interactiveResponse } =
await dispatchPlanAgent({
messages: requestMessages,
subApps: subAppList,
model,
temperature,
top_p: aiChatTopP,
stream,
onReasoning: ({ text }: { text: string }) => {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: text
})
});
},
onStreaming: ({ text }: { text: string }) => {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
})
});
}
});

workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: '这是 plan'
})
});
if (toolMessages) requestMessages.push(...toolMessages);

return {
[DispatchNodeResponseKeyEnum.memories]: {
[planMessagesKey]: [
{
role: 'user',
content: '测试'
},
{
role: 'assistant',
content: '测试'
}
]
[masterMessagesKey]: filterMemoryMessages(requestMessages),
[planMessagesKey]: filterMemoryMessages(completeMessages)
},
[DispatchNodeResponseKeyEnum.interactive]: interactiveResponse

// Mock:返回 plan check
[DispatchNodeResponseKeyEnum.interactive]: {
type: 'agentPlanCheck',
params: {}
}
// [DispatchNodeResponseKeyEnum.interactive]: {
// type: 'agentPlanCheck',
// params: {}
// }

// Mock: 返回 plan user select
// [DispatchNodeResponseKeyEnum.interactive]: {
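The condition above only enters the plan phase on a fresh turn: not when resuming a `userSelect`/`userInput` interactive step, and not when the user has just sent the plan-confirmation text. A hedged sketch of that gate; `CONFIRM_PLAN_TEXT` is a placeholder for the real `ConfirmPlanAgentText` constant and `LastInteractive` is a simplified stand-in type:

```ts
// Simplified stand-ins, for illustration only.
type LastInteractive = { type: 'userSelect' | 'userInput' | 'agentPlanCheck' } | undefined;

const CONFIRM_PLAN_TEXT = 'Confirm plan'; // assumption: the real value lives in runtime/constants

const shouldRunPlanAgent = (lastInteractive: LastInteractive, userChatInput: string) =>
  lastInteractive?.type !== 'userSelect' &&
  lastInteractive?.type !== 'userInput' &&
  userChatInput !== CONFIRM_PLAN_TEXT;

// Plan only on a fresh turn, not when resuming an interactive step or when
// the user has just confirmed the generated plan.
console.log(shouldRunPlanAgent(undefined, 'Research topic X')); // true
console.log(shouldRunPlanAgent({ type: 'userSelect' }, 'Option A')); // false
console.log(shouldRunPlanAgent(undefined, CONFIRM_PLAN_TEXT)); // false
```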
@@ -254,7 +279,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
// }
// ]
// }
// },
// }
// Mock: 返回 plan user input
// [DispatchNodeResponseKeyEnum.interactive]: {
// type: 'agentPlanAskUserForm',
@ -283,296 +308,307 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
|
|||
}
|
||||
|
||||
const dispatchFlowResponse: ChatHistoryItemResType[] = [];
|
||||
// console.log(JSON.stringify(requestMessages, null, 2));
|
||||
const { completeMessages, assistantResponses, inputTokens, outputTokens, subAppUsages } =
|
||||
await runAgentCall({
|
||||
maxRunAgentTimes: 100,
|
||||
interactiveEntryToolParams: lastInteractive?.toolParams,
|
||||
body: {
|
||||
messages: requestMessages,
|
||||
model: agentModel,
|
||||
temperature,
|
||||
stream,
|
||||
top_p: aiChatTopP,
|
||||
subApps: subAppList
|
||||
},
|
||||
const {
|
||||
completeMessages,
|
||||
assistantResponses,
|
||||
inputTokens,
|
||||
outputTokens,
|
||||
subAppUsages,
|
||||
interactiveResponse
|
||||
} = await runAgentCall({
|
||||
maxRunAgentTimes: 100,
|
||||
interactiveEntryToolParams: lastInteractive?.toolParams,
|
||||
body: {
|
||||
messages: requestMessages,
|
||||
model: agentModel,
|
||||
temperature,
|
||||
stream,
|
||||
top_p: aiChatTopP,
|
||||
subApps: subAppList
|
||||
},
|
||||
|
||||
userKey: externalProvider.openaiAccount,
|
||||
isAborted: res ? () => res.closed : undefined,
|
||||
getToolInfo: getSubAppInfo,
|
||||
userKey: externalProvider.openaiAccount,
|
||||
isAborted: res ? () => res.closed : undefined,
|
||||
getToolInfo: getSubAppInfo,
|
||||
|
||||
onReasoning({ text }) {
|
||||
workflowStreamResponse?.({
|
||||
onReasoning({ text }) {
|
||||
workflowStreamResponse?.({
|
||||
event: SseResponseEventEnum.answer,
|
||||
data: textAdaptGptResponse({
|
||||
reasoning_content: text
|
||||
})
|
||||
});
|
||||
},
|
||||
onStreaming({ text }) {
|
||||
workflowStreamResponse?.({
|
||||
event: SseResponseEventEnum.answer,
|
||||
data: textAdaptGptResponse({
|
||||
text
|
||||
})
|
||||
});
|
||||
},
|
||||
onToolCall({ call }) {
|
||||
const subApp = getSubAppInfo(call.function.name);
|
||||
workflowStreamResponse?.({
|
||||
id: call.id,
|
||||
event: SseResponseEventEnum.toolCall,
|
||||
data: {
|
||||
tool: {
|
||||
id: `${nodeId}/${call.function.name}`,
|
||||
toolName: subApp?.name || call.function.name,
|
||||
toolAvatar: subApp?.avatar || '',
|
||||
functionName: call.function.name,
|
||||
params: call.function.arguments ?? ''
|
||||
}
|
||||
}
|
||||
});
|
||||
},
|
||||
onToolParam({ call, params }) {
|
||||
workflowStreamResponse?.({
|
||||
id: call.id,
|
||||
event: SseResponseEventEnum.toolParams,
|
||||
data: {
|
||||
tool: {
|
||||
params
|
||||
}
|
||||
}
|
||||
});
|
||||
},
|
||||
|
||||
handleToolResponse: async ({ call, messages }) => {
|
||||
const toolId = call.function.name;
|
||||
const childWorkflowStreamResponse = getWorkflowChildResponseWrite({
|
||||
subAppId: `${nodeId}/${toolId}`,
|
||||
id: call.id,
|
||||
fn: workflowStreamResponse
|
||||
});
|
||||
const onReasoning = ({ text }: { text: string }) => {
|
||||
childWorkflowStreamResponse?.({
|
||||
event: SseResponseEventEnum.answer,
|
||||
data: textAdaptGptResponse({
|
||||
reasoning_content: text
|
||||
})
|
||||
});
|
||||
},
|
||||
onStreaming({ text }) {
|
||||
workflowStreamResponse?.({
|
||||
};
|
||||
const onStreaming = ({ text }: { text: string }) => {
|
||||
childWorkflowStreamResponse?.({
|
||||
event: SseResponseEventEnum.answer,
|
||||
data: textAdaptGptResponse({
|
||||
text
|
||||
})
|
||||
});
|
||||
},
|
||||
onToolCall({ call }) {
|
||||
const subApp = getSubAppInfo(call.function.name);
|
||||
workflowStreamResponse?.({
|
||||
id: call.id,
|
||||
event: SseResponseEventEnum.toolCall,
|
||||
data: {
|
||||
tool: {
|
||||
id: `${nodeId}/${call.function.name}`,
|
||||
toolName: subApp?.name || call.function.name,
|
||||
toolAvatar: subApp?.avatar || '',
|
||||
functionName: call.function.name,
|
||||
params: call.function.arguments ?? ''
|
||||
}
|
||||
}
|
||||
});
|
||||
},
|
||||
onToolParam({ call, params }) {
|
||||
workflowStreamResponse?.({
|
||||
id: call.id,
|
||||
event: SseResponseEventEnum.toolParams,
|
||||
data: {
|
||||
tool: {
|
||||
params
|
||||
}
|
||||
}
|
||||
});
|
||||
},
|
||||
};
|
||||
|
||||
handleToolResponse: async ({ call, messages }) => {
|
||||
const toolId = call.function.name;
|
||||
const childWorkflowStreamResponse = getWorkflowChildResponseWrite({
|
||||
subAppId: `${nodeId}/${toolId}`,
|
||||
id: call.id,
|
||||
fn: workflowStreamResponse
|
||||
});
|
||||
const onReasoning = ({ text }: { text: string }) => {
|
||||
childWorkflowStreamResponse?.({
|
||||
event: SseResponseEventEnum.answer,
|
||||
data: textAdaptGptResponse({
|
||||
reasoning_content: text
|
||||
})
|
||||
});
|
||||
};
|
||||
const onStreaming = ({ text }: { text: string }) => {
|
||||
childWorkflowStreamResponse?.({
|
||||
event: SseResponseEventEnum.answer,
|
||||
data: textAdaptGptResponse({
|
||||
text
|
||||
})
|
||||
});
|
||||
};
|
||||
|
||||
const {
|
||||
response,
|
||||
usages = [],
|
||||
isEnd
|
||||
} = await (async () => {
|
||||
try {
|
||||
if (toolId === SubAppIds.stop) {
|
||||
return {
|
||||
response: '',
|
||||
usages: [],
|
||||
isEnd: true
|
||||
};
|
||||
} else if (toolId === SubAppIds.plan) {
|
||||
const { response, usages } = await dispatchPlanAgent({
|
||||
messages,
|
||||
tools: subAppList,
|
||||
model,
|
||||
temperature,
|
||||
top_p: aiChatTopP,
|
||||
stream,
|
||||
onReasoning,
|
||||
onStreaming
|
||||
});
|
||||
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd: false
|
||||
};
|
||||
} else if (toolId === SubAppIds.model) {
|
||||
const { systemPrompt, task } = parseToolArgs<{
|
||||
systemPrompt: string;
|
||||
task: string;
|
||||
}>(call.function.arguments);
|
||||
|
||||
const { response, usages } = await dispatchModelAgent({
|
||||
model,
|
||||
temperature,
|
||||
top_p: aiChatTopP,
|
||||
stream,
|
||||
systemPrompt,
|
||||
task,
|
||||
onReasoning,
|
||||
onStreaming
|
||||
});
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd: false
|
||||
};
|
||||
} else if (toolId === SubAppIds.fileRead) {
|
||||
const { file_indexes } = parseToolArgs<{
|
||||
file_indexes: string[];
|
||||
}>(call.function.arguments);
|
||||
if (!Array.isArray(file_indexes)) {
|
||||
return {
|
||||
response: 'file_indexes is not array',
|
||||
usages: [],
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
|
||||
const files = file_indexes.map((index) => ({
|
||||
index,
|
||||
url: filesMap[index]
|
||||
}));
|
||||
const result = await dispatchFileRead({
|
||||
files,
|
||||
teamId: runningUserInfo.teamId,
|
||||
tmbId: runningUserInfo.tmbId,
|
||||
customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse
|
||||
});
|
||||
return {
|
||||
response: result.response,
|
||||
usages: result.usages,
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
// User Sub App
|
||||
else {
|
||||
const node = subAppsMap.get(toolId);
|
||||
if (!node) {
|
||||
return {
|
||||
response: 'Can not find the tool',
|
||||
usages: [],
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
|
||||
const toolCallParams = parseToolArgs(call.function.arguments);
|
||||
// Get params
|
||||
const requestParams = (() => {
|
||||
const params: Record<string, any> = toolCallParams;
|
||||
|
||||
node.inputs.forEach((input) => {
|
||||
if (input.key in toolCallParams) {
|
||||
return;
|
||||
}
|
||||
// Skip some special key
|
||||
if (
|
||||
[
|
||||
NodeInputKeyEnum.childrenNodeIdList,
|
||||
NodeInputKeyEnum.systemInputConfig
|
||||
].includes(input.key as NodeInputKeyEnum)
|
||||
) {
|
||||
params[input.key] = input.value;
|
||||
return;
|
||||
}
|
||||
|
||||
// replace {{$xx.xx$}} and {{xx}} variables
|
||||
let value = replaceEditorVariable({
|
||||
text: input.value,
|
||||
nodes: runtimeNodes,
|
||||
variables
|
||||
});
|
||||
|
||||
// replace reference variables
|
||||
value = getReferenceVariableValue({
|
||||
value,
|
||||
nodes: runtimeNodes,
|
||||
variables
|
||||
});
|
||||
|
||||
params[input.key] = valueTypeFormat(value, input.valueType);
|
||||
});
|
||||
|
||||
return params;
|
||||
})();
|
||||
|
||||
if (node.flowNodeType === FlowNodeTypeEnum.tool) {
|
||||
const { response, usages } = await dispatchTool({
|
||||
node,
|
||||
params: requestParams,
|
||||
runningUserInfo,
|
||||
runningAppInfo,
|
||||
variables,
|
||||
workflowStreamResponse: childWorkflowStreamResponse
|
||||
});
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd: false
|
||||
};
|
||||
} else if (
|
||||
node.flowNodeType === FlowNodeTypeEnum.appModule ||
|
||||
node.flowNodeType === FlowNodeTypeEnum.pluginModule
|
||||
) {
|
||||
const fn =
|
||||
node.flowNodeType === FlowNodeTypeEnum.appModule ? dispatchApp : dispatchPlugin;
|
||||
|
||||
const { response, usages } = await fn({
|
||||
...props,
|
||||
node,
|
||||
workflowStreamResponse: childWorkflowStreamResponse,
|
||||
callParams: {
|
||||
appId: node.pluginId,
|
||||
version: node.version,
|
||||
...requestParams
|
||||
}
|
||||
});
|
||||
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd: false
|
||||
};
|
||||
} else {
|
||||
return {
|
||||
response: 'Can not find the tool',
|
||||
usages: [],
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
const {
|
||||
response,
|
||||
usages = [],
|
||||
isEnd,
|
||||
interactive
|
||||
} = await (async () => {
|
||||
try {
|
||||
if (toolId === SubAppIds.stop) {
|
||||
return {
|
||||
response: getErrText(error),
|
||||
response: '',
|
||||
usages: [],
|
||||
isEnd: true
|
||||
};
|
||||
} else if (toolId === SubAppIds.plan) {
|
||||
const { completeMessages, response, usages, interactiveResponse } =
|
||||
await dispatchPlanAgent({
|
||||
messages,
|
||||
subApps: subAppList,
|
||||
model,
|
||||
temperature,
|
||||
top_p: aiChatTopP,
|
||||
stream,
|
||||
onReasoning,
|
||||
onStreaming
|
||||
});
|
||||
|
||||
planMessages = completeMessages;
|
||||
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd: false,
|
||||
interactive: interactiveResponse
|
||||
};
|
||||
} else if (toolId === SubAppIds.model) {
|
||||
const { systemPrompt, task } = parseToolArgs<{
|
||||
systemPrompt: string;
|
||||
task: string;
|
||||
}>(call.function.arguments);
|
||||
|
||||
const { response, usages } = await dispatchModelAgent({
|
||||
model,
|
||||
temperature,
|
||||
top_p: aiChatTopP,
|
||||
stream,
|
||||
systemPrompt,
|
||||
task,
|
||||
onReasoning,
|
||||
onStreaming
|
||||
});
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd: false
|
||||
};
|
||||
} else if (toolId === SubAppIds.fileRead) {
|
||||
const { file_indexes } = parseToolArgs<{
|
||||
file_indexes: string[];
|
||||
}>(call.function.arguments);
|
||||
if (!Array.isArray(file_indexes)) {
|
||||
return {
|
||||
response: 'file_indexes is not array',
|
||||
usages: [],
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
|
||||
const files = file_indexes.map((index) => ({
|
||||
index,
|
||||
url: filesMap[index]
|
||||
}));
|
||||
const result = await dispatchFileRead({
|
||||
files,
|
||||
teamId: runningUserInfo.teamId,
|
||||
tmbId: runningUserInfo.tmbId,
|
||||
customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse
|
||||
});
|
||||
return {
|
||||
response: result.response,
|
||||
usages: result.usages,
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
})();
|
||||
// User Sub App
|
||||
else {
|
||||
const node = subAppsMap.get(toolId);
|
||||
if (!node) {
|
||||
return {
|
||||
response: 'Can not find the tool',
|
||||
usages: [],
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
|
||||
// Push stream response
|
||||
workflowStreamResponse?.({
|
||||
id: call.id,
|
||||
event: SseResponseEventEnum.toolResponse,
|
||||
data: {
|
||||
tool: {
|
||||
id: call.id,
|
||||
response
|
||||
const toolCallParams = parseToolArgs(call.function.arguments);
|
||||
// Get params
|
||||
const requestParams = (() => {
|
||||
const params: Record<string, any> = toolCallParams;
|
||||
|
||||
node.inputs.forEach((input) => {
|
||||
if (input.key in toolCallParams) {
|
||||
return;
|
||||
}
|
||||
// Skip some special key
|
||||
if (
|
||||
[
|
||||
NodeInputKeyEnum.childrenNodeIdList,
|
||||
NodeInputKeyEnum.systemInputConfig
|
||||
].includes(input.key as NodeInputKeyEnum)
|
||||
) {
|
||||
params[input.key] = input.value;
|
||||
return;
|
||||
}
|
||||
|
||||
// replace {{$xx.xx$}} and {{xx}} variables
|
||||
let value = replaceEditorVariable({
|
||||
text: input.value,
|
||||
nodes: runtimeNodes,
|
||||
variables
|
||||
});
|
||||
|
||||
// replace reference variables
|
||||
value = getReferenceVariableValue({
|
||||
value,
|
||||
nodes: runtimeNodes,
|
||||
variables
|
||||
});
|
||||
|
||||
params[input.key] = valueTypeFormat(value, input.valueType);
|
||||
});
|
||||
|
||||
return params;
|
||||
})();
|
||||
|
||||
if (node.flowNodeType === FlowNodeTypeEnum.tool) {
|
||||
const { response, usages } = await dispatchTool({
|
||||
node,
|
||||
params: requestParams,
|
||||
runningUserInfo,
|
||||
runningAppInfo,
|
||||
variables,
|
||||
workflowStreamResponse: childWorkflowStreamResponse
|
||||
});
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd: false
|
||||
};
|
||||
} else if (
|
||||
node.flowNodeType === FlowNodeTypeEnum.appModule ||
|
||||
node.flowNodeType === FlowNodeTypeEnum.pluginModule
|
||||
) {
|
||||
const fn =
|
||||
node.flowNodeType === FlowNodeTypeEnum.appModule ? dispatchApp : dispatchPlugin;
|
||||
|
||||
const { response, usages } = await fn({
|
||||
...props,
|
||||
node,
|
||||
workflowStreamResponse: childWorkflowStreamResponse,
|
||||
callParams: {
|
||||
appId: node.pluginId,
|
||||
version: node.version,
|
||||
...requestParams
|
||||
}
|
||||
});
|
||||
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd: false
|
||||
};
|
||||
} else {
|
||||
return {
|
||||
response: 'Can not find the tool',
|
||||
usages: [],
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
return {
|
||||
response: getErrText(error),
|
||||
usages: [],
|
||||
isEnd: false
|
||||
};
|
||||
}
|
||||
})();
|
||||
|
||||
// TODO: 推送账单
|
||||
// Push stream response
|
||||
workflowStreamResponse?.({
|
||||
id: call.id,
|
||||
event: SseResponseEventEnum.toolResponse,
|
||||
data: {
|
||||
tool: {
|
||||
id: call.id,
|
||||
response
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd
|
||||
};
|
||||
}
|
||||
});
|
||||
// TODO: 推送账单
|
||||
|
||||
return {
|
||||
response,
|
||||
usages,
|
||||
isEnd,
|
||||
interactive
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
// Usage count
|
||||
const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({
|
||||
|
|
@@ -597,7 +633,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
// TODO: 需要对 memoryMessages 单独建表存储
[DispatchNodeResponseKeyEnum.memories]: {
[masterMessagesKey]: filterMemoryMessages(completeMessages),
[planMessagesKey]: [] // TODO: plan messages 需要记录
[planMessagesKey]: [filterMemoryMessages(planMessages)]
},
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
@@ -628,7 +664,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
// Tool usage
...subAppUsages
],
[DispatchNodeResponseKeyEnum.interactive]: undefined
[DispatchNodeResponseKeyEnum.interactive]: interactiveResponse
};
} catch (error) {
return getNodeErrResponse({ error });
@ -1,61 +0,0 @@
|
|||
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
|
||||
import { SubAppIds } from '../constants';
|
||||
|
||||
export const AskAgentTool: ChatCompletionTool = {
|
||||
type: 'function',
|
||||
function: {
|
||||
name: SubAppIds.ask,
|
||||
description: '调用此工具,向用户发起交互式提问',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
mode: {
|
||||
type: 'string',
|
||||
enum: ['userSelect', 'formInput', 'userInput'],
|
||||
description: '交互模式'
|
||||
},
|
||||
prompt: {
|
||||
type: 'string',
|
||||
description: '向用户展示的提示信息'
|
||||
},
|
||||
options: {
|
||||
type: 'array',
|
||||
description: '当 mode=userSelect 时可供选择的选项',
|
||||
items: {
|
||||
type: 'string'
|
||||
}
|
||||
},
|
||||
form: {
|
||||
type: 'array',
|
||||
description: '当 mode=formInput 时需要填写的表单字段列表',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
field: {
|
||||
type: 'string',
|
||||
description: '字段名,如 name, age, 同时会展示给用户一样的label'
|
||||
},
|
||||
type: {
|
||||
type: 'string',
|
||||
enum: ['textInput', 'numberInput', 'singleSelect', 'multiSelect'],
|
||||
description: '字段输入类型'
|
||||
},
|
||||
required: { type: 'boolean', description: '该字段是否必填', default: false },
|
||||
options: {
|
||||
type: 'array',
|
||||
description: '当 type 为 singleSelect 或 multiSelect 时的可选项',
|
||||
items: { type: 'string' }
|
||||
}
|
||||
},
|
||||
required: ['field', 'type']
|
||||
}
|
||||
},
|
||||
userInput: {
|
||||
type: 'string',
|
||||
description: '当 mode=userInput 时用户自由输入的内容'
|
||||
}
|
||||
},
|
||||
required: ['mode', 'prompt']
|
||||
}
|
||||
}
|
||||
};
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
|
||||
import { SubAppIds } from '../../constants';
|
||||
|
||||
export type AskAgentToolParamsType = Partial<{
|
||||
mode: 'select' | 'formInput' | 'input';
|
||||
prompt: string;
|
||||
options: string[];
|
||||
form: {
|
||||
field: string;
|
||||
type: 'textInput' | 'numberInput' | 'singleSelect' | 'multiSelect';
|
||||
required: boolean;
|
||||
options: string[];
|
||||
}[];
|
||||
}>;
|
||||
|
||||
export const AskAgentTool: ChatCompletionTool = {
|
||||
type: 'function',
|
||||
function: {
|
||||
name: SubAppIds.ask,
|
||||
description: `
|
||||
调用此工具时,LLM 可以向用户发起一次交互式问题。
|
||||
参数结构非常简洁,仅包含 mode、prompt、options 三个字段,但通过不同的组合方式可以覆盖两种主要交互场景:
|
||||
1. mode = "select"
|
||||
- 用于枚举型选择(如调查问卷、多项选择、分支逻辑)。
|
||||
- prompt: 展示在问题顶部的主要提问文案。
|
||||
- options: 字符串数组,表示可供选择的选项。前端渲染时可以将每个选项显示为卡片、列表项或按钮。
|
||||
- 场景示例:
|
||||
* 让用户在几个备选方案中选择最贴近的情况。
|
||||
* 希望结果结构化,便于后续逻辑分支。
|
||||
2. mode = "input"
|
||||
- 用于自由文本输入,适合用户提供个性化或开放式回答。
|
||||
- prompt: 展示的问题提示,引导用户填写。
|
||||
- options: 此模式下通常留空或忽略。
|
||||
- 场景示例:
|
||||
* 需要用户补充说明原因、填写备注、输入 URL/编号等。
|
||||
* 当 "select" 的选项无法覆盖用户真实答案时,可以再调用一次 "input" 追问。
|
||||
|
||||
使用建议:
|
||||
- 优先使用 "select" 以获得结构化结果,减少歧义。
|
||||
- 当问题答案无法预先枚举,或需要用户自由表达时,使用 "input"。
|
||||
- 如果需要“Something else”选项,可以把它放进 options 里作为一个普通选项,然后再根据用户选择调用一次 "input" 让用户详细描述。
|
||||
`,
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
mode: {
|
||||
type: 'string',
|
||||
enum: ['select', 'input'],
|
||||
description: '交互模式'
|
||||
},
|
||||
prompt: {
|
||||
type: 'string',
|
||||
description: '向用户展示的提示信息'
|
||||
},
|
||||
options: {
|
||||
type: 'array',
|
||||
description: '当 mode=select 时可供选择的选项',
|
||||
items: {
|
||||
type: 'string'
|
||||
}
|
||||
}
|
||||
// form: {
|
||||
// type: 'array',
|
||||
// description: '当 mode=formInput 时需要填写的表单字段列表',
|
||||
// items: {
|
||||
// type: 'object',
|
||||
// properties: {
|
||||
// field: {
|
||||
// type: 'string',
|
||||
// description: '字段名,如 name, age, 同时会展示给用户一样的label'
|
||||
// },
|
||||
// type: {
|
||||
// type: 'string',
|
||||
// enum: ['textInput', 'numberInput', 'singleSelect', 'multiSelect'],
|
||||
// description: '字段输入类型'
|
||||
// },
|
||||
// required: { type: 'boolean', description: '该字段是否必填', default: false },
|
||||
// options: {
|
||||
// type: 'array',
|
||||
// description: '当 type 为 singleSelect 或 multiSelect 时的可选项',
|
||||
// items: { type: 'string' }
|
||||
// }
|
||||
// },
|
||||
// required: ['field', 'type']
|
||||
// }
|
||||
// },
|
||||
},
|
||||
required: ['mode', 'prompt']
|
||||
}
|
||||
}
|
||||
};
|
||||
|
|
@@ -5,6 +5,7 @@ export const PlanAgentTool: ChatCompletionTool = {
type: 'function',
function: {
name: SubAppIds.plan,
description: '分析和拆解用户问题,制定分步计划。'
description: '分析和拆解用户问题,制定分步计划。',
parameters: {}
}
};
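The change above adds a `parameters` field to the plan tool so it is a complete function declaration. As a hedged point of reference (an assumption about typical provider behaviour, not a statement about FastGPT's requirements), a zero-argument function tool is often declared with an explicit empty object schema; sketched below with a local stand-in type:

```ts
// Local stand-in type for a chat-completions function tool.
type FunctionTool = {
  type: 'function';
  function: {
    name: string;
    description: string;
    parameters: Record<string, unknown>;
  };
};

// 'plan_agent' is a placeholder for SubAppIds.plan. Some providers accept
// `parameters: {}`; the stricter empty-object schema is shown here.
const planTool: FunctionTool = {
  type: 'function',
  function: {
    name: 'plan_agent',
    description: '分析和拆解用户问题,制定分步计划。',
    parameters: { type: 'object', properties: {} }
  }
};
```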
@@ -8,10 +8,16 @@ import { getLLMModel } from '../../../../../../ai/model';
import { formatModelChars2Points } from '../../../../../../../support/wallet/usage/utils';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { SubAppIds } from '../constants';
import type { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { runAgentCall } from '../../../../../../../core/ai/llm/agentCall';
import { parseToolArgs } from '../../../utils';
import { AskAgentTool, type AskAgentToolParamsType } from './ask/constants';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';

type PlanAgentConfig = {
model: string;
customSystemPrompt?: string;
systemPrompt?: string;
temperature?: number;
top_p?: number;
stream?: boolean;
@@ -19,7 +25,7 @@ type PlanAgentConfig = {

type DispatchPlanAgentProps = PlanAgentConfig & {
messages: ChatCompletionMessageParam[];
tools: ChatCompletionTool[];
subApps: ChatCompletionTool[];
onReasoning: ResponseEvents['onReasoning'];
onStreaming: ResponseEvents['onStreaming'];
};
@@ -27,14 +33,17 @@ type DispatchPlanAgentProps = PlanAgentConfig & {
type DispatchPlanAgentResponse = {
response: string;
usages: ChatNodeUsageType[];
completeMessages: ChatCompletionMessageParam[];
toolMessages?: ChatCompletionMessageParam[];
interactiveResponse?: InteractiveNodeResponseType;
};

export const dispatchPlanAgent = async ({
messages,
tools,
subApps,
model,
customSystemPrompt,
systemPrompt,
temperature,
top_p,
stream,
@@ -46,14 +55,21 @@ export const dispatchPlanAgent = async ({
const requestMessages: ChatCompletionMessageParam[] = [
{
role: 'system',
content: getPlanAgentPrompt(customSystemPrompt)
content: getPlanAgentPrompt(systemPrompt)
},
...messages.filter((item) => item.role !== 'system'),
{ role: 'user', content: 'Start plan' }
...messages.filter((item) => item.role !== 'system')
];
const filterPlanTools = tools.filter((item) => item.function.name !== SubAppIds.plan);
const filterPlanTools = subApps.filter((item) => item.function.name !== SubAppIds.plan);
filterPlanTools.push(AskAgentTool);

const { answerText, usage } = await createLLMResponse({
const {
reasoningText,
answerText,
toolCalls = [],
usage,
getEmptyResponseTip,
completeMessages
} = await createLLMResponse({
body: {
model: modelData.model,
temperature,
@@ -70,12 +86,74 @@ export const dispatchPlanAgent = async ({
onStreaming
});

if (!answerText && !reasoningText && !toolCalls.length) {
return Promise.reject(getEmptyResponseTip());
}

// TODO: 需要考虑多个 Interactive 并发的情况
let interactiveResponse: InteractiveNodeResponseType = {
type: 'agentPlanCheck',
params: {}
};

for await (const call of toolCalls) {
const toolId = call.function.name;

if (toolId === SubAppIds.ask) {
const params = parseToolArgs<AskAgentToolParamsType>(call.function.arguments);

if (params.mode === 'select') {
interactiveResponse = {
type: 'agentPlanAskUserSelect',
params: {
description: params?.prompt ?? '选择选项',
userSelectOptions: params?.options?.map((v, i) => {
return { key: `option${i}`, value: v };
})
}
} as InteractiveNodeResponseType;
}
if (params.mode === 'input') {
interactiveResponse = {
type: 'agentPlanAskQuery',
params: {
content: params?.prompt ?? '输入详细信息'
}
};
}
}
}

const { totalPoints, modelName } = formatModelChars2Points({
model: modelData.model,
inputTokens: usage.inputTokens,
outputTokens: usage.outputTokens
});

const toolMessages: ChatCompletionMessageParam[] = [];
if (answerText) {
const toolId = getNanoid(6);
const toolCall: ChatCompletionMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: [
{
id: toolId,
type: 'function',
function: {
name: SubAppIds.plan,
arguments: ''
}
}
]
};
const toolCallResponse: ChatCompletionMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Tool,
tool_call_id: toolId,
content: answerText
};
toolMessages.push(toolCall, toolCallResponse);
}

return {
response: answerText,
usages: [
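As a worked example of the mapping above: an `ask` tool call with `mode: 'select'` becomes an `agentPlanAskUserSelect` interactive response with index-based option keys, `mode: 'input'` becomes `agentPlanAskQuery`, and anything else keeps the `agentPlanCheck` confirmation. The types below are local stand-ins for `AskAgentToolParamsType` and `InteractiveNodeResponseType`:

```ts
// Simplified stand-in types, for illustration only.
type AskParams = Partial<{ mode: 'select' | 'input'; prompt: string; options: string[] }>;

type Interactive =
  | { type: 'agentPlanCheck'; params: Record<string, never> }
  | {
      type: 'agentPlanAskUserSelect';
      params: { description: string; userSelectOptions: { key: string; value: string }[] };
    }
  | { type: 'agentPlanAskQuery'; params: { content: string } };

const toInteractive = (params: AskParams): Interactive => {
  if (params.mode === 'select') {
    return {
      type: 'agentPlanAskUserSelect',
      params: {
        description: params.prompt ?? '选择选项',
        userSelectOptions: (params.options ?? []).map((v, i) => ({ key: `option${i}`, value: v }))
      }
    };
  }
  if (params.mode === 'input') {
    return { type: 'agentPlanAskQuery', params: { content: params.prompt ?? '输入详细信息' } };
  }
  // No ask call (or an unknown mode) keeps the plan-check confirmation.
  return { type: 'agentPlanCheck', params: {} };
};

// Example arguments an LLM might emit for the ask tool:
console.log(toInteractive({ mode: 'select', prompt: '请选择目的地', options: ['东京', '大阪'] }));
```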
@@ -86,6 +164,9 @@ export const dispatchPlanAgent = async ({
inputTokens: usage.inputTokens,
outputTokens: usage.outputTokens
}
]
],
completeMessages,
toolMessages,
interactiveResponse
};
};
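The `toolMessages` block above wraps the plan text in a synthetic tool-call pair, so when these messages are pushed back into the master agent's context the plan reads as the result of a completed plan call rather than a free-form assistant reply. A sketch of the resulting pair, with placeholder values (`'abc123'` stands in for a `getNanoid(6)` id and `'plan_agent'` for `SubAppIds.plan`):

```ts
const toolId = 'abc123'; // placeholder for getNanoid(6)

const planToolMessages = [
  {
    role: 'assistant' as const,
    tool_calls: [
      { id: toolId, type: 'function' as const, function: { name: 'plan_agent', arguments: '' } }
    ]
  },
  {
    role: 'tool' as const,
    tool_call_id: toolId,
    content: '{"task":"...","steps":[...]}' // the plan agent's answerText
  }
];

// Appended to the master agent's request context so a later turn sees the
// plan as a finished tool call.
console.log(JSON.stringify(planToolMessages, null, 2));
```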
@ -11,37 +11,53 @@ ${
|
|||
: ''
|
||||
}
|
||||
|
||||
<task>
|
||||
根据用户提供的主题或目标,生成一份详细、可执行的项目计划文档,包含合理的阶段划分与具体待办事项。
|
||||
</task>
|
||||
|
||||
<inputs>
|
||||
- 用户输入:一个需要制定的主题、目标或任务描述。
|
||||
- 输入格式:自然语言描述,可能包含背景、目标、约束、优先级、本地化偏好。
|
||||
</inputs>
|
||||
|
||||
<process>
|
||||
1. 解析用户输入,提取核心目标、关键要素、约束与本地化偏好。
|
||||
2. 评估任务复杂度(简单:2-3 步;复杂:4-7 步),据此确定阶段数量。
|
||||
3. 各阶段生成 3-5 条可执行 Todo,动词开头,MECE 且无重叠。
|
||||
2. 评估任务复杂度, 据此确定阶段数量。
|
||||
3. 禁止调用除"ask_agent"以外的任何工具.
|
||||
4. 语言风格本地化(根据用户输入语言进行术语与语序调整)。
|
||||
5. 产出完整计划,严格使用占位符 [主题] 与标记体系;确保编号连续、标签闭合、结构清晰。
|
||||
5. 严格按照 JSON Schema 生成完整计划,不得输出多余内容。
|
||||
6. 仅在缺少关键信息时使用"ask_agent"工具询问用户(如:未指定目的地、预算、时间等必要细节)
|
||||
7. 如果信息充足或用户已回答询问,必须直接输出JSON格式的完整计划,不再调用工具
|
||||
</process>
|
||||
|
||||
<requirements>
|
||||
- 必须严格遵循以下注释标记格式:
|
||||
* <!--@title--> 标记主标题
|
||||
* <!--@desc--> 标记整体描述
|
||||
* <!--@step:N:start--> 和 <!--@step:N:end--> 包裹步骤块
|
||||
* <!--@step:N:title--> 标记步骤标题
|
||||
* <!--@step:N:desc--> 标记步骤描述
|
||||
* <!--@todos:N:start--> 和 <!--@todos:N:end--> 包裹待办列表
|
||||
* <!--@todo:N.X--> 标记单个待办事项
|
||||
* <!--@note:N--> 添加重要注释或备注
|
||||
- 步骤数量随复杂度自动调整;每步 3-5 条 Todo。
|
||||
- 编号(N、X)必须连续、准确。
|
||||
- 描述语言简洁、专业、可操作;各阶段逻辑递进、MECE。
|
||||
- 进行本地化调整(术语、量词、表达习惯)。
|
||||
- 必须严格输出 JSON,不能包含代码块标记(如 \`\`\`)、注释或额外说明文字。
|
||||
- 输出结构必须符合以下 JSON Schema:
|
||||
\`\`\`json
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"task": {
|
||||
"type": "string",
|
||||
"description": "任务主题, 准确覆盖本次所有执行步骤的核心内容和维度"
|
||||
},
|
||||
"steps": {
|
||||
"type": "array",
|
||||
"description": "阶段步骤列表",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "唯一标识"
|
||||
},
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "阶段标题"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "阶段描述, 并在末尾@对应任务将要移交使用的工具/子智能体"
|
||||
},
|
||||
},
|
||||
"required": ["id", "title", "description"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["title", "description", "steps"]
|
||||
}
|
||||
\`\`\`
|
||||
</requirements>
|
||||
|
||||
<guardrails>
|
||||
|
|
@ -52,42 +68,21 @@ ${
|
|||
|
||||
<output>
|
||||
<format>
|
||||
# [主题] 深度调研计划 <!--@title-->
|
||||
|
||||
全面了解 [主题] 的 [核心维度描述] <!--@desc-->
|
||||
|
||||
<!--@step:1:start-->
|
||||
## Step 1: [阶段名称] <!--@step:1:title-->
|
||||
[阶段目标描述] <!--@step:1:desc-->
|
||||
### Todo List
|
||||
<!--@todos:1:start-->
|
||||
- [ ] [具体任务描述] <!--@todo:1.1-->
|
||||
- [ ] [具体任务描述] <!--@todo:1.2-->
|
||||
- [ ] [具体任务描述] <!--@todo:1.3-->
|
||||
<!--@todos:1:end-->
|
||||
<!--@note:1--> [可选备注]
|
||||
<!--@step:1:end-->
|
||||
|
||||
<!--@step:2:start-->
|
||||
## Step 2: [阶段名称] <!--@step:2:title-->
|
||||
[阶段目标描述] <!--@step:2:desc-->
|
||||
### Todo List
|
||||
<!--@todos:2:start-->
|
||||
- [ ] [具体任务描述] <!--@todo:2.1-->
|
||||
- [ ] [具体任务描述] <!--@todo:2.2-->
|
||||
- [ ] [具体任务描述] <!--@todo:2.3-->
|
||||
<!--@todos:2:end-->
|
||||
<!--@note:2--> [可选备注]
|
||||
<!--@step:2:end-->
|
||||
</format>
|
||||
|
||||
<style>
|
||||
- 标题简洁有力,突出核心主题
|
||||
- 描述准确概括该阶段的核心目标
|
||||
- 待办事项以动词开头,明确可执行
|
||||
- 保持专业术语的准确性
|
||||
- 语言流畅、逻辑清晰
|
||||
</style>
|
||||
{
|
||||
"task": "[主题] 深度调研计划",
|
||||
"steps": [
|
||||
{
|
||||
"id": "[id]",
|
||||
"title": "[阶段名称]",
|
||||
"description": "[阶段描述] @sub_agent"
|
||||
},
|
||||
{
|
||||
"id": "[id]",
|
||||
"title": "[阶段名称]",
|
||||
"description": "[阶段描述] @sub_agent"
|
||||
}
|
||||
]
|
||||
}
|
||||
</output>
|
||||
`;
|
||||
};
|
||||
|
|
|
|||
|
|
@@ -1,3 +1,7 @@
import type {
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam
} from '@fastgpt/global/core/ai/type';
import type { ToolNodeItemType } from './type';

const namespaceMap = new Map<string, string>([
@@ -101,7 +101,7 @@ export const getHistoryFileLinks = (histories: ChatItemType[]) => {
return histories
.filter((item) => {
if (item.obj === ChatRoleEnum.Human) {
return item.value.filter((value) => value.type === 'file');
return item.value.filter((value) => value.file);
}
return false;
})
@@ -83,7 +83,7 @@ const RenderInput = () => {
if (histories.length === 0) return pluginInputs;
try {
const historyValue = histories[0]?.value as UserChatItemValueItemType[];
const inputValueString = historyValue.find((item) => item.type === 'text')?.text?.content;
const inputValueString = historyValue.find((item) => item.text?.content)?.text?.content;

if (!inputValueString) return pluginInputs;
return JSON.parse(inputValueString) as FlowNodeInputItemType[];
@@ -135,7 +135,7 @@ const RenderInput = () => {
if (!historyValue) return undefined;

try {
const inputValueString = historyValue.find((item) => item.type === 'text')?.text?.content;
const inputValueString = historyValue.find((item) => item.text?.content)?.text?.content;
return (
inputValueString &&
JSON.parse(inputValueString).reduce(
@@ -160,7 +160,7 @@ const RenderInput = () => {
// Parse history file
const historyFileList = (() => {
const historyValue = histories[0]?.value as UserChatItemValueItemType[];
return historyValue?.filter((item) => item.type === 'file').map((item) => item.file);
return historyValue?.filter((item) => item.file).map((item) => item.file);
})();

reset({
|
|||
|
|
@@ -8,6 +8,7 @@ import AIResponseBox from '../../../components/AIResponseBox';
import { useTranslation } from 'next-i18next';
import ComplianceTip from '@/components/common/ComplianceTip/index';
import { ChatRecordContext } from '@/web/core/chat/context/chatRecordContext';
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';

const RenderOutput = () => {
const { t } = useTranslation();
@@ -38,7 +39,7 @@ const RenderOutput = () => {
<AIResponseBox
chatItemDataId={histories[1].dataId}
key={key}
value={value}
value={value as AIChatItemValueItemType}
isLastResponseValue={true}
isChatting={isChatting}
/>
|
|||
|
|
@@ -73,9 +73,9 @@ const InputFormEditModal = ({
}
];

const defaultValueType = inputTypeList
.flat()
.find((item) => item.value === inputType)?.defaultValueType;
const defaultValueType =
inputTypeList.flat().find((item) => item.value === inputType)?.defaultValueType ??
WorkflowIOValueTypeEnum.string;

const onSubmitSuccess = useCallback(
(data: UserInputFormItemType, action: 'confirm' | 'continue') => {
|
|||
|
|
@ -1,29 +1,28 @@
|
|||
import { describe, expect, it } from 'vitest';
|
||||
import {
|
||||
form2AppWorkflow,
|
||||
filterSensitiveFormData,
|
||||
getAppQGuideCustomURL
|
||||
} from '@/web/core/app/utils';
|
||||
import { filterSensitiveFormData, getAppQGuideCustomURL } from '@/web/core/app/utils';
|
||||
import { form2AppWorkflow } from '@/pageComponents/app/detail/Edit/SimpleApp/utils';
|
||||
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
|
||||
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
|
||||
import { getDefaultAppForm } from '@fastgpt/global/core/app/utils';
|
||||
import type { AppFormEditFormType } from '@fastgpt/global/core/app/type';
|
||||
|
||||
describe('form2AppWorkflow', () => {
|
||||
const mockT = (str: string) => str;
|
||||
|
||||
it('should generate simple chat workflow when no datasets or tools selected', () => {
|
||||
const form = {
|
||||
const form: AppFormEditFormType = {
|
||||
aiSettings: {
|
||||
model: 'gpt-3.5',
|
||||
temperature: 0.7,
|
||||
maxToken: 2000,
|
||||
systemPrompt: 'You are a helpful assistant',
|
||||
[NodeInputKeyEnum.aiModel]: 'gpt-3.5',
|
||||
[NodeInputKeyEnum.aiChatTemperature]: 0.7,
|
||||
[NodeInputKeyEnum.aiChatMaxToken]: 2000,
|
||||
[NodeInputKeyEnum.aiSystemPrompt]: 'You are a helpful assistant',
|
||||
maxHistories: 5,
|
||||
aiChatReasoning: true,
|
||||
aiChatTopP: 0.8,
|
||||
aiChatStopSign: '',
|
||||
aiChatResponseFormat: '',
|
||||
aiChatJsonSchema: ''
|
||||
[NodeInputKeyEnum.aiChatIsResponseText]: true,
|
||||
[NodeInputKeyEnum.aiChatReasoning]: true,
|
||||
[NodeInputKeyEnum.aiChatTopP]: 0.8,
|
||||
[NodeInputKeyEnum.aiChatStopSign]: '',
|
||||
[NodeInputKeyEnum.aiChatResponseFormat]: '',
|
||||
[NodeInputKeyEnum.aiChatJsonSchema]: ''
|
||||
},
|
||||
dataset: {
|
||||
datasets: [],
|
||||
|
|
@ -49,21 +48,29 @@ describe('form2AppWorkflow', () => {
|
|||
});
|
||||
|
||||
it('should generate dataset workflow when datasets are selected', () => {
|
||||
const form = {
|
||||
const form: AppFormEditFormType = {
|
||||
aiSettings: {
|
||||
model: 'gpt-3.5',
|
||||
temperature: 0.7,
|
||||
maxToken: 2000,
|
||||
systemPrompt: 'You are a helpful assistant',
|
||||
[NodeInputKeyEnum.aiModel]: 'gpt-3.5',
|
||||
[NodeInputKeyEnum.aiChatTemperature]: 0.7,
|
||||
[NodeInputKeyEnum.aiChatMaxToken]: 2000,
|
||||
[NodeInputKeyEnum.aiSystemPrompt]: 'You are a helpful assistant',
|
||||
maxHistories: 5,
|
||||
aiChatReasoning: true,
|
||||
aiChatTopP: 0.8,
|
||||
aiChatStopSign: '',
|
||||
aiChatResponseFormat: '',
|
||||
aiChatJsonSchema: ''
|
||||
[NodeInputKeyEnum.aiChatIsResponseText]: true,
|
||||
[NodeInputKeyEnum.aiChatReasoning]: true,
|
||||
[NodeInputKeyEnum.aiChatTopP]: 0.8,
|
||||
[NodeInputKeyEnum.aiChatStopSign]: '',
|
||||
[NodeInputKeyEnum.aiChatResponseFormat]: '',
|
||||
[NodeInputKeyEnum.aiChatJsonSchema]: ''
|
||||
},
|
||||
dataset: {
|
||||
datasets: ['dataset1'],
|
||||
datasets: [
|
||||
{
|
||||
datasetId: 'dataset1',
|
||||
avatar: '',
|
||||
name: 'Test Dataset',
|
||||
vectorModel: { model: 'text-embedding-ada-002' } as any
|
||||
}
|
||||
],
|
||||
similarity: 0.8,
|
||||
limit: 1500,
|
||||
searchMode: 'embedding',
|
||||
|
|
@ -88,14 +95,32 @@ describe('form2AppWorkflow', () => {
|
|||
|
||||
describe('filterSensitiveFormData', () => {
|
||||
it('should filter sensitive data from app form', () => {
|
||||
const appForm = {
|
||||
const appForm: AppFormEditFormType = {
|
||||
aiSettings: {
|
||||
model: 'gpt-4',
|
||||
temperature: 0.8
|
||||
[NodeInputKeyEnum.aiModel]: 'gpt-4',
|
||||
[NodeInputKeyEnum.aiChatTemperature]: 0.8,
|
||||
maxHistories: 5,
|
||||
[NodeInputKeyEnum.aiChatIsResponseText]: true
|
||||
},
|
||||
dataset: {
|
||||
datasets: ['sensitive-dataset'],
|
||||
similarity: 0.9
|
||||
datasets: [
|
||||
{
|
||||
datasetId: 'sensitive-dataset',
|
||||
avatar: '',
|
||||
name: 'Sensitive Dataset',
|
||||
vectorModel: { model: 'text-embedding-ada-002' } as any
|
||||
}
|
||||
],
|
||||
searchMode: 'embedding' as any,
|
||||
similarity: 0.9,
|
||||
limit: 1500,
|
||||
embeddingWeight: 0.7,
|
||||
usingReRank: false,
|
||||
rerankModel: '',
|
||||
rerankWeight: 0.5,
|
||||
datasetSearchUsingExtensionQuery: false,
|
||||
datasetSearchExtensionModel: '',
|
||||
datasetSearchExtensionBg: ''
|
||||
},
|
||||
selectedTools: [],
|
||||
chatConfig: {}
|
||||
|
|
@@ -125,7 +150,7 @@ describe('getAppQGuideCustomURL', () => {
]
}
]
};
} as any;

const result = getAppQGuideCustomURL(appDetail);
expect(result).toBe('https://example.com');
@@ -139,7 +164,7 @@ describe('getAppQGuideCustomURL', () => {
inputs: []
}
]
};
} as any;

const result = getAppQGuideCustomURL(appDetail);
expect(result).toBe('');