feat: plan ask (#5650)

* feat: plan ask

* fix

* fix: unit test

* fix: build

* refactor: plan llm call
francis 2025-09-16 15:54:41 +08:00 committed by archer
parent aeaad52f46
commit ac6b235280
15 changed files with 682 additions and 488 deletions

View File

@ -1,6 +1,6 @@
import type { NodeOutputItemType } from '../../../../chat/type';
import type { FlowNodeInputTypeEnum } from 'core/workflow/node/constant';
import type { WorkflowIOValueTypeEnum } from 'core/workflow/constants';
import type { FlowNodeInputTypeEnum } from '../../../../../core/workflow/node/constant';
import type { WorkflowIOValueTypeEnum } from '../../../../../core/workflow/constants';
import type { ChatCompletionMessageParam } from '../../../../ai/type';
import type { RuntimeEdgeItemType } from '../../../type/edge';

View File

@ -6,7 +6,10 @@ import type {
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemType, AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type {
InteractiveNodeResponseType,
WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { CreateLLMResponseProps, ResponseEvents } from './request';
import { createLLMResponse } from './request';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
@ -39,13 +42,14 @@ type RunAgentCallProps = {
response: string;
usages: ChatNodeUsageType[];
isEnd: boolean;
interactive?: InteractiveNodeResponseType;
}>;
} & ResponseEvents;
type RunAgentResponse = {
completeMessages: ChatCompletionMessageParam[];
assistantResponses: AIChatItemValueItemType[];
interactiveResponse?: WorkflowInteractiveResponseType;
interactiveResponse?: InteractiveNodeResponseType;
// Usage
inputTokens: number;
@ -71,7 +75,7 @@ export const runAgentCall = async ({
let runTimes = 0;
const assistantResponses: AIChatItemValueItemType[] = [];
let interactiveResponse: WorkflowInteractiveResponseType | undefined;
let interactiveResponse: InteractiveNodeResponseType | undefined;
let requestMessages = messages;
@ -127,7 +131,7 @@ export const runAgentCall = async ({
let isEndSign = false;
for await (const tool of toolCalls) {
// TODO: add interactive node handling
const { response, usages, isEnd } = await handleToolResponse({
const { response, usages, isEnd, interactive } = await handleToolResponse({
call: tool,
messages: requestMessages.slice(0, requestMessagesLength) // keep the original request context
});
@ -142,8 +146,12 @@ export const runAgentCall = async ({
content: response
});
subAppUsages.push(...usages);
}
if (interactive) {
interactiveResponse = interactive;
isEndSign = true;
}
}
// TODO: move the assistantResponses concat into the workflow
const currentAssistantResponses = GPTMessages2Chats({
messages: requestMessages.slice(requestMessagesLength),
@ -168,6 +176,7 @@ export const runAgentCall = async ({
outputTokens,
completeMessages: requestMessages,
assistantResponses,
subAppUsages
subAppUsages,
interactiveResponse
};
};
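Taken together, the changes above mean a tool call that yields an interactive response now ends the agent round instead of feeding another LLM turn. A minimal sketch of the resulting loop, assuming the scaffolding from this file (the loop body is simplified, not the exact file contents):

```ts
// Interactive short-circuit in the tool-call loop (simplified sketch).
for await (const tool of toolCalls) {
  const { response, usages, isEnd, interactive } = await handleToolResponse({
    call: tool,
    messages: requestMessages.slice(0, requestMessagesLength) // original request context
  });
  subAppUsages.push(...usages);
  if (interactive) {
    // A pending user interaction: expose it as the node's interactive output
    // and stop the loop so the user can answer before the next round.
    interactiveResponse = interactive;
    isEndSign = true;
  }
  if (isEnd) isEndSign = true;
}
```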

View File

@ -1,18 +1,30 @@
import { SubAppIds } from './sub/constants';
export const getMasterAgentDefaultPrompt = () => {
return `## Role
Task Orchestrator: you work in coordination with sub-agents.

## Workflow
1. First call "${SubAppIds.plan}" to produce a plan.
2. [step text lost in extraction]
3. [step text lost in extraction]

[The rest of the prompt body was garbled in extraction; the surviving outline covers execution principles — waiting for sub-agents to complete, re-invoking "${SubAppIds.plan}" when the plan changes — and a five-point orchestration checklist.]
`;
};

View File

@ -4,6 +4,7 @@ import {
WorkflowIOValueTypeEnum
} from '@fastgpt/global/core/workflow/constants';
import {
ConfirmPlanAgentText,
DispatchNodeResponseKeyEnum,
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
@ -56,6 +57,8 @@ import { addFilePrompt2Input, getFileInputPrompt } from './sub/file/utils';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { dispatchFileRead } from './sub/file';
import { dispatchApp, dispatchPlugin } from './sub/app';
import type { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { getNanoid } from '@fastgpt/global/common/string/tools';
export type DispatchAgentModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
@ -183,7 +186,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
// Get master request messages
const systemMessages = chats2GPTMessages({
messages: getSystemPrompt_ChatItemType(systemPrompt || getMasterAgentDefaultPrompt()),
messages: getSystemPrompt_ChatItemType(getMasterAgentDefaultPrompt()),
reserveId: false
});
const historyMessages: ChatCompletionMessageParam[] = (() => {
@ -193,6 +196,10 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
return chats2GPTMessages({ messages: chatHistories, reserveId: false });
})();
if (lastInteractive?.type !== 'userSelect' && lastInteractive?.type !== 'userInput') {
userChatInput = query[0].text?.content ?? userChatInput;
}
const userMessages = chats2GPTMessages({
messages: [
{
@ -205,38 +212,56 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
],
reserveId: false
});
const requestMessages = [...systemMessages, ...historyMessages, ...userMessages];
let planMessages: ChatCompletionMessageParam[] = [];
// TODO: run the plan function (only when lastInteractive is userSelect/userInput do we skip planning)
if (lastInteractive?.type !== 'userSelect' && lastInteractive?.type !== 'userInput') {
// const planResponse = xxxx
// requestMessages.push(a batch of tool calls)
if (
lastInteractive?.type !== 'userSelect' &&
lastInteractive?.type !== 'userInput' &&
userChatInput !== ConfirmPlanAgentText
) {
const { completeMessages, toolMessages, usages, interactiveResponse } =
await dispatchPlanAgent({
messages: requestMessages,
subApps: subAppList,
model,
temperature,
top_p: aiChatTopP,
stream,
onReasoning: ({ text }: { text: string }) => {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: text
})
});
},
onStreaming: ({ text }: { text: string }) => {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
})
});
}
});
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: 'This is the plan'
})
});
if (toolMessages) requestMessages.push(...toolMessages);
return {
[DispatchNodeResponseKeyEnum.memories]: {
[planMessagesKey]: [
{
role: 'user',
content: 'test'
},
{
role: 'assistant',
content: 'test'
}
]
[masterMessagesKey]: filterMemoryMessages(requestMessages),
[planMessagesKey]: filterMemoryMessages(completeMessages)
},
[DispatchNodeResponseKeyEnum.interactive]: interactiveResponse
// Mock: return plan check
[DispatchNodeResponseKeyEnum.interactive]: {
type: 'agentPlanCheck',
params: {}
}
// [DispatchNodeResponseKeyEnum.interactive]: {
// type: 'agentPlanCheck',
// params: {}
// }
// Mock: return plan user select
// [DispatchNodeResponseKeyEnum.interactive]: {
@ -254,7 +279,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
// }
// ]
// }
// },
// }
// Mock: return plan user input
// [DispatchNodeResponseKeyEnum.interactive]: {
// type: 'agentPlanAskUserForm',
@ -283,296 +308,307 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
}
const dispatchFlowResponse: ChatHistoryItemResType[] = [];
// console.log(JSON.stringify(requestMessages, null, 2));
const { completeMessages, assistantResponses, inputTokens, outputTokens, subAppUsages } =
await runAgentCall({
maxRunAgentTimes: 100,
interactiveEntryToolParams: lastInteractive?.toolParams,
body: {
messages: requestMessages,
model: agentModel,
temperature,
stream,
top_p: aiChatTopP,
subApps: subAppList
},
const {
completeMessages,
assistantResponses,
inputTokens,
outputTokens,
subAppUsages,
interactiveResponse
} = await runAgentCall({
maxRunAgentTimes: 100,
interactiveEntryToolParams: lastInteractive?.toolParams,
body: {
messages: requestMessages,
model: agentModel,
temperature,
stream,
top_p: aiChatTopP,
subApps: subAppList
},
userKey: externalProvider.openaiAccount,
isAborted: res ? () => res.closed : undefined,
getToolInfo: getSubAppInfo,
userKey: externalProvider.openaiAccount,
isAborted: res ? () => res.closed : undefined,
getToolInfo: getSubAppInfo,
onReasoning({ text }) {
workflowStreamResponse?.({
onReasoning({ text }) {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: text
})
});
},
onStreaming({ text }) {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
})
});
},
onToolCall({ call }) {
const subApp = getSubAppInfo(call.function.name);
workflowStreamResponse?.({
id: call.id,
event: SseResponseEventEnum.toolCall,
data: {
tool: {
id: `${nodeId}/${call.function.name}`,
toolName: subApp?.name || call.function.name,
toolAvatar: subApp?.avatar || '',
functionName: call.function.name,
params: call.function.arguments ?? ''
}
}
});
},
onToolParam({ call, params }) {
workflowStreamResponse?.({
id: call.id,
event: SseResponseEventEnum.toolParams,
data: {
tool: {
params
}
}
});
},
handleToolResponse: async ({ call, messages }) => {
const toolId = call.function.name;
const childWorkflowStreamResponse = getWorkflowChildResponseWrite({
subAppId: `${nodeId}/${toolId}`,
id: call.id,
fn: workflowStreamResponse
});
const onReasoning = ({ text }: { text: string }) => {
childWorkflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: text
})
});
},
onStreaming({ text }) {
workflowStreamResponse?.({
};
const onStreaming = ({ text }: { text: string }) => {
childWorkflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
})
});
},
onToolCall({ call }) {
const subApp = getSubAppInfo(call.function.name);
workflowStreamResponse?.({
id: call.id,
event: SseResponseEventEnum.toolCall,
data: {
tool: {
id: `${nodeId}/${call.function.name}`,
toolName: subApp?.name || call.function.name,
toolAvatar: subApp?.avatar || '',
functionName: call.function.name,
params: call.function.arguments ?? ''
}
}
});
},
onToolParam({ call, params }) {
workflowStreamResponse?.({
id: call.id,
event: SseResponseEventEnum.toolParams,
data: {
tool: {
params
}
}
});
},
};
handleToolResponse: async ({ call, messages }) => {
const toolId = call.function.name;
const childWorkflowStreamResponse = getWorkflowChildResponseWrite({
subAppId: `${nodeId}/${toolId}`,
id: call.id,
fn: workflowStreamResponse
});
const onReasoning = ({ text }: { text: string }) => {
childWorkflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: text
})
});
};
const onStreaming = ({ text }: { text: string }) => {
childWorkflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
})
});
};
const {
response,
usages = [],
isEnd
} = await (async () => {
try {
if (toolId === SubAppIds.stop) {
return {
response: '',
usages: [],
isEnd: true
};
} else if (toolId === SubAppIds.plan) {
const { response, usages } = await dispatchPlanAgent({
messages,
tools: subAppList,
model,
temperature,
top_p: aiChatTopP,
stream,
onReasoning,
onStreaming
});
return {
response,
usages,
isEnd: false
};
} else if (toolId === SubAppIds.model) {
const { systemPrompt, task } = parseToolArgs<{
systemPrompt: string;
task: string;
}>(call.function.arguments);
const { response, usages } = await dispatchModelAgent({
model,
temperature,
top_p: aiChatTopP,
stream,
systemPrompt,
task,
onReasoning,
onStreaming
});
return {
response,
usages,
isEnd: false
};
} else if (toolId === SubAppIds.fileRead) {
const { file_indexes } = parseToolArgs<{
file_indexes: string[];
}>(call.function.arguments);
if (!Array.isArray(file_indexes)) {
return {
response: 'file_indexes is not array',
usages: [],
isEnd: false
};
}
const files = file_indexes.map((index) => ({
index,
url: filesMap[index]
}));
const result = await dispatchFileRead({
files,
teamId: runningUserInfo.teamId,
tmbId: runningUserInfo.tmbId,
customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse
});
return {
response: result.response,
usages: result.usages,
isEnd: false
};
}
// User Sub App
else {
const node = subAppsMap.get(toolId);
if (!node) {
return {
response: 'Can not find the tool',
usages: [],
isEnd: false
};
}
const toolCallParams = parseToolArgs(call.function.arguments);
// Get params
const requestParams = (() => {
const params: Record<string, any> = toolCallParams;
node.inputs.forEach((input) => {
if (input.key in toolCallParams) {
return;
}
// Skip some special key
if (
[
NodeInputKeyEnum.childrenNodeIdList,
NodeInputKeyEnum.systemInputConfig
].includes(input.key as NodeInputKeyEnum)
) {
params[input.key] = input.value;
return;
}
// replace {{$xx.xx$}} and {{xx}} variables
let value = replaceEditorVariable({
text: input.value,
nodes: runtimeNodes,
variables
});
// replace reference variables
value = getReferenceVariableValue({
value,
nodes: runtimeNodes,
variables
});
params[input.key] = valueTypeFormat(value, input.valueType);
});
return params;
})();
if (node.flowNodeType === FlowNodeTypeEnum.tool) {
const { response, usages } = await dispatchTool({
node,
params: requestParams,
runningUserInfo,
runningAppInfo,
variables,
workflowStreamResponse: childWorkflowStreamResponse
});
return {
response,
usages,
isEnd: false
};
} else if (
node.flowNodeType === FlowNodeTypeEnum.appModule ||
node.flowNodeType === FlowNodeTypeEnum.pluginModule
) {
const fn =
node.flowNodeType === FlowNodeTypeEnum.appModule ? dispatchApp : dispatchPlugin;
const { response, usages } = await fn({
...props,
node,
workflowStreamResponse: childWorkflowStreamResponse,
callParams: {
appId: node.pluginId,
version: node.version,
...requestParams
}
});
return {
response,
usages,
isEnd: false
};
} else {
return {
response: 'Can not find the tool',
usages: [],
isEnd: false
};
}
}
} catch (error) {
const {
response,
usages = [],
isEnd,
interactive
} = await (async () => {
try {
if (toolId === SubAppIds.stop) {
return {
response: getErrText(error),
response: '',
usages: [],
isEnd: true
};
} else if (toolId === SubAppIds.plan) {
const { completeMessages, response, usages, interactiveResponse } =
await dispatchPlanAgent({
messages,
subApps: subAppList,
model,
temperature,
top_p: aiChatTopP,
stream,
onReasoning,
onStreaming
});
planMessages = completeMessages;
return {
response,
usages,
isEnd: false,
interactive: interactiveResponse
};
} else if (toolId === SubAppIds.model) {
const { systemPrompt, task } = parseToolArgs<{
systemPrompt: string;
task: string;
}>(call.function.arguments);
const { response, usages } = await dispatchModelAgent({
model,
temperature,
top_p: aiChatTopP,
stream,
systemPrompt,
task,
onReasoning,
onStreaming
});
return {
response,
usages,
isEnd: false
};
} else if (toolId === SubAppIds.fileRead) {
const { file_indexes } = parseToolArgs<{
file_indexes: string[];
}>(call.function.arguments);
if (!Array.isArray(file_indexes)) {
return {
response: 'file_indexes is not array',
usages: [],
isEnd: false
};
}
const files = file_indexes.map((index) => ({
index,
url: filesMap[index]
}));
const result = await dispatchFileRead({
files,
teamId: runningUserInfo.teamId,
tmbId: runningUserInfo.tmbId,
customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse
});
return {
response: result.response,
usages: result.usages,
isEnd: false
};
}
})();
// User Sub App
else {
const node = subAppsMap.get(toolId);
if (!node) {
return {
response: 'Can not find the tool',
usages: [],
isEnd: false
};
}
// Push stream response
workflowStreamResponse?.({
id: call.id,
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: call.id,
response
const toolCallParams = parseToolArgs(call.function.arguments);
// Get params
const requestParams = (() => {
const params: Record<string, any> = toolCallParams;
node.inputs.forEach((input) => {
if (input.key in toolCallParams) {
return;
}
// Skip some special key
if (
[
NodeInputKeyEnum.childrenNodeIdList,
NodeInputKeyEnum.systemInputConfig
].includes(input.key as NodeInputKeyEnum)
) {
params[input.key] = input.value;
return;
}
// replace {{$xx.xx$}} and {{xx}} variables
let value = replaceEditorVariable({
text: input.value,
nodes: runtimeNodes,
variables
});
// replace reference variables
value = getReferenceVariableValue({
value,
nodes: runtimeNodes,
variables
});
params[input.key] = valueTypeFormat(value, input.valueType);
});
return params;
})();
if (node.flowNodeType === FlowNodeTypeEnum.tool) {
const { response, usages } = await dispatchTool({
node,
params: requestParams,
runningUserInfo,
runningAppInfo,
variables,
workflowStreamResponse: childWorkflowStreamResponse
});
return {
response,
usages,
isEnd: false
};
} else if (
node.flowNodeType === FlowNodeTypeEnum.appModule ||
node.flowNodeType === FlowNodeTypeEnum.pluginModule
) {
const fn =
node.flowNodeType === FlowNodeTypeEnum.appModule ? dispatchApp : dispatchPlugin;
const { response, usages } = await fn({
...props,
node,
workflowStreamResponse: childWorkflowStreamResponse,
callParams: {
appId: node.pluginId,
version: node.version,
...requestParams
}
});
return {
response,
usages,
isEnd: false
};
} else {
return {
response: 'Can not find the tool',
usages: [],
isEnd: false
};
}
}
});
} catch (error) {
return {
response: getErrText(error),
usages: [],
isEnd: false
};
}
})();
// TODO: push usage billing
// Push stream response
workflowStreamResponse?.({
id: call.id,
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: call.id,
response
}
}
});
return {
response,
usages,
isEnd
};
}
});
// TODO: push usage billing
return {
response,
usages,
isEnd,
interactive
};
}
});
// Usage count
const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({
@ -597,7 +633,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
// TODO: memoryMessages should be stored in a dedicated collection
[DispatchNodeResponseKeyEnum.memories]: {
[masterMessagesKey]: filterMemoryMessages(completeMessages),
[planMessagesKey]: [] // TODO: plan messages need to be recorded
[planMessagesKey]: [filterMemoryMessages(planMessages)]
},
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
@ -628,7 +664,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
// Tool usage
...subAppUsages
],
[DispatchNodeResponseKeyEnum.interactive]: undefined
[DispatchNodeResponseKeyEnum.interactive]: interactiveResponse
};
} catch (error) {
return getNodeErrResponse({ error });

View File

@ -1,61 +0,0 @@
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
import { SubAppIds } from '../constants';
export const AskAgentTool: ChatCompletionTool = {
type: 'function',
function: {
name: SubAppIds.ask,
description: 'Call this tool to ask the user an interactive question',
parameters: {
type: 'object',
properties: {
mode: {
type: 'string',
enum: ['userSelect', 'formInput', 'userInput'],
description: '交互模式'
},
prompt: {
type: 'string',
description: 'The prompt message shown to the user'
},
options: {
type: 'array',
description: 'Options to choose from when mode=userSelect',
items: {
type: 'string'
}
},
form: {
type: 'array',
description: 'List of form fields to fill in when mode=formInput',
items: {
type: 'object',
properties: {
field: {
type: 'string',
description: 'Field name, e.g. name, age; also shown to the user as the label'
},
type: {
type: 'string',
enum: ['textInput', 'numberInput', 'singleSelect', 'multiSelect'],
description: 'Field input type'
},
required: { type: 'boolean', description: 'Whether this field is required', default: false },
options: {
type: 'array',
description: 'Available options when type is singleSelect or multiSelect',
items: { type: 'string' }
}
},
required: ['field', 'type']
}
},
userInput: {
type: 'string',
description: 'Free-form content entered by the user when mode=userInput'
}
},
required: ['mode', 'prompt']
}
}
};

View File

@ -0,0 +1,91 @@
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
import { SubAppIds } from '../../constants';
export type AskAgentToolParamsType = Partial<{
mode: 'select' | 'formInput' | 'input';
prompt: string;
options: string[];
form: {
field: string;
type: 'textInput' | 'numberInput' | 'singleSelect' | 'multiSelect';
required: boolean;
options: string[];
}[];
}>;
export const AskAgentTool: ChatCompletionTool = {
type: 'function',
function: {
name: SubAppIds.ask,
description: `
Interactive ask tool: the LLM calls this tool to put a question to the user.
Use mode, prompt, and options together:
1. mode = "select"
- Ask the user to pick from a fixed set of choices.
- prompt: the main question text shown above the options
- options: a string array of choices
- Use when:
* the candidate answers can be enumerated
* a single click is the most convenient way to answer
2. mode = "input"
- Ask the user for free-form input.
- prompt: the question text to display
- options: usually left empty or ignored in this mode
- Use when:
* the answer is open-ended (e.g. a URL or path)
* the choices cannot be enumerated as "select" options, so "input" is needed
Guidance:
- Prefer "select" when the choices can be enumerated.
- Prefer "input" when free text is required.
- If a "Something else" style entry appears in options, fall back to "input".
`,
parameters: {
type: 'object',
properties: {
mode: {
type: 'string',
enum: ['select', 'input'],
description: 'Interaction mode'
},
prompt: {
type: 'string',
description: 'The prompt message shown to the user'
},
options: {
type: 'array',
description: 'Options to choose from when mode=select',
items: {
type: 'string'
}
}
// form: {
// type: 'array',
// description: 'List of form fields to fill in when mode=formInput',
// items: {
// type: 'object',
// properties: {
// field: {
// type: 'string',
// description: 'Field name, e.g. name, age; also shown to the user as the label'
// },
// type: {
// type: 'string',
// enum: ['textInput', 'numberInput', 'singleSelect', 'multiSelect'],
// description: 'Field input type'
// },
// required: { type: 'boolean', description: 'Whether this field is required', default: false },
// options: {
// type: 'array',
// description: 'Available options when type is singleSelect or multiSelect',
// items: { type: 'string' }
// }
// },
// required: ['field', 'type']
// }
// },
},
required: ['mode', 'prompt']
}
}
};
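To make the contract concrete, the arguments a model might send when calling this tool could look like the following sketch; the values are hypothetical, and the object type-checks against the exported `AskAgentToolParamsType`:

```ts
// Hypothetical ask_agent arguments for the "select" mode.
const exampleAskArgs: AskAgentToolParamsType = {
  mode: 'select',
  prompt: 'Which data source should the plan rely on?',
  options: ['Internal knowledge base', 'Web search', 'Uploaded files']
};
```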

View File

@ -5,6 +5,7 @@ export const PlanAgentTool: ChatCompletionTool = {
type: 'function',
function: {
name: SubAppIds.plan,
description: 'Analyze and break down the user question, and produce a step-by-step plan.'
description: 'Analyze and break down the user question, and produce a step-by-step plan.',
parameters: {}
}
};

View File

@ -8,10 +8,16 @@ import { getLLMModel } from '../../../../../../ai/model';
import { formatModelChars2Points } from '../../../../../../../support/wallet/usage/utils';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { SubAppIds } from '../constants';
import type { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { runAgentCall } from '../../../../../../../core/ai/llm/agentCall';
import { parseToolArgs } from '../../../utils';
import { AskAgentTool, type AskAgentToolParamsType } from './ask/constants';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
type PlanAgentConfig = {
model: string;
customSystemPrompt?: string;
systemPrompt?: string;
temperature?: number;
top_p?: number;
stream?: boolean;
@ -19,7 +25,7 @@ type PlanAgentConfig = {
type DispatchPlanAgentProps = PlanAgentConfig & {
messages: ChatCompletionMessageParam[];
tools: ChatCompletionTool[];
subApps: ChatCompletionTool[];
onReasoning: ResponseEvents['onReasoning'];
onStreaming: ResponseEvents['onStreaming'];
};
@ -27,14 +33,17 @@ type DispatchPlanAgentProps = PlanAgentConfig & {
type DispatchPlanAgentResponse = {
response: string;
usages: ChatNodeUsageType[];
completeMessages: ChatCompletionMessageParam[];
toolMessages?: ChatCompletionMessageParam[];
interactiveResponse?: InteractiveNodeResponseType;
};
export const dispatchPlanAgent = async ({
messages,
tools,
subApps,
model,
customSystemPrompt,
systemPrompt,
temperature,
top_p,
stream,
@ -46,14 +55,21 @@ export const dispatchPlanAgent = async ({
const requestMessages: ChatCompletionMessageParam[] = [
{
role: 'system',
content: getPlanAgentPrompt(customSystemPrompt)
content: getPlanAgentPrompt(systemPrompt)
},
...messages.filter((item) => item.role !== 'system'),
{ role: 'user', content: 'Start plan' }
...messages.filter((item) => item.role !== 'system')
];
const filterPlanTools = tools.filter((item) => item.function.name !== SubAppIds.plan);
const filterPlanTools = subApps.filter((item) => item.function.name !== SubAppIds.plan);
filterPlanTools.push(AskAgentTool);
const { answerText, usage } = await createLLMResponse({
const {
reasoningText,
answerText,
toolCalls = [],
usage,
getEmptyResponseTip,
completeMessages
} = await createLLMResponse({
body: {
model: modelData.model,
temperature,
@ -70,12 +86,74 @@ export const dispatchPlanAgent = async ({
onStreaming
});
if (!answerText && !reasoningText && !toolCalls.length) {
return Promise.reject(getEmptyResponseTip());
}
// TODO: handle the case of multiple concurrent interactive responses
let interactiveResponse: InteractiveNodeResponseType = {
type: 'agentPlanCheck',
params: {}
};
for await (const call of toolCalls) {
const toolId = call.function.name;
if (toolId === SubAppIds.ask) {
const params = parseToolArgs<AskAgentToolParamsType>(call.function.arguments);
if (params.mode === 'select') {
interactiveResponse = {
type: 'agentPlanAskUserSelect',
params: {
description: params?.prompt ?? 'Select an option',
userSelectOptions: params?.options?.map((v, i) => {
return { key: `option${i}`, value: v };
})
}
} as InteractiveNodeResponseType;
}
if (params.mode === 'input') {
interactiveResponse = {
type: 'agentPlanAskQuery',
params: {
content: params?.prompt ?? 'Enter more details'
}
};
}
}
}
const { totalPoints, modelName } = formatModelChars2Points({
model: modelData.model,
inputTokens: usage.inputTokens,
outputTokens: usage.outputTokens
});
const toolMessages: ChatCompletionMessageParam[] = [];
if (answerText) {
const toolId = getNanoid(6);
const toolCall: ChatCompletionMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: [
{
id: toolId,
type: 'function',
function: {
name: SubAppIds.plan,
arguments: ''
}
}
]
};
const toolCallResponse: ChatCompletionMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Tool,
tool_call_id: toolId,
content: answerText
};
toolMessages.push(toolCall, toolCallResponse);
}
return {
response: answerText,
usages: [
@ -86,6 +164,9 @@ export const dispatchPlanAgent = async ({
inputTokens: usage.inputTokens,
outputTokens: usage.outputTokens
}
]
],
completeMessages,
toolMessages,
interactiveResponse
};
};
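The `toolMessages` pair built near the end splices the plan answer back into the master conversation as if it had come from an ordinary tool call. Assuming that reading, the two appended messages have roughly this shape (the id value is illustrative):

```ts
// Sketch of the synthesized tool-call pair carrying the plan answer.
const toolId = getNanoid(6); // e.g. 'aB3xQ9'
const splicedPlanMessages: ChatCompletionMessageParam[] = [
  {
    role: ChatCompletionRequestMessageRoleEnum.Assistant,
    tool_calls: [
      { id: toolId, type: 'function', function: { name: SubAppIds.plan, arguments: '' } }
    ]
  },
  {
    role: ChatCompletionRequestMessageRoleEnum.Tool,
    tool_call_id: toolId,
    content: answerText // the plan text produced above
  }
];
```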

View File

@ -11,37 +11,53 @@ ${
: ''
}
<task>
[task statement lost in extraction]
</task>
<inputs>
- [input item lost]
- [input item lost]
</inputs>
<process>
1. [step text lost]
2. [old: split the task into 2-3 phases of 4-7 steps each]
3. [old: 3-5 MECE todos per step]
2. [step text lost]
3. Ask clarifying questions via "ask_agent".
4. [step text lost]
5. [old: use [] placeholder markers]
5. Make the plan conform to the JSON Schema below.
6. Use "ask_agent" when needed.
7. Output the complete plan in JSON format.
</process>
<requirements>
- [old: marker conventions]
* <!--@title-->
* <!--@desc-->
* <!--@step:N:start--> <!--@step:N:end-->
* <!--@step:N:title-->
* <!--@step:N:desc-->
* <!--@todos:N:start--> <!--@todos:N:end-->
* <!--@todo:N.X-->
* <!--@note:N-->
- [old: 3-5 todos per step]
- [old: N is the step index, X the todo index]
- [old: todos must be MECE]
- [requirement lost]
- Output pure JSON: no \`\`\` fences, comments, or extra explanatory text.
- The plan must conform to the following JSON Schema:
\`\`\`json
{
"type": "object",
"properties": {
"task": {
"type": "string",
"description": "Task topic; should accurately cover the core content and dimensions of all execution steps"
},
"steps": {
"type": "array",
"description": "List of stage steps",
"items": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Unique identifier"
},
"title": {
"type": "string",
"description": "Stage title"
},
"description": {
"type": "string",
"description": "Stage description; end with an @mention of the tool or sub-agent the step will be handed to"
}
},
"required": ["id", "title", "description"]
}
}
},
"required": ["task", "steps"]
}
\`\`\`
</requirements>
<guardrails>
@ -52,42 +68,21 @@ ${
<output>
<format>
# [title] <!--@title-->
[overview] [scope] <!--@desc-->
<!--@step:1:start-->
## Step 1: [stage name] <!--@step:1:title-->
[stage description] <!--@step:1:desc-->
### Todo List
<!--@todos:1:start-->
- [ ] [todo item] <!--@todo:1.1-->
- [ ] [todo item] <!--@todo:1.2-->
- [ ] [todo item] <!--@todo:1.3-->
<!--@todos:1:end-->
<!--@note:1--> [note]
<!--@step:1:end-->
<!--@step:2:start-->
## Step 2: [stage name] <!--@step:2:title-->
[stage description] <!--@step:2:desc-->
### Todo List
<!--@todos:2:start-->
- [ ] [todo item] <!--@todo:2.1-->
- [ ] [todo item] <!--@todo:2.2-->
- [ ] [todo item] <!--@todo:2.3-->
<!--@todos:2:end-->
<!--@note:2--> [note]
<!--@step:2:end-->
</format>
<style>
- [style rules lost in extraction]
</style>
{
"task": "[主题] 深度调研计划",
"steps": [
{
"id": "[id]",
"title": "[阶段名称]",
"description": "[阶段描述] @sub_agent"
},
{
"id": "[id]",
"title": "[阶段名称]",
"description": "[阶段描述] @sub_agent"
}
]
}
</output>
`;
};

View File

@ -1,3 +1,7 @@
import type {
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam
} from '@fastgpt/global/core/ai/type';
import type { ToolNodeItemType } from './type';
const namespaceMap = new Map<string, string>([

View File

@ -101,7 +101,7 @@ export const getHistoryFileLinks = (histories: ChatItemType[]) => {
return histories
.filter((item) => {
if (item.obj === ChatRoleEnum.Human) {
return item.value.filter((value) => value.type === 'file');
return item.value.filter((value) => value.file);
}
return false;
})

View File

@ -83,7 +83,7 @@ const RenderInput = () => {
if (histories.length === 0) return pluginInputs;
try {
const historyValue = histories[0]?.value as UserChatItemValueItemType[];
const inputValueString = historyValue.find((item) => item.type === 'text')?.text?.content;
const inputValueString = historyValue.find((item) => item.text?.content)?.text?.content;
if (!inputValueString) return pluginInputs;
return JSON.parse(inputValueString) as FlowNodeInputItemType[];
@ -135,7 +135,7 @@ const RenderInput = () => {
if (!historyValue) return undefined;
try {
const inputValueString = historyValue.find((item) => item.type === 'text')?.text?.content;
const inputValueString = historyValue.find((item) => item.text?.content)?.text?.content;
return (
inputValueString &&
JSON.parse(inputValueString).reduce(
@ -160,7 +160,7 @@ const RenderInput = () => {
// Parse history file
const historyFileList = (() => {
const historyValue = histories[0]?.value as UserChatItemValueItemType[];
return historyValue?.filter((item) => item.type === 'file').map((item) => item.file);
return historyValue?.filter((item) => item.file).map((item) => item.file);
})();
reset({

View File

@ -8,6 +8,7 @@ import AIResponseBox from '../../../components/AIResponseBox';
import { useTranslation } from 'next-i18next';
import ComplianceTip from '@/components/common/ComplianceTip/index';
import { ChatRecordContext } from '@/web/core/chat/context/chatRecordContext';
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
const RenderOutput = () => {
const { t } = useTranslation();
@ -38,7 +39,7 @@ const RenderOutput = () => {
<AIResponseBox
chatItemDataId={histories[1].dataId}
key={key}
value={value}
value={value as AIChatItemValueItemType}
isLastResponseValue={true}
isChatting={isChatting}
/>

View File

@ -73,9 +73,9 @@ const InputFormEditModal = ({
}
];
const defaultValueType = inputTypeList
.flat()
.find((item) => item.value === inputType)?.defaultValueType;
const defaultValueType =
inputTypeList.flat().find((item) => item.value === inputType)?.defaultValueType ??
WorkflowIOValueTypeEnum.string;
const onSubmitSuccess = useCallback(
(data: UserInputFormItemType, action: 'confirm' | 'continue') => {

View File

@ -1,29 +1,28 @@
import { describe, expect, it } from 'vitest';
import {
form2AppWorkflow,
filterSensitiveFormData,
getAppQGuideCustomURL
} from '@/web/core/app/utils';
import { filterSensitiveFormData, getAppQGuideCustomURL } from '@/web/core/app/utils';
import { form2AppWorkflow } from '@/pageComponents/app/detail/Edit/SimpleApp/utils';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { getDefaultAppForm } from '@fastgpt/global/core/app/utils';
import type { AppFormEditFormType } from '@fastgpt/global/core/app/type';
describe('form2AppWorkflow', () => {
const mockT = (str: string) => str;
it('should generate simple chat workflow when no datasets or tools selected', () => {
const form = {
const form: AppFormEditFormType = {
aiSettings: {
model: 'gpt-3.5',
temperature: 0.7,
maxToken: 2000,
systemPrompt: 'You are a helpful assistant',
[NodeInputKeyEnum.aiModel]: 'gpt-3.5',
[NodeInputKeyEnum.aiChatTemperature]: 0.7,
[NodeInputKeyEnum.aiChatMaxToken]: 2000,
[NodeInputKeyEnum.aiSystemPrompt]: 'You are a helpful assistant',
maxHistories: 5,
aiChatReasoning: true,
aiChatTopP: 0.8,
aiChatStopSign: '',
aiChatResponseFormat: '',
aiChatJsonSchema: ''
[NodeInputKeyEnum.aiChatIsResponseText]: true,
[NodeInputKeyEnum.aiChatReasoning]: true,
[NodeInputKeyEnum.aiChatTopP]: 0.8,
[NodeInputKeyEnum.aiChatStopSign]: '',
[NodeInputKeyEnum.aiChatResponseFormat]: '',
[NodeInputKeyEnum.aiChatJsonSchema]: ''
},
dataset: {
datasets: [],
@ -49,21 +48,29 @@ describe('form2AppWorkflow', () => {
});
it('should generate dataset workflow when datasets are selected', () => {
const form = {
const form: AppFormEditFormType = {
aiSettings: {
model: 'gpt-3.5',
temperature: 0.7,
maxToken: 2000,
systemPrompt: 'You are a helpful assistant',
[NodeInputKeyEnum.aiModel]: 'gpt-3.5',
[NodeInputKeyEnum.aiChatTemperature]: 0.7,
[NodeInputKeyEnum.aiChatMaxToken]: 2000,
[NodeInputKeyEnum.aiSystemPrompt]: 'You are a helpful assistant',
maxHistories: 5,
aiChatReasoning: true,
aiChatTopP: 0.8,
aiChatStopSign: '',
aiChatResponseFormat: '',
aiChatJsonSchema: ''
[NodeInputKeyEnum.aiChatIsResponseText]: true,
[NodeInputKeyEnum.aiChatReasoning]: true,
[NodeInputKeyEnum.aiChatTopP]: 0.8,
[NodeInputKeyEnum.aiChatStopSign]: '',
[NodeInputKeyEnum.aiChatResponseFormat]: '',
[NodeInputKeyEnum.aiChatJsonSchema]: ''
},
dataset: {
datasets: ['dataset1'],
datasets: [
{
datasetId: 'dataset1',
avatar: '',
name: 'Test Dataset',
vectorModel: { model: 'text-embedding-ada-002' } as any
}
],
similarity: 0.8,
limit: 1500,
searchMode: 'embedding',
@ -88,14 +95,32 @@ describe('form2AppWorkflow', () => {
describe('filterSensitiveFormData', () => {
it('should filter sensitive data from app form', () => {
const appForm = {
const appForm: AppFormEditFormType = {
aiSettings: {
model: 'gpt-4',
temperature: 0.8
[NodeInputKeyEnum.aiModel]: 'gpt-4',
[NodeInputKeyEnum.aiChatTemperature]: 0.8,
maxHistories: 5,
[NodeInputKeyEnum.aiChatIsResponseText]: true
},
dataset: {
datasets: ['sensitive-dataset'],
similarity: 0.9
datasets: [
{
datasetId: 'sensitive-dataset',
avatar: '',
name: 'Sensitive Dataset',
vectorModel: { model: 'text-embedding-ada-002' } as any
}
],
searchMode: 'embedding' as any,
similarity: 0.9,
limit: 1500,
embeddingWeight: 0.7,
usingReRank: false,
rerankModel: '',
rerankWeight: 0.5,
datasetSearchUsingExtensionQuery: false,
datasetSearchExtensionModel: '',
datasetSearchExtensionBg: ''
},
selectedTools: [],
chatConfig: {}
@ -125,7 +150,7 @@ describe('getAppQGuideCustomURL', () => {
]
}
]
};
} as any;
const result = getAppQGuideCustomURL(appDetail);
expect(result).toBe('https://example.com');
@ -139,7 +164,7 @@ describe('getAppQGuideCustomURL', () => {
inputs: []
}
]
};
} as any;
const result = getAppQGuideCustomURL(appDetail);
expect(result).toBe('');