feat: plan response in ui

archer 2025-11-18 18:02:21 +08:00
parent f7bc8cc090
commit 3e5208ef4c
No known key found for this signature in database
GPG Key ID: 4446499B846D4A9E
47 changed files with 1106 additions and 1548 deletions

View File

@ -15,6 +15,7 @@ import type { FlowNodeInputItemType } from '../workflow/type/io';
import type { FlowNodeTemplateType } from '../workflow/type/node.d';
import { ChatCompletionMessageParam } from '../ai/type';
import type { RequireOnlyOne } from '../../common/type/utils';
import type { AgentPlanType } from '../../../service/core/workflow/dispatch/ai/agent/sub/plan/type';
/* --------- chat ---------- */
export type ChatSchemaType = {
@ -95,17 +96,10 @@ export type AIChatItemValueItemType = {
};
tool: ToolModuleResponseItemType;
interactive: WorkflowInteractiveResponseType;
text?: {
content: string;
};
reasoning?: {
content: string;
};
interactive?: WorkflowInteractiveResponseType;
agentPlan: AgentPlanType;
// Abandon
tools?: ToolModuleResponseItemType[];
tools: ToolModuleResponseItemType[];
}>;
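A minimal sketch (not part of this commit) of how a renderer might narrow the new one-of value union; AgentPlanType is assumed to expose steps ({ id, title, ... }[]) and a replan flag, matching its usage in the agent dispatch code later in this commit.

// Sketch only: narrowing the one-of value union in a UI layer.
const renderValueItem = (item: AIChatItemValueItemType): string => {
  if (item.text) return item.text.content;
  if (item.reasoning) return `[reasoning] ${item.reasoning.content}`;
  if (item.agentPlan) {
    // AgentPlanType shape assumed from its usage in dispatchRunAgent below
    return item.agentPlan.steps.map((s) => `# ${s.id}: ${s.title}`).join('\n');
  }
  return '';
};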
export type AIChatItemType = {
obj: ChatRoleEnum.AI;

View File

@ -15,7 +15,8 @@ export enum SseResponseEventEnum {
flowResponses = 'flowResponses', // sse response request
updateVariables = 'updateVariables',
interactive = 'interactive' // user select
interactive = 'interactive', // user select
agentPlan = 'agentPlan' // agent plan
}
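A minimal client-side sketch of branching on the new event. The `{ agentPlan }` payload shape is taken from the workflowStreamResponse calls later in this commit; updatePlanPanel is a hypothetical UI hook.

declare function updatePlanPanel(plan: unknown): void; // hypothetical UI hook
const onSseEvent = (event: SseResponseEventEnum, data: any) => {
  switch (event) {
    case SseResponseEventEnum.interactive:
      // existing user-select handling
      break;
    case SseResponseEventEnum.agentPlan:
      // new in this commit: the server sends { agentPlan } when a plan is (re)generated
      updatePlanPanel(data.agentPlan);
      break;
  }
};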
export enum DispatchNodeResponseKeyEnum {

View File

@ -20,13 +20,13 @@ type InteractiveNodeType = {
nodeOutputs?: NodeOutputItemType[];
};
type ChildrenInteractive = InteractiveNodeType & {
export type ChildrenInteractive = InteractiveNodeType & {
type: 'childrenInteractive';
params: {
childrenResponse: WorkflowInteractiveResponseType;
};
};
type ToolCallChildrenInteractive = InteractiveNodeType & {
export type ToolCallChildrenInteractive = InteractiveNodeType & {
type: 'toolChildrenInteractive';
params: {
childrenResponse: WorkflowInteractiveResponseType;
@ -38,7 +38,7 @@ type ToolCallChildrenInteractive = InteractiveNodeType & {
};
// Loop node
type LoopInteractive = InteractiveNodeType & {
export type LoopInteractive = InteractiveNodeType & {
type: 'loopInteractive';
params: {
loopResult: any[];

View File

@ -1,213 +0,0 @@
import type {
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionMessageToolCall
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemType, AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import type {
InteractiveNodeResponseType,
WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { CreateLLMResponseProps, ResponseEvents } from './request';
import { createLLMResponse } from './request';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { countGptMessagesTokens, countPromptTokens } from '../../../common/string/tiktoken/index';
import { addLog } from '../../../common/system/log';
import type { AgentPlanStepType } from '../../workflow/dispatch/ai/agent/sub/plan/type';
import { calculateCompressionThresholds } from './compress/constants';
import { compressRequestMessages, compressToolcallResponse } from './compress';
type RunAgentCallProps = {
maxRunAgentTimes: number;
interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
currentStep: AgentPlanStepType;
body: {
messages: ChatCompletionMessageParam[];
model: LLMModelItemType;
temperature?: number;
top_p?: number;
stream?: boolean;
subApps: ChatCompletionTool[];
};
userKey?: CreateLLMResponseProps['userKey'];
isAborted?: CreateLLMResponseProps['isAborted'];
getToolInfo: (id: string) => {
name: string;
avatar: string;
};
handleToolResponse: (e: {
call: ChatCompletionMessageToolCall;
messages: ChatCompletionMessageParam[];
}) => Promise<{
response: string;
usages: ChatNodeUsageType[];
interactive?: InteractiveNodeResponseType;
}>;
} & ResponseEvents;
type RunAgentResponse = {
completeMessages: ChatCompletionMessageParam[];
assistantResponses: AIChatItemValueItemType[];
interactiveResponse?: InteractiveNodeResponseType;
// Usage
inputTokens: number;
outputTokens: number;
subAppUsages: ChatNodeUsageType[];
};
export const runAgentCall = async ({
maxRunAgentTimes,
interactiveEntryToolParams,
currentStep,
body: { model, messages, stream, temperature, top_p, subApps },
userKey,
isAborted,
handleToolResponse,
getToolInfo,
onReasoning,
onStreaming,
onToolCall,
onToolParam
}: RunAgentCallProps): Promise<RunAgentResponse> => {
let runTimes = 0;
const assistantResponses: AIChatItemValueItemType[] = [];
let interactiveResponse: InteractiveNodeResponseType | undefined;
let requestMessages = messages;
let inputTokens: number = 0;
let outputTokens: number = 0;
const subAppUsages: ChatNodeUsageType[] = [];
// TODO: interactive rewrite messages
while (runTimes < maxRunAgentTimes) {
// TODO: cost check
runTimes++;
// Compress the requestMessages before sending
const taskDescription = currentStep.description || currentStep.title;
if (taskDescription) {
const result = await compressRequestMessages(requestMessages, model, taskDescription);
requestMessages = result.messages;
inputTokens += result.usage?.inputTokens || 0;
outputTokens += result.usage?.outputTokens || 0;
}
// Request LLM
let {
reasoningText: reasoningContent,
answerText: answer,
toolCalls = [],
usage,
getEmptyResponseTip,
completeMessages
} = await createLLMResponse({
body: {
model,
messages: requestMessages,
tool_choice: 'auto',
toolCallMode: model.toolChoice ? 'toolChoice' : 'prompt',
tools: subApps,
parallel_tool_calls: true,
stream,
temperature,
top_p
},
userKey,
isAborted,
onReasoning,
onStreaming,
onToolCall,
onToolParam
});
if (!answer && !reasoningContent && !toolCalls.length) {
return Promise.reject(getEmptyResponseTip());
}
const requestMessagesLength = requestMessages.length;
requestMessages = completeMessages.slice();
for await (const tool of toolCalls) {
// TODO: add interactive node handling
// Call tool and compress tool response
const { response, usages, interactive } = await handleToolResponse({
call: tool,
messages: requestMessages.slice(0, requestMessagesLength)
}).then(async (res) => {
const thresholds = calculateCompressionThresholds(model.maxContext);
const toolTokenCount = await countPromptTokens(res.response);
const response = await (async () => {
if (toolTokenCount > thresholds.singleTool.threshold && currentStep) {
const taskDescription = currentStep.description || currentStep.title;
return await compressToolcallResponse(
res.response,
model,
tool.function.name,
taskDescription,
thresholds.singleTool.target
);
} else {
return res.response;
}
})();
return {
...res,
response
};
});
requestMessages.push({
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
content: response
});
subAppUsages.push(...usages);
if (interactive) {
interactiveResponse = interactive;
}
}
// TODO: move the assistantResponses concat into the workflow
const currentAssistantResponses = GPTMessages2Chats({
messages: requestMessages.slice(requestMessagesLength),
getToolInfo
})[0] as AIChatItemType;
if (currentAssistantResponses) {
assistantResponses.push(...currentAssistantResponses.value);
}
// Usage concat
inputTokens += usage.inputTokens;
outputTokens += usage.outputTokens;
if (toolCalls.length === 0) {
break;
}
}
return {
inputTokens,
outputTokens,
completeMessages: requestMessages,
assistantResponses,
subAppUsages,
interactiveResponse
};
};

View File

@ -35,7 +35,7 @@ export type ResponseEvents = {
onStreaming?: (e: { text: string }) => void;
onReasoning?: (e: { text: string }) => void;
onToolCall?: (e: { call: ChatCompletionMessageToolCall }) => void;
onToolParam?: (e: { call: ChatCompletionMessageToolCall; params: string }) => void;
onToolParam?: (e: { tool: ChatCompletionMessageToolCall; params: string }) => void;
};
export type CreateLLMResponseProps<T extends CompletionsBodyType = CompletionsBodyType> = {
@ -260,7 +260,7 @@ export const createStreamResponse = async ({
if (currentTool && arg) {
currentTool.function.arguments += arg;
onToolParam?.({ call: currentTool, params: arg });
onToolParam?.({ tool: currentTool, params: arg });
}
}
});

View File

@ -0,0 +1,196 @@
import type { AgentPlanStepType } from './sub/plan/type';
import { getLLMModel } from '../../../../ai/model';
import { countPromptTokens } from '../../../../../common/string/tiktoken/index';
import { createLLMResponse } from '../../../../ai/llm/request';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { addLog } from '../../../../../common/system/log';
import { calculateCompressionThresholds } from '../../../../ai/llm/compress/constants';
export const getMasterAgentSystemPrompt = async ({
steps,
step,
userInput,
background = '',
model
}: {
steps: AgentPlanStepType[];
step: AgentPlanStepType;
userInput: string;
background?: string;
model: string;
}) => {
/**
 * Depends-on context compression:
 * if stepPrompt exceeds ~15% of the model context in tokens, use the LLM to compress it down to ~12%.
 */
const compressStepPrompt = async (
stepPrompt: string,
model: string,
currentDescription: string
): Promise<string> => {
if (!stepPrompt) return stepPrompt;
const modelData = getLLMModel(model);
if (!modelData) return stepPrompt;
const tokenCount = await countPromptTokens(stepPrompt);
const thresholds = calculateCompressionThresholds(modelData.maxContext);
const maxTokenThreshold = thresholds.dependsOn.threshold;
if (tokenCount <= maxTokenThreshold) {
return stepPrompt;
}
const targetTokens = thresholds.dependsOn.target;
const compressionSystemPrompt = `<role>
You are a context compressor. Reduce the token count of workflow step execution history while keeping the information the current task needs.
</role>
<task_context>
The input is a list of executed steps in the format "step ID → step title → execution result".
</task_context>
<compression_workflow>
**Analysis**
1. Read the current task description.
2. Rate each step's relevance to the current task:
- [High] directly required by the current task
- [Medium] useful supporting context
- [Low] unrelated to the current task
3. Compress each step according to its tier.
**Compression tiers**
1. **High relevance** (keep 80-100%)
- Keep the step ID
- Keep key data, conclusions and parameters verbatim
- Remove only redundant wording
2. **Medium relevance** (keep 40-60%)
- Keep the step ID
- Summarize the result in 2-3 sentences
- Drop intermediate detail
3. **Low relevance** (keep 10-20%)
- Keep the step ID
- Keep a one-line summary only
- Drop everything else
</compression_workflow>
<compression_principles>
- Never invent information that is not in the original
- Prefer dropping detail over distorting meaning
- Keep IDs, numbers and entity names exact
- Preserve the original step order
- Compress descriptions, not conclusions
- Stay within the target token budget
</compression_principles>
<quality_check>
1. Is every step ID still present?
2. Is the information the current task depends on preserved?
3. Is the output within the target token budget?
4. Is the output format unchanged?
</quality_check>`;
const userPrompt = `Compress the execution history of the following workflow steps, keeping the information most relevant to the current task.
**Current task**: ${currentDescription}
**Execution history**
${stepPrompt}
**Token budget**
- Current: ${tokenCount} tokens
- Target: ${targetTokens} tokens (about ${Math.round((targetTokens / tokenCount) * 100)}% of the original)
**Requirements**
1. Keep the output format "# Step ID: [id]\\n\\t - Step title: [title]\\n\\t - Result: [condensed result]"
2. Compress each step according to its relevance to the current task:
- High relevance: keep key data and conclusions
- Medium relevance: keep a short summary
- Low relevance: keep a one-line note
3. Do not invent or distort information.
4. Output the compressed history only.
**Do not**
- Drop any step ID
- Change the order of the steps
- Add commentary outside the required format
`;
try {
const { answerText } = await createLLMResponse({
body: {
model: modelData,
messages: [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: compressionSystemPrompt
},
{
role: ChatCompletionRequestMessageRoleEnum.User,
content: userPrompt
}
],
temperature: 0.1,
stream: false
}
});
return answerText || stepPrompt;
} catch (error) {
console.error('Failed to compress stepPrompt:', error);
// Fall back to the original content if compression fails
return stepPrompt;
}
};
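For reference, a sketch of the thresholds shape this function relies on. The 15%/12% ratios come from the comment above, not from this commit's constants file, so treat them as assumptions.

// Assumed shape of calculateCompressionThresholds(maxContext).dependsOn, for illustration only.
const exampleThresholds = (maxContext: number) => ({
  dependsOn: {
    threshold: Math.floor(maxContext * 0.15), // start compressing above ~15% of the context window
    target: Math.floor(maxContext * 0.12) // compress down to ~12% of the context window
  }
});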
let stepPrompt = steps
.filter((item) => step.depends_on && step.depends_on.includes(item.id))
.map(
(item) =>
`# Step ID: ${item.id}\n\t - Step title: ${item.title}\n\t - Result: ${item.response}`
)
.filter(Boolean)
.join('\n');
addLog.debug(`Step call depends_on (LLM): ${step.id}`, step.depends_on);
// Compress the depends-on context
stepPrompt = await compressStepPrompt(stepPrompt, model, step.description || step.title);
return `Select and call the appropriate tools based on the task background, the results of previous steps, and the current step requirements. If this is a summary step, consolidate the results of the previous steps into a summary.
Goal: ${userInput}
Background: ${background}
Step ID: ${step.id}
Step title: ${step.title}
${
stepPrompt
? `[Results of previous steps]
${stepPrompt}`
: ''
}
Requirements:
1. Execute only the current step; do not run ahead to later steps.
2. Prefer the available tools to complete the task.
3. Reuse the results of previous steps instead of recomputing them.
4. If this is a summary step, integrate the previous steps' results.
5. Keep the output focused on the current step's result.
6. Do not fabricate tool outputs or results.
7. Stop once the current step is complete.
`;
};
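A usage sketch with illustrative values; only steps listed in step.depends_on contribute to the prompt's history section. The plan argument and model id are assumptions.

// Usage sketch (assumes an AgentPlanType `plan` produced by dispatchPlanAgent):
const buildStepPrompt = async (plan: AgentPlanType) =>
  getMasterAgentSystemPrompt({
    steps: plan.steps, // all steps, so depends_on results can be looked up
    step: plan.steps[1], // e.g. a step with depends_on: ['step-1']
    userInput: 'Summarize last week sales data',
    background: 'You are a data assistant', // typically the node systemPrompt
    model: 'gpt-4o-mini' // illustrative model id
  });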

View File

@ -0,0 +1,487 @@
import type { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import {
ConfirmPlanAgentText,
DispatchNodeResponseKeyEnum,
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import type {
DispatchNodeResultType,
ModuleDispatchProps,
RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { getLLMModel } from '../../../../ai/model';
import { getNodeErrResponse, getHistories } from '../../utils';
import type { AIChatItemValueItemType, ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
chats2GPTMessages,
chatValue2RuntimePrompt,
GPTMessages2Chats
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils';
import { filterMemoryMessages } from '../utils';
import { systemSubInfo } from './sub/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { dispatchPlanAgent, dispatchReplanAgent } from './sub/plan';
import type { FlowNodeTemplateType } from '@fastgpt/global/core/workflow/type/node';
import { getSubApps, rewriteSubAppsToolset } from './sub';
import { getFileInputPrompt } from './sub/file/utils';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import type { AgentPlanType } from './sub/plan/type';
import type { localeType } from '@fastgpt/global/common/i18n/type';
import { stepCall } from './master/call';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { addLog } from '../../../../../common/system/log';
import { checkTaskComplexity } from './master/taskComplexity';
export type DispatchAgentModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.aiChatTemperature]?: number;
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.subApps]?: FlowNodeTemplateType[];
[NodeInputKeyEnum.isAskAgent]?: boolean;
[NodeInputKeyEnum.isPlanAgent]?: boolean;
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
}>;
export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise<Response> => {
let {
node: { nodeId, name, isEntry, version, inputs },
lang,
runtimeNodes,
histories,
query,
requestOrigin,
chatConfig,
lastInteractive,
runningUserInfo,
runningAppInfo,
externalProvider,
stream,
workflowDispatchDeep,
workflowStreamResponse,
usagePush,
params: {
model,
systemPrompt,
userChatInput,
history = 6,
fileUrlList: fileLinks,
temperature,
aiChatTopP,
subApps = [],
isPlanAgent = true,
isAskAgent = true
}
} = props;
const agentModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const historiesMessages = chats2GPTMessages({
messages: chatHistories,
reserveId: false,
reserveTool: false
});
const planMessagesKey = `planMessages-${nodeId}`;
const replanMessagesKey = `replanMessages-${nodeId}`;
const agentPlanKey = `agentPlan-${nodeId}`;
// When entering via an interactive response, this holds the interactive input value
const interactiveInput = lastInteractive ? chatValue2RuntimePrompt(query).text : '';
// Get history messages
let { planHistoryMessages, replanMessages, agentPlan } = (() => {
const lastHistory = chatHistories[chatHistories.length - 1];
if (lastHistory && lastHistory.obj === ChatRoleEnum.AI) {
return {
planHistoryMessages: (lastHistory.memories?.[planMessagesKey] ||
[]) as ChatCompletionMessageParam[],
replanMessages: (lastHistory.memories?.[replanMessagesKey] ||
[]) as ChatCompletionMessageParam[],
agentPlan: (lastHistory.memories?.[agentPlanKey] || []) as AgentPlanType
};
}
return {
planHistoryMessages: undefined,
replanMessages: undefined,
agentPlan: undefined
};
})();
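The per-node memory keys read back here mirror what dispatchRunAgent writes at the end of a run; as a sketch:

// Sketch of the memories stored on the last AI history item (keys are per-node):
// {
//   [`planMessages-${nodeId}`]: ChatCompletionMessageParam[], // kept while the plan awaits confirmation
//   [`replanMessages-${nodeId}`]: ChatCompletionMessageParam[], // kept while a replan awaits confirmation
//   [`agentPlan-${nodeId}`]: AgentPlanType // the current plan snapshot
// }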
try {
// Get files
const fileUrlInput = inputs.find((item) => item.key === NodeInputKeyEnum.fileUrlList);
if (!fileUrlInput || !fileUrlInput.value || fileUrlInput.value.length === 0) {
fileLinks = undefined;
}
const { filesMap, prompt: fileInputPrompt } = getFileInputPrompt({
fileUrls: fileLinks,
requestOrigin,
maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
histories: chatHistories
});
// Get sub apps
const { subAppList, subAppsMap, getSubAppInfo } = await useSubApps({
subApps,
lang,
filesMap
});
/* ===== AI Start ===== */
/* ===== Check task complexity ===== */
const taskIsComplexity = await (async () => {
// Check task complexity only on first entry (an existing plan means the task is already running)
const isCheckTaskComplexityStep = isPlanAgent && !agentPlan && !planHistoryMessages;
// if (isCheckTaskComplexityStep) {
// const res = await checkTaskComplexity({
// model,
// userChatInput
// });
// if (res.usage) {
// usagePush([res.usage]);
// }
// return res.complex;
// }
// On subsequent rounds, always take the complex path
return true;
})();
if (taskIsComplexity) {
/* ===== Plan Agent ===== */
const planCallFn = async () => {
// Plan confirmed by the user; agentPlan must exist at this point
if (
lastInteractive?.type === 'agentPlanCheck' &&
interactiveInput === ConfirmPlanAgentText &&
agentPlan
) {
planHistoryMessages = undefined;
} else {
const { answerText, plan, completeMessages, usages, interactiveResponse } =
await dispatchPlanAgent({
historyMessages: planHistoryMessages || historiesMessages,
userInput: lastInteractive ? interactiveInput : userChatInput,
interactive: lastInteractive,
subAppList,
getSubAppInfo,
systemPrompt,
model,
temperature,
top_p: aiChatTopP,
stream,
isTopPlanAgent: workflowDispatchDeep === 1
});
const assistantResponses: AIChatItemValueItemType[] = [
...(answerText
? [
{
text: {
content: answerText
}
}
]
: []),
...(plan
? [
{
agentPlan: plan
}
]
: [])
];
// SSE response
if (answerText) {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: answerText
})
});
}
if (plan) {
workflowStreamResponse?.({
event: SseResponseEventEnum.agentPlan,
data: { agentPlan: plan }
});
}
agentPlan = plan;
usagePush(usages);
// A sub agent plan never returns an interactive response; a top agent plan always does.
if (interactiveResponse) {
return {
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.memories]: {
[planMessagesKey]: filterMemoryMessages(completeMessages),
[agentPlanKey]: agentPlan
},
[DispatchNodeResponseKeyEnum.interactive]: interactiveResponse
};
} else {
planHistoryMessages = undefined;
}
}
};
const replanCallFn = async ({ plan }: { plan: AgentPlanType }) => {
if (!agentPlan) return;
addLog.debug(`Replan step`);
const {
answerText,
plan: rePlan,
completeMessages,
usages,
interactiveResponse
} = await dispatchReplanAgent({
historyMessages: replanMessages || historiesMessages,
userInput: lastInteractive ? interactiveInput : userChatInput,
plan,
interactive: lastInteractive,
subAppList,
getSubAppInfo,
systemPrompt,
model,
temperature,
top_p: aiChatTopP,
stream,
isTopPlanAgent: workflowDispatchDeep === 1
});
if (rePlan) {
agentPlan.steps.push(...rePlan.steps);
agentPlan.replan = rePlan.replan;
}
const assistantResponses: AIChatItemValueItemType[] = [
...(answerText
? [
{
text: {
content: answerText
}
}
]
: []),
...(rePlan
? [
{
agentPlan: plan
}
]
: [])
];
// SSE response
if (answerText) {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: answerText
})
});
}
if (rePlan) {
workflowStreamResponse?.({
event: SseResponseEventEnum.agentPlan,
data: { agentPlan: plan }
});
}
usagePush(usages);
// A sub agent plan never returns an interactive response; a top agent plan always does.
if (interactiveResponse) {
return {
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.memories]: {
[replanMessagesKey]: filterMemoryMessages(completeMessages),
[agentPlanKey]: agentPlan
},
[DispatchNodeResponseKeyEnum.interactive]: interactiveResponse
};
} else {
replanMessages = undefined;
}
};
// Plan step: a plan is required and there is no complete plan yet
const isPlanStep = isPlanAgent && (!agentPlan || planHistoryMessages);
// Replan step: a plan exists and there are replan history messages
const isReplanStep = isPlanAgent && agentPlan && replanMessages;
// Run plan/replan
if (isPlanStep) {
const result = await planCallFn();
// A result means the plan produced an interactive response (check/ask)
if (result) return result;
} else if (isReplanStep) {
const result = await replanCallFn({
plan: agentPlan!
});
if (result) return result;
}
addLog.debug(`Start master agent`, {
agentPlan: JSON.stringify(agentPlan, null, 2)
});
/* ===== Master agent: execute the plan step by step ===== */
if (!agentPlan) return Promise.reject('No plan generated');
let assistantResponses: AIChatItemValueItemType[] = [];
while (agentPlan.steps!.filter((item) => !item.response)!.length) {
for await (const step of agentPlan?.steps) {
if (step.response) continue;
addLog.debug(`Step call: ${step.id}`, step);
// Temp code
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: `\n # ${step.id}: ${step.title}\n`
})
});
const tmpAssistantResponses: AIChatItemValueItemType = {
text: {
content: `\n # ${step.id}: ${step.title}\n`
}
};
assistantResponses.push(tmpAssistantResponses);
// Step call
const result = await stepCall({
...props,
getSubAppInfo,
steps: agentPlan.steps, // pass all steps, not only the pending ones
subAppList,
step,
filesMap,
subAppsMap
});
// Merge response
const assistantResponse = GPTMessages2Chats({
messages: result.assistantMessages,
reserveTool: true,
getToolInfo: getSubAppInfo
})
.map((item) => item.value as AIChatItemValueItemType[])
.flat();
step.response = result.rawResponse;
step.summary = result.summary;
assistantResponses.push(...assistantResponse);
}
// Call replan
if (agentPlan?.replan === true) {
// Mutates agentPlan.steps internally, which keeps the outer loop running
const replanResult = await replanCallFn({
plan: agentPlan
});
// The replan requires user interaction; return immediately
if (replanResult) return replanResult;
}
}
return {
// The master agent does not trigger interaction for now
// [DispatchNodeResponseKeyEnum.interactive]: interactiveResponse,
// TODO: store memoryMessages in a dedicated table
[DispatchNodeResponseKeyEnum.memories]: {
[agentPlanKey]: agentPlan,
[planMessagesKey]: undefined,
[replanMessagesKey]: undefined
},
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
// Points consumed (for display)
// totalPoints: totalPointsUsage,
// toolCallInputTokens: inputTokens,
// toolCallOutputTokens: outputTokens,
// childTotalPoints: toolTotalPoints,
// model: modelName,
query: userChatInput,
// toolDetail: dispatchFlowResponse,
mergeSignId: nodeId
}
};
}
// Simple tool-call mode (a single round of conversation, so it is not affected by taskIsComplexity across turns)
return Promise.reject('Simple mode is not supported yet');
} catch (error) {
return getNodeErrResponse({ error });
}
};
export const useSubApps = async ({
subApps,
lang,
filesMap
}: {
subApps: FlowNodeTemplateType[];
lang?: localeType;
filesMap: Record<string, string>;
}) => {
// Get sub apps
const runtimeSubApps = await rewriteSubAppsToolset({
subApps: subApps.map<RuntimeNodeItemType>((node) => {
return {
nodeId: node.id,
name: node.name,
avatar: node.avatar,
intro: node.intro,
toolDescription: node.toolDescription,
flowNodeType: node.flowNodeType,
showStatus: node.showStatus,
isEntry: false,
inputs: node.inputs,
outputs: node.outputs,
pluginId: node.pluginId,
version: node.version,
toolConfig: node.toolConfig,
catchError: node.catchError
};
}),
lang
});
const subAppList = getSubApps({
subApps: runtimeSubApps,
addReadFileTool: Object.keys(filesMap).length > 0
});
const subAppsMap = new Map(runtimeSubApps.map((item) => [item.nodeId, item]));
const getSubAppInfo = (id: string) => {
const toolNode = subAppsMap.get(id) || systemSubInfo[id];
return {
name: toolNode?.name || '',
avatar: toolNode?.avatar || '',
toolDescription: toolNode?.toolDescription || toolNode?.name || ''
};
};
return {
subAppList,
subAppsMap,
getSubAppInfo
};
};
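A usage sketch; the node id is illustrative, and systemSubInfo supplies the fallback for built-in sub apps.

// Usage sketch (illustrative ids):
const resolveToolInfo = async (subApps: FlowNodeTemplateType[]) => {
  const { getSubAppInfo } = await useSubApps({ subApps, lang: 'en', filesMap: {} });
  return getSubAppInfo('node-123'); // falls back to systemSubInfo for built-in tools
};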

View File

@ -16,7 +16,7 @@ import {
} from '@fastgpt/global/core/workflow/runtime/utils';
import { getWorkflowChildResponseWrite } from '../../../utils';
import { SubAppIds } from '../sub/constants';
import { parseToolArgs } from '../../utils';
import { parseToolArgs } from '../../../../../ai/utils';
import { dispatchFileRead } from '../sub/file';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
@ -25,8 +25,6 @@ import { dispatchApp, dispatchPlugin } from '../sub/app';
import { getErrText } from '@fastgpt/global/common/error/utils';
import type { DispatchAgentModuleProps } from '..';
import { getLLMModel } from '../../../../../ai/model';
import { createLLMResponse } from '../../../../../ai/llm/request';
import { addLog } from '../../../../../../common/system/log';
import { getStepDependon } from './dependon';
import { getResponseSummary } from './responseSummary';
@ -47,6 +45,7 @@ export const stepCall = async ({
subAppsMap: Map<string, RuntimeNodeItemType>;
}) => {
const {
res,
node: { nodeId },
runtimeNodes,
chatConfig,
@ -55,7 +54,6 @@ export const stepCall = async ({
variables,
externalProvider,
stream,
res,
workflowStreamResponse,
usagePush,
params: { userChatInput, systemPrompt, model, temperature, aiChatTopP }
@ -74,15 +72,17 @@ export const stepCall = async ({
step.depends_on = depends;
}
// addLog.debug(`Step information`, steps);
// Step call system prompt
// TODO: return the compression usage as well
const systemPromptContent = await getMasterAgentSystemPrompt({
steps,
step,
userInput: userChatInput,
model
// background: systemPrompt
model,
background: systemPrompt
});
// Step call request messages
const requestMessages = chats2GPTMessages({
messages: [
{
@ -109,24 +109,22 @@ export const stepCall = async ({
// 'Step call requestMessages',
// JSON.stringify({ requestMessages, subAppList }, null, 2)
// );
// TODO: push usage/billing incrementally
const { assistantResponses, inputTokens, outputTokens, subAppUsages, interactiveResponse } =
const { assistantMessages, inputTokens, outputTokens, subAppUsages, interactiveResponse } =
await runAgentCall({
maxRunAgentTimes: 100,
currentStep: step,
// interactiveEntryToolParams: lastInteractive?.toolParams,
body: {
messages: requestMessages,
model: getLLMModel(model),
temperature,
stream,
top_p: aiChatTopP,
subApps: subAppList
tools: subAppList
},
userKey: externalProvider.openaiAccount,
isAborted: res ? () => res.closed : undefined,
getToolInfo: getSubAppInfo,
// childrenInteractiveParams
onReasoning({ text }) {
workflowStreamResponse?.({
@ -160,9 +158,9 @@ export const stepCall = async ({
}
});
},
onToolParam({ call, params }) {
onToolParam({ tool, params }) {
workflowStreamResponse?.({
id: call.id,
id: tool.id,
event: SseResponseEventEnum.toolParams,
data: {
tool: {
@ -172,6 +170,7 @@ export const stepCall = async ({
});
},
// TODO: align with the latest design
handleToolResponse: async ({ call, messages }) => {
const toolId = call.function.name;
const childWorkflowStreamResponse = getWorkflowChildResponseWrite({
@ -338,12 +337,34 @@ export const stepCall = async ({
return {
response,
assistantMessages: [], // TODO
usages
};
},
handleInteractiveTool: async ({ toolParams }) => {
return {
response: 'Interactive tool not supported',
assistantMessages: [], // TODO
usages: []
};
}
});
const answerText = assistantResponses.map((item) => item.text?.content).join('\n');
const answerText = assistantMessages
.map((item) => {
if (item.role === 'assistant' && item.content) {
if (typeof item.content === 'string') {
return item.content;
} else {
return item.content
.map((content) => (content.type === 'text' ? content.text : ''))
.join('\n');
}
}
return '';
})
.join('\n');
// Get step response summary
const { answerText: summary, usage: summaryUsage } = await getResponseSummary({
response: answerText,
model
@ -355,6 +376,6 @@ export const stepCall = async ({
return {
rawResponse: answerText,
summary,
assistantResponses
assistantMessages
};
};

View File

@ -2,7 +2,7 @@ import { getLLMModel } from '../../../../../ai/model';
import type { AgentPlanStepType } from '../sub/plan/type';
import { addLog } from '../../../../../../common/system/log';
import { createLLMResponse } from '../../../../../ai/llm/request';
import { parseToolArgs } from '../../utils';
import { parseToolArgs } from '../../../../../ai/utils';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { formatModelChars2Points } from '../../../../../../support/wallet/usage/utils';

View File

@ -1,5 +1,5 @@
import { createLLMResponse } from '../../../../../ai/llm/request';
import { parseToolArgs } from '../../utils';
import { parseToolArgs } from '../../../../../ai/utils';
import { addLog } from '../../../../../../common/system/log';
import { formatModelChars2Points } from '../../../../../../support/wallet/usage/utils';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';

View File

@ -16,7 +16,7 @@ import type {
InteractiveNodeResponseType,
WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { parseToolArgs } from '../../../utils';
import { parseToolArgs } from '../../../../../../ai/utils';
import { PlanAgentAskTool, type AskAgentToolParamsType } from './ask/constants';
import { PlanCheckInteractive } from './constants';
import type { AgentPlanType } from './type';
@ -93,7 +93,7 @@ export const dispatchPlanAgent = async ({
tool_call_id: lastMessages.tool_calls[0].id,
content: userInput
});
// TODO: is this reasonable?
// TODO: is this reasonable? Also consider model compatibility issues
requestMessages.push({
role: 'assistant',
content: 'Based on the user information collected above, regenerate the complete plan, strictly following the JSON Schema for the output.'

View File

@ -0,0 +1,19 @@
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];
jsonSchema?: JSONSchemaInputType;
};
export type DispatchSubAppResponse = {
response: string;
usages?: ChatNodeUsageType[];
};
export type GetSubAppInfoFnType = (id: string) => {
name: string;
avatar: string;
toolDescription: string;
};

View File

@ -0,0 +1,31 @@
/*
Replace {{@toolId@}} placeholders in the system prompt with @name mentions.
*/
export const parseSystemPrompt = ({
systemPrompt,
getSubAppInfo
}: {
systemPrompt?: string;
getSubAppInfo: (id: string) => {
name: string;
avatar: string;
toolDescription: string;
};
}): string => {
if (!systemPrompt) return '';
// Match pattern {{@toolId@}} and convert to @name format
const pattern = /\{\{@([^@]+)@\}\}/g;
const processedPrompt = systemPrompt.replace(pattern, (match, toolId) => {
const toolInfo = getSubAppInfo(toolId);
if (!toolInfo) {
console.warn(`Tool not found for ID: ${toolId}`);
return match; // Return original match if tool not found
}
return `@${toolInfo.name}`;
});
return processedPrompt;
};
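A usage sketch: each {{@<nodeId>@}} placeholder becomes an @<tool name> mention. The id and tool info below are illustrative.

// Usage sketch (illustrative id and tool info):
const rendered = parseSystemPrompt({
  systemPrompt: 'Prefer {{@search-node-id@}} for web questions',
  getSubAppInfo: () => ({ name: 'Web Search', avatar: '', toolDescription: 'Search the web' })
});
// rendered === 'Prefer @Web Search for web questions'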

View File

@ -1,197 +1,14 @@
import type { AgentPlanStepType } from './sub/plan/type';
import type { AgentPlanType } from './sub/plan/type';
import { getLLMModel } from '../../../../ai/model';
import { countPromptTokens } from '../../../../../common/string/tiktoken/index';
import { createLLMResponse } from '../../../../ai/llm/request';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { addLog } from '../../../../../common/system/log';
import { calculateCompressionThresholds } from '../../../../ai/llm/compress/constants';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
export const getMasterAgentSystemPrompt = async ({
steps,
step,
userInput,
background = '',
model
}: {
steps: AgentPlanStepType[];
step: AgentPlanStepType;
userInput: string;
background?: string;
model: string;
export const getMultiplePrompt = (obj: {
fileCount: number;
imgCount: number;
question: string;
}) => {
/**
 * Depends-on context compression:
 * if stepPrompt exceeds ~15% of the model context in tokens, use the LLM to compress it down to ~12%.
 */
const compressStepPrompt = async (
stepPrompt: string,
model: string,
currentDescription: string
): Promise<string> => {
if (!stepPrompt) return stepPrompt;
const modelData = getLLMModel(model);
if (!modelData) return stepPrompt;
const tokenCount = await countPromptTokens(stepPrompt);
const thresholds = calculateCompressionThresholds(modelData.maxContext);
const maxTokenThreshold = thresholds.dependsOn.threshold;
if (tokenCount <= maxTokenThreshold) {
return stepPrompt;
}
const targetTokens = thresholds.dependsOn.target;
const compressionSystemPrompt = `<role>
You are a context compressor. Reduce the token count of workflow step execution history while keeping the information the current task needs.
</role>
<task_context>
The input is a list of executed steps in the format "step ID → step title → execution result".
</task_context>
<compression_workflow>
**Analysis**
1. Read the current task description.
2. Rate each step's relevance to the current task:
- [High] directly required by the current task
- [Medium] useful supporting context
- [Low] unrelated to the current task
3. Compress each step according to its tier.
**Compression tiers**
1. **High relevance** (keep 80-100%)
- Keep the step ID
- Keep key data, conclusions and parameters verbatim
- Remove only redundant wording
2. **Medium relevance** (keep 40-60%)
- Keep the step ID
- Summarize the result in 2-3 sentences
- Drop intermediate detail
3. **Low relevance** (keep 10-20%)
- Keep the step ID
- Keep a one-line summary only
- Drop everything else
</compression_workflow>
<compression_principles>
- Never invent information that is not in the original
- Prefer dropping detail over distorting meaning
- Keep IDs, numbers and entity names exact
- Preserve the original step order
- Compress descriptions, not conclusions
- Stay within the target token budget
</compression_principles>
<quality_check>
1. Is every step ID still present?
2. Is the information the current task depends on preserved?
3. Is the output within the target token budget?
4. Is the output format unchanged?
</quality_check>`;
const userPrompt = `Compress the execution history of the following workflow steps, keeping the information most relevant to the current task.
**Current task**: ${currentDescription}
**Execution history**
${stepPrompt}
**Token budget**
- Current: ${tokenCount} tokens
- Target: ${targetTokens} tokens (about ${Math.round((targetTokens / tokenCount) * 100)}% of the original)
**Requirements**
1. Keep the output format "# Step ID: [id]\\n\\t - Step title: [title]\\n\\t - Result: [condensed result]"
2. Compress each step according to its relevance to the current task:
- High relevance: keep key data and conclusions
- Medium relevance: keep a short summary
- Low relevance: keep a one-line note
3. Do not invent or distort information.
4. Output the compressed history only.
**Do not**
- Drop any step ID
- Change the order of the steps
- Add commentary outside the required format
`;
try {
const { answerText } = await createLLMResponse({
body: {
model: modelData,
messages: [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: compressionSystemPrompt
},
{
role: ChatCompletionRequestMessageRoleEnum.User,
content: userPrompt
}
],
temperature: 0.1,
stream: false
}
});
return answerText || stepPrompt;
} catch (error) {
console.error('Failed to compress stepPrompt:', error);
// Fall back to the original content if compression fails
return stepPrompt;
}
};
let stepPrompt = steps
.filter((item) => step.depends_on && step.depends_on.includes(item.id))
.map(
(item) =>
`# Step ID: ${item.id}\n\t - Step title: ${item.title}\n\t - Result: ${item.response}`
)
.filter(Boolean)
.join('\n');
addLog.debug(`Step call depends_on (LLM): ${step.id}`, step.depends_on);
// Compress the depends-on context
stepPrompt = await compressStepPrompt(stepPrompt, model, step.description || step.title);
return `Select and call the appropriate tools based on the task background, the results of previous steps, and the current step requirements. If this is a summary step, consolidate the results of the previous steps into a summary.
Goal: ${userInput}
Background: ${background}
Step ID: ${step.id}
Step title: ${step.title}
${
stepPrompt
? `[Results of previous steps]
${stepPrompt}`
: ''
}
Requirements:
1. Execute only the current step; do not run ahead to later steps.
2. Prefer the available tools to complete the task.
3. Reuse the results of previous steps instead of recomputing them.
4. If this is a summary step, integrate the previous steps' results.
5. Keep the output focused on the current step's result.
6. Do not fabricate tool outputs or results.
7. Stop once the current step is complete.
`;
const prompt = `Number of session file inputs:
Document: {{fileCount}}
Image: {{imgCount}}
------
{{question}}`;
return replaceVariable(prompt, obj);
};
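A usage sketch of the rendered prompt, assuming the template above.

// Usage sketch:
const question = getMultiplePrompt({ fileCount: 2, imgCount: 1, question: 'What do these files say?' });
// => `Number of session file inputs:
// Document: 2
// Image: 1
// ------
// What do these files say?`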

View File

@ -1,20 +1,23 @@
import type { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import {
ConfirmPlanAgentText,
DispatchNodeResponseKeyEnum,
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type {
ChatDispatchProps,
DispatchNodeResultType,
ModuleDispatchProps,
RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { getLLMModel } from '../../../../ai/model';
import { getNodeErrResponse, getHistories } from '../../utils';
import type { AIChatItemValueItemType, ChatItemType } from '@fastgpt/global/core/chat/type';
import { filterToolNodeIdByEdges, getNodeErrResponse, getHistories } from '../../utils';
import { runToolCall } from './toolCall';
import { type DispatchToolModuleProps, type ToolNodeItemType } from './type';
import { type ChatItemType, type UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { chats2GPTMessages, chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import {
GPTMessages2Chats,
chatValue2RuntimePrompt,
chats2GPTMessages,
getSystemPrompt_ChatItemType,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
@ -34,23 +37,19 @@ type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
}>;
export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise<Response> => {
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
let {
node: { nodeId, name, isEntry, version, inputs },
lang,
runtimeNodes,
runtimeEdges,
histories,
query,
requestOrigin,
chatConfig,
lastInteractive,
runningUserInfo,
runningAppInfo,
externalProvider,
stream,
workflowDispatchDeep,
workflowStreamResponse,
usagePush,
usageId,
params: {
model,
systemPrompt,
@ -62,111 +61,49 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
isResponseAnswerText = true
}
} = props;
const agentModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const historiesMessages = chats2GPTMessages({
messages: chatHistories,
reserveId: false,
reserveTool: false
});
const planMessagesKey = `planMessages-${nodeId}`;
const replanMessagesKey = `replanMessages-${nodeId}`;
const agentPlanKey = `agentPlan-${nodeId}`;
// When entering via an interactive response, this holds the interactive input value
const interactiveInput = lastInteractive ? chatValue2RuntimePrompt(query).text : '';
// Get history messages
let { planHistoryMessages, replanMessages, agentPlan } = (() => {
const lastHistory = chatHistories[chatHistories.length - 1];
if (lastHistory && lastHistory.obj === ChatRoleEnum.AI) {
return {
planHistoryMessages: (lastHistory.memories?.[planMessagesKey] ||
[]) as ChatCompletionMessageParam[],
replanMessages: (lastHistory.memories?.[replanMessagesKey] ||
[]) as ChatCompletionMessageParam[],
agentPlan: (lastHistory.memories?.[agentPlanKey] || []) as AgentPlanType
};
}
return {
planHistoryMessages: undefined,
replanMessages: undefined,
agentPlan: undefined
};
})();
// Check task complexity only on first entry (an existing plan means the task is already running)
const isCheckTaskComplexityStep = isPlanAgent && !agentPlan && !planHistoryMessages;
try {
// Get files
const toolModel = getLLMModel(model);
const useVision = aiChatVision && toolModel.vision;
const chatHistories = getHistories(history, histories);
props.params.aiChatVision = aiChatVision && toolModel.vision;
props.params.aiChatReasoning = aiChatReasoning && toolModel.reasoning;
const fileUrlInput = inputs.find((item) => item.key === NodeInputKeyEnum.fileUrlList);
if (!fileUrlInput || !fileUrlInput.value || fileUrlInput.value.length === 0) {
fileLinks = undefined;
}
<<<<<<< HEAD
const { filesMap, prompt: fileInputPrompt } = getFileInputPrompt({
fileUrls: fileLinks,
requestOrigin,
maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
histories: chatHistories
});
// Get sub apps
const { subAppList, subAppsMap, getSubAppInfo } = await useSubApps({
subApps,
lang,
filesMap
});
/* ===== AI Start ===== */
/* ===== Check task complexity ===== */
const taskIsComplexity = await (async () => {
// if (isCheckTaskComplexityStep) {
// const res = await checkTaskComplexity({
// model,
// userChatInput
// });
// if (res.usage) {
// usagePush([res.usage]);
// }
// return res.complex;
// }
// On subsequent rounds, always take the complex path
return true;
})();
if (taskIsComplexity) {
/* ===== Plan Agent ===== */
const planCallFn = async () => {
// Plan confirmed by the user; agentPlan must exist at this point
if (
lastInteractive?.type === 'agentPlanCheck' &&
interactiveInput === ConfirmPlanAgentText &&
agentPlan
) {
planHistoryMessages = undefined;
} else {
// Temporary code
const tmpText = 'Generating the plan...\n';
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: tmpText
})
});
<<<<<<<< HEAD:packages/service/core/workflow/dispatch/ai/tool/index.ts
const {
toolWorkflowInteractiveResponse,
toolDispatchFlowResponses, // tool flow response
=======
const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });
const toolNodes = getToolNodesByIds({ toolNodeIds, runtimeNodes });
// Gets the module to which the tool is connected
const toolNodes = toolNodeIds
.map((nodeId) => {
const tool = runtimeNodes.find((item) => item.nodeId === nodeId);
return tool;
})
.filter(Boolean)
.map<ToolNodeItemType>((tool) => {
const toolParams: FlowNodeInputItemType[] = [];
// Raw json schema(MCP tool)
let jsonSchema: JSONSchemaInputType | undefined = undefined;
tool?.inputs.forEach((input) => {
if (input.toolDescription) {
toolParams.push(input);
}
if (input.key === NodeInputKeyEnum.toolData || input.key === 'toolData') {
const value = input.value as McpToolDataType;
jsonSchema = value.inputSchema;
}
});
return {
...(tool as RuntimeNodeItemType),
toolParams,
jsonSchema
};
});
// Check interactive entry
props.node.isEntry = false;
@ -183,6 +120,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse,
fileLinks,
inputFiles: globalFiles,
<<<<<<< HEAD
<<<<<<< HEAD
hasReadFilesTool,
usageId,
@ -192,6 +130,10 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
=======
hasReadFilesTool
>>>>>>> a48ad2abe (squash: compress all commits into one)
=======
hasReadFilesTool,
usageId
>>>>>>> daaea654e (feat: plan response in ui)
});
const concatenateSystemPrompt = [
@ -250,16 +192,11 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
const {
toolWorkflowInteractiveResponse,
dispatchFlowResponse, // tool flow response
>>>>>>> 757253617 (squash: compress all commits into one)
toolDispatchFlowResponses, // tool flow response
toolCallInputTokens,
toolCallOutputTokens,
completeMessages = [], // The actual message sent to AI(just save text)
assistantResponses = [], // FastGPT system store assistant.value response
<<<<<<< HEAD
=======
runTimes,
>>>>>>> 757253617 (squash: compress all commits into one)
finish_reason
} = await (async () => {
const adaptMessages = chats2GPTMessages({
@ -267,162 +204,20 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
reserveId: false
// reserveTool: !!toolModel.toolChoice
});
<<<<<<< HEAD
return runToolCall({
...props,
=======
const requestParams = {
>>>>>>> 757253617 (squash: compress all commits into one)
runtimeNodes,
runtimeEdges,
toolNodes,
toolModel,
messages: adaptMessages,
<<<<<<< HEAD
childrenInteractiveParams:
lastInteractive?.type === 'toolChildrenInteractive' ? lastInteractive.params : undefined
========
const { answerText, plan, completeMessages, usages, interactiveResponse } =
await dispatchPlanAgent({
historyMessages: planHistoryMessages || historiesMessages,
userInput: lastInteractive ? interactiveInput : userChatInput,
interactive: lastInteractive,
subAppList,
getSubAppInfo,
systemPrompt,
model,
temperature,
top_p: aiChatTopP,
stream,
isTopPlanAgent: workflowDispatchDeep === 1
});
const text = `${answerText}${plan ? `\n\`\`\`json\n${JSON.stringify(plan, null, 2)}\n\`\`\`` : ''}`;
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
})
});
agentPlan = plan;
usagePush(usages);
// A sub agent plan never returns an interactive response; a top agent plan always does.
if (interactiveResponse) {
return {
[DispatchNodeResponseKeyEnum.answerText]: `${tmpText}${text}`,
[DispatchNodeResponseKeyEnum.memories]: {
[planMessagesKey]: filterMemoryMessages(completeMessages),
[agentPlanKey]: agentPlan
},
[DispatchNodeResponseKeyEnum.interactive]: interactiveResponse
};
} else {
planHistoryMessages = undefined;
}
}
};
const replanCallFn = async ({ plan }: { plan: AgentPlanType }) => {
if (!agentPlan) return;
addLog.debug(`Replan step`);
// Temporary code
const tmpText = '\n # Regenerating the plan...\n';
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: tmpText
})
});
const {
answerText,
plan: rePlan,
completeMessages,
usages,
interactiveResponse
} = await dispatchReplanAgent({
historyMessages: replanMessages || historiesMessages,
userInput: lastInteractive ? interactiveInput : userChatInput,
plan,
interactive: lastInteractive,
subAppList,
getSubAppInfo,
systemPrompt,
model,
temperature,
top_p: aiChatTopP,
stream,
isTopPlanAgent: workflowDispatchDeep === 1
});
if (rePlan) {
agentPlan.steps.push(...rePlan.steps);
agentPlan.replan = rePlan.replan;
}
const text = `${answerText}${agentPlan ? `\n\`\`\`json\n${JSON.stringify(agentPlan, null, 2)}\n\`\`\`\n` : ''}`;
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
})
});
usagePush(usages);
// A sub agent plan never returns an interactive response; a top agent plan always does.
if (interactiveResponse) {
return {
[DispatchNodeResponseKeyEnum.answerText]: `${tmpText}${text}`,
[DispatchNodeResponseKeyEnum.memories]: {
[replanMessagesKey]: filterMemoryMessages(completeMessages),
[agentPlanKey]: agentPlan
},
[DispatchNodeResponseKeyEnum.interactive]: interactiveResponse
};
} else {
replanMessages = undefined;
}
};
// Plan step: a plan is required and there is no complete plan yet
const isPlanStep = isPlanAgent && (!agentPlan || planHistoryMessages);
// Replan step: a plan exists and there are replan history messages
const isReplanStep = isPlanAgent && agentPlan && replanMessages;
// Run plan/replan
if (isPlanStep) {
const result = await planCallFn();
// A result means the plan produced an interactive response (check/ask)
if (result) return result;
} else if (isReplanStep) {
const result = await replanCallFn({
plan: agentPlan!
});
if (result) return result;
}
addLog.debug(`Start master agent`, {
agentPlan: JSON.stringify(agentPlan, null, 2)
>>>>>>>> 757253617 (squash: compress all commits into one):packages/service/core/workflow/dispatch/ai/agent/index.ts
});
<<<<<<<< HEAD:packages/service/core/workflow/dispatch/ai/tool/index.ts
// Usage computed
=======
interactiveEntryToolParams: lastInteractive?.toolParams
};
return runToolCall({
...props,
...requestParams,
maxRunToolTimes: 100
});
})();
>>>>>>> 757253617 (squash: compress all commits into one)
// Usage computed
const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({
model,
inputTokens: toolCallInputTokens,
@ -430,29 +225,13 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
});
const modelUsage = externalProvider.openaiAccount?.key ? 0 : modelTotalPoints;
<<<<<<< HEAD
const toolUsages = toolDispatchFlowResponses.map((item) => item.flowUsages).flat();
const toolTotalPoints = toolUsages.reduce((sum, item) => sum + item.totalPoints, 0);
========
/* ===== Master agent: execute the plan step by step ===== */
if (!agentPlan) return Promise.reject('No plan generated');
let assistantResponses: AIChatItemValueItemType[] = [];
>>>>>>>> 757253617 (squash: compress all commits into one):packages/service/core/workflow/dispatch/ai/agent/index.ts
while (agentPlan.steps!.filter((item) => !item.response)!.length) {
const pendingSteps = agentPlan?.steps!.filter((item) => !item.response)!;
<<<<<<<< HEAD:packages/service/core/workflow/dispatch/ai/tool/index.ts
// Preview assistant responses
=======
const toolUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
const toolTotalPoints = toolUsages.reduce((sum, item) => sum + item.totalPoints, 0);
// concat tool usage
const totalPointsUsage = modelUsage + toolTotalPoints;
>>>>>>> 757253617 (squash: compress all commits into one)
// Preview assistant responses
const previewAssistantResponses = filterToolResponseToPreview(assistantResponses);
return {
@ -462,11 +241,11 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
.map((item) => item.text?.content || '')
.join('')
},
<<<<<<< HEAD
[DispatchNodeResponseKeyEnum.runTimes]: toolDispatchFlowResponses.reduce(
(sum, item) => sum + item.runTimes,
0
),
<<<<<<< HEAD
<<<<<<< HEAD
[DispatchNodeResponseKeyEnum.assistantResponses]: isResponseAnswerText
? previewAssistantResponses
@ -475,6 +254,8 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
=======
[DispatchNodeResponseKeyEnum.runTimes]: runTimes,
>>>>>>> 757253617 (squash: compress all commits into one)
=======
>>>>>>> daaea654e (feat: plan response in ui)
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
>>>>>>> a48ad2abe (squash: compress all commits into one)
[DispatchNodeResponseKeyEnum.nodeResponse]: {
@ -490,11 +271,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
10000,
useVision
),
<<<<<<< HEAD
toolDetail: toolDispatchFlowResponses.map((item) => item.flowResponses).flat(),
=======
toolDetail: dispatchFlowResponse.map((item) => item.flowResponses).flat(),
>>>>>>> 757253617 (squash: compress all commits into one)
mergeSignId: nodeId,
finishReason: finish_reason
},
@ -506,134 +283,17 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
totalPoints: modelUsage,
inputTokens: toolCallInputTokens,
outputTokens: toolCallOutputTokens
<<<<<<< HEAD
========
for await (const step of pendingSteps) {
addLog.debug(`Step call: ${step.id}`, step);
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: `\n # ${step.id}: ${step.title}\n`
})
});
const result = await stepCall({
...props,
getSubAppInfo,
steps: agentPlan.steps, // pass all steps, not only the pending ones
subAppList,
step,
filesMap,
subAppsMap
});
step.response = result.rawResponse;
step.summary = result.summary;
assistantResponses.push(...result.assistantResponses);
}
if (agentPlan?.replan === true) {
const replanResult = await replanCallFn({
plan: agentPlan
});
if (replanResult) return replanResult;
}
}
return {
// The master agent does not trigger interaction for now
// [DispatchNodeResponseKeyEnum.interactive]: interactiveResponse,
// TODO: store memoryMessages in a dedicated table
[DispatchNodeResponseKeyEnum.memories]: {
[agentPlanKey]: agentPlan,
[planMessagesKey]: undefined,
[replanMessagesKey]: undefined
>>>>>>>> 757253617 (squash: compress all commits into one):packages/service/core/workflow/dispatch/ai/agent/index.ts
},
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
// Points consumed (for display)
// totalPoints: totalPointsUsage,
// toolCallInputTokens: inputTokens,
// toolCallOutputTokens: outputTokens,
// childTotalPoints: toolTotalPoints,
// model: modelName,
query: userChatInput,
// toolDetail: dispatchFlowResponse,
mergeSignId: nodeId
}
};
}
// Simple tool-call mode (a single round of conversation, so it is not affected by taskIsComplexity across turns)
return Promise.reject('Simple mode is not supported yet');
=======
},
// Tool usage costs
...toolUsages
],
[DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
};
>>>>>>> 757253617 (squash: compress all commits into one)
} catch (error) {
return getNodeErrResponse({ error });
}
};
<<<<<<< HEAD
export const useSubApps = async ({
subApps,
lang,
filesMap
}: {
subApps: FlowNodeTemplateType[];
lang?: localeType;
filesMap: Record<string, string>;
}) => {
// Get sub apps
const runtimeSubApps = await rewriteSubAppsToolset({
subApps: subApps.map<RuntimeNodeItemType>((node) => {
return {
nodeId: node.id,
name: node.name,
avatar: node.avatar,
intro: node.intro,
toolDescription: node.toolDescription,
flowNodeType: node.flowNodeType,
showStatus: node.showStatus,
isEntry: false,
inputs: node.inputs,
outputs: node.outputs,
pluginId: node.pluginId,
version: node.version,
toolConfig: node.toolConfig,
catchError: node.catchError
};
}),
lang
});
const subAppList = getSubApps({
subApps: runtimeSubApps,
addReadFileTool: Object.keys(filesMap).length > 0
});
const subAppsMap = new Map(runtimeSubApps.map((item) => [item.nodeId, item]));
const getSubAppInfo = (id: string) => {
const toolNode = subAppsMap.get(id) || systemSubInfo[id];
return {
name: toolNode?.name || '',
avatar: toolNode?.avatar || '',
toolDescription: toolNode?.toolDescription || toolNode?.name || ''
};
};
return {
subAppList,
subAppsMap,
getSubAppInfo
=======
const getMultiInput = async ({
runningUserInfo,
histories,
@ -642,6 +302,7 @@ const getMultiInput = async ({
maxFiles,
customPdfParse,
inputFiles,
<<<<<<< HEAD
<<<<<<< HEAD
hasReadFilesTool,
usageId,
@ -651,6 +312,10 @@ const getMultiInput = async ({
=======
hasReadFilesTool
>>>>>>> a48ad2abe (squash: compress all commits into one)
=======
hasReadFilesTool,
usageId
>>>>>>> daaea654e (feat: plan response in ui)
}: {
runningUserInfo: ChatDispatchProps['runningUserInfo'];
histories: ChatItemType[];
@ -660,6 +325,7 @@ const getMultiInput = async ({
customPdfParse?: boolean;
inputFiles: UserChatItemValueItemType['file'][];
hasReadFilesTool: boolean;
<<<<<<< HEAD
<<<<<<< HEAD
usageId?: string;
appId: string;
@ -667,6 +333,9 @@ const getMultiInput = async ({
uId: string;
=======
>>>>>>> a48ad2abe (squash: compress all commits into one)
=======
usageId?: string;
>>>>>>> daaea654e (feat: plan response in ui)
}) => {
// Not file quote
if (!fileLinks || hasReadFilesTool) {
@ -693,6 +362,7 @@ const getMultiInput = async ({
requestOrigin,
maxFiles,
customPdfParse,
usageId,
teamId: runningUserInfo.teamId,
tmbId: runningUserInfo.tmbId
});
@ -700,6 +370,53 @@ const getMultiInput = async ({
return {
documentQuoteText: text,
userFiles: fileLinks.map((url) => parseUrlToFileType(url)).filter(Boolean)
>>>>>>> 757253617 (squash: compress all commits into one)
};
};
/*
Tool call: automatically add the file prompt to the question.
Guide the LLM to call tools.
*/
const toolCallMessagesAdapt = ({
userInput,
skip
}: {
userInput: UserChatItemValueItemType[];
skip?: boolean;
}): UserChatItemValueItemType[] => {
if (skip) return userInput;
const files = userInput.filter((item) => item.file);
if (files.length > 0) {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
if (userInput.some((item) => item.text)) {
return userInput.map((item) => {
if (item.text) {
const text = item.text?.content || '';
return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}
return item;
});
}
// Every input is a file
return [
{
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: '' })
}
}
];
}
return userInput;
};
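A usage sketch; the file item fields are trimmed for brevity, since real UserChatItemValueItemType file entries carry more fields.

// Usage sketch (illustrative input):
const adapted = toolCallMessagesAdapt({
  skip: false,
  userInput: [
    { file: { type: 'image', url: 'https://example.com/a.png' } } as UserChatItemValueItemType,
    { text: { content: 'Describe this image' } } as UserChatItemValueItemType
  ]
});
// The text item's content is wrapped by getMultiplePrompt with fileCount: 0, imgCount: 1.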

View File

@ -1,7 +0,0 @@
import type { DispatchSubAppResponse } from '../../type';
export const dispatchContextAgent = async (props: {}): Promise<DispatchSubAppResponse> => {
return {
response: ''
};
};

View File

@ -1,10 +0,0 @@
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
import { SubAppIds } from '../constants';
export const StopAgentTool: ChatCompletionTool = {
type: 'function',
function: {
name: SubAppIds.stop,
description: 'Call this tool once all tasks are complete.'
}
};

View File

@ -1,6 +1,4 @@
<<<<<<< HEAD
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { runWorkflow } from '../../index';
@ -18,88 +16,6 @@ import { runAgentCall } from '../../../../ai/llm/agentCall';
export const runToolCall = async (props: DispatchToolModuleProps): Promise<RunToolResponse> => {
const { messages, toolNodes, toolModel, childrenInteractiveParams, ...workflowProps } = props;
const {
=======
import { filterGPTMessageByMaxContext } from '../../../../ai/llm/utils';
import type {
ChatCompletionToolMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { runWorkflow } from '../../index';
import type { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type';
import type { DispatchFlowResponse } from '../../type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { formatToolResponse, parseToolArgs } from '../utils';
import { initToolNodes, initToolCallEdges } from './utils';
import { computedMaxToken } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { createLLMResponse } from '../../../../ai/llm/request';
import { toolValueTypeList, valueTypeJsonSchemaMap } from '@fastgpt/global/core/workflow/constants';
type ToolRunResponseType = {
toolRunResponse?: DispatchFlowResponse;
toolMsgParams: ChatCompletionToolMessageParam;
}[];
/*
Check
1. Remaining run count
2. Interactive entry
3. Stop signal
Init
1. Build the tools list
2. Build the request messages
3. Load request llm messages: system prompt, histories, human question, assistant responses, tool responses, assistant responses...
4. Request the LLM
Run
1. Parse the tool calls
2. Run each tool workflow
3. Concat the assistants' tool messages
4. Count the request and llm response message tokens
5. Concat the request, llm response and tool response messages
6. assistant responses: history assistant + tool assistant + tool child assistant
7. Repeat until no tool calls remain
Interactive response
1. Record the entry node ids
2. Count the completeMessages tokens
Saved tool params:
1. The interactive node id
2. toolCallId: the ID of this tool call
3. messages: assistant responses and tool responses
*/
export const runToolCall = async (
props: DispatchToolModuleProps & {
maxRunToolTimes: number;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
messages,
toolNodes,
toolModel,
maxRunToolTimes,
interactiveEntryToolParams,
...workflowProps
} = props;
let {
>>>>>>> 757253617 (squash: compress all commits into one)
res,
checkIsStopping,
requestOrigin,
@ -122,105 +38,7 @@ export const runToolCall = async (
}
} = workflowProps;
<<<<<<< HEAD
// Build the tools parameter
=======
if (maxRunToolTimes <= 0 && response) {
return response;
}
// Interactive
if (interactiveEntryToolParams) {
initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
// Run entry tool
const toolRunResponse = await runWorkflow({
...workflowProps,
usageId: undefined,
isToolCall: true
});
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
// Response to frontend
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: interactiveEntryToolParams.toolCallId,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
// Check stop signal
const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
const requestMessages = [
...messages,
...interactiveEntryToolParams.memoryMessages.map((item) =>
item.role === 'tool' && item.tool_call_id === interactiveEntryToolParams.toolCallId
? {
...item,
content: stringToolResponse
}
: item
)
];
if (hasStopSignal || workflowInteractiveResponse) {
// Get interactive tool data
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: interactiveEntryToolParams.toolCallId,
memoryMessages: interactiveEntryToolParams.memoryMessages
}
}
: undefined;
return {
dispatchFlowResponse: [toolRunResponse],
toolCallInputTokens: 0,
toolCallOutputTokens: 0,
completeMessages: requestMessages,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes,
toolWorkflowInteractiveResponse
};
}
return runToolCall(
{
...props,
interactiveEntryToolParams: undefined,
maxRunToolTimes: maxRunToolTimes - 1,
// Rewrite toolCall messages
messages: requestMessages
},
{
dispatchFlowResponse: [toolRunResponse],
toolCallInputTokens: 0,
toolCallOutputTokens: 0,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes
}
);
}
// ------------------------------------------------------------
const assistantResponses = response?.assistantResponses || [];
>>>>>>> 757253617 (squash: compress all commits into one)
const toolNodesMap = new Map<string, ToolNodeItemType>();
const tools: ChatCompletionTool[] = toolNodes.map((item) => {
toolNodesMap.set(item.nodeId, item);
@ -272,7 +90,6 @@ export const runToolCall = async (
}
};
});
const getToolInfo = (name: string) => {
const toolNode = toolNodesMap.get(name);
return {
@ -281,8 +98,6 @@ export const runToolCall = async (
};
};
// SSE response write controller
const write = res ? responseWriteController({ res, readStream: stream }) : undefined;
// Raw tool run responses
const toolRunResponses: DispatchFlowResponse[] = [];
@ -311,74 +126,12 @@ export const runToolCall = async (
requestOrigin,
retainDatasetCite,
useVision: aiChatVision
},
isAborted: checkIsStopping,
userKey: externalProvider.openaiAccount,
onReasoning({ text }) {
if (!aiChatReasoning) return;
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: text
@ -388,10 +141,6 @@ export const runToolCall = async (
onStreaming({ text }) {
if (!isResponseAnswerText) return;
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
@ -401,7 +150,6 @@ export const runToolCall = async (
onToolCall({ call }) {
if (!isResponseAnswerText) return;
const toolNode = toolNodesMap.get(call.function.name);
if (toolNode) {
workflowStreamResponse?.({
event: SseResponseEventEnum.toolCall,
@ -421,34 +169,10 @@ export const runToolCall = async (
onToolParam({ tool, params }) {
if (!isResponseAnswerText) return;
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.toolParams,
data: {
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params,
@ -456,7 +180,6 @@ export const runToolCall = async (
}
}
});
},
handleToolResponse: async ({ call, messages }) => {
const toolNode = toolNodesMap.get(call.function?.name);
@ -589,213 +312,4 @@ export const runToolCall = async (
finish_reason,
toolWorkflowInteractiveResponse: interactiveResponse
};
};
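For orientation, a rough invocation sketch of the refactored runToolCall. Every right-hand value here is a stub; only the parameter and result names come from the DispatchToolModuleProps and RunToolResponse types in this diff.

// Sketch only: dispatchProps, requestMessages, toolNodes and toolModel are stand-ins.
const { completeMessages, assistantResponses, finish_reason } = await runToolCall({
  ...dispatchProps, // assumed ModuleDispatchProps supplied by the workflow runtime
  messages: requestMessages, // ChatCompletionMessageParam[]
  toolNodes, // ToolNodeItemType[] resolved from the flow graph
  toolModel, // LLMModelItemType of the tool-calling model
  childrenInteractiveParams: undefined // set only when resuming an interactive tool run
});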

View File

@ -1,60 +0,0 @@
import type {
ChatCompletionMessageParam,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type {
ModuleDispatchProps,
DispatchNodeResponseType
} from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type';
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import type { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type {
ToolCallChildrenInteractive,
InteractiveNodeResponseType,
WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model';
import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatIsResponseText]: boolean;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
childrenInteractiveParams?: ToolCallChildrenInteractive['params'];
};
export type RunToolResponse = {
toolDispatchFlowResponses: DispatchFlowResponse[];
toolCallInputTokens: number;
toolCallOutputTokens: number;
completeMessages: ChatCompletionMessageParam[];
assistantResponses: AIChatItemValueItemType[];
finish_reason: CompletionFinishReason;
toolWorkflowInteractiveResponse?: ToolCallChildrenInteractive;
};
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];
jsonSchema?: JSONSchemaInputType;
};

View File

@ -1,19 +1,48 @@
import type {
ChatCompletionMessageParam,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import type { DispatchFlowResponse } from '../../type';
import type { AIChatItemValueItemType, ChatItemType } from '@fastgpt/global/core/chat/type';
import type { ToolCallChildrenInteractive } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
childrenInteractiveParams?: ToolCallChildrenInteractive['params'];
};
export type RunToolResponse = {
toolDispatchFlowResponses: DispatchFlowResponse[];
toolCallInputTokens: number;
toolCallOutputTokens: number;
completeMessages: ChatCompletionMessageParam[];
assistantResponses: AIChatItemValueItemType[];
finish_reason: CompletionFinishReason;
toolWorkflowInteractiveResponse?: ToolCallChildrenInteractive;
};
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];
jsonSchema?: JSONSchemaInputType;
};
export type DispatchSubAppResponse = {
response: string;
usages?: ChatNodeUsageType[];
};
export type GetSubAppInfoFnType = (id: string) => {
name: string;
avatar: string;
toolDescription: string;
};
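A minimal sketch of a GetSubAppInfoFnType implementation backed by a plain map; subAppMap and its value shape are assumptions for illustration only.

const subAppMap = new Map<string, { name: string; avatar: string; intro: string }>();

const getSubAppInfo: GetSubAppInfoFnType = (id) => {
  // Fall back to empty strings so callers can render without null checks
  const subApp = subAppMap.get(id);
  return {
    name: subApp?.name ?? '',
    avatar: subApp?.avatar ?? '',
    toolDescription: subApp?.intro ?? ''
  };
};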

View File

@ -1,34 +1,41 @@
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { type AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { type FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import { type RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { type RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
export const updateToolInputValue = ({
  params,
  inputs
}: {
  params: Record<string, any>;
  inputs: FlowNodeInputItemType[];
}) => {
  return inputs.map((input) => ({
    ...input,
    value: params[input.key] ?? input.value
  }));
};
export const filterToolResponseToPreview = (response: AIChatItemValueItemType[]) => {
  return response.map((item) => {
    if (item.tools) {
      const formatTools = item.tools?.map((tool) => {
        return {
          ...tool,
          response: sliceStrStartEnd(tool.response, 500, 500)
        };
      });
      return {
        ...item,
        tools: formatTools
      };
    }
    return item;
  });
};
export const formatToolResponse = (toolResponses: any) => {
if (typeof toolResponses === 'object') {
return JSON.stringify(toolResponses, null, 2);
@ -37,42 +44,6 @@ export const formatToolResponse = (toolResponses: any) => {
return toolResponses ? String(toolResponses) : 'none';
};
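// For reference, the three normalization cases above (sample values illustrative):
//   formatToolResponse({ ok: true })  -> pretty-printed JSON (objects and arrays)
//   formatToolResponse('plain text')  -> 'plain text' (strings pass through)
//   formatToolResponse(undefined)     -> 'none' (empty values)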
// Mutate values on the original objects instead of replacing them; the tool workflow still references the originals
export const initToolCallEdges = (edges: RuntimeEdgeItemType[], entryNodeIds: string[]) => {
edges.forEach((edge) => {
@ -81,7 +52,6 @@ export const initToolCallEdges = (edges: RuntimeEdgeItemType[], entryNodeIds: st
}
});
};
export const initToolNodes = (
nodes: RuntimeNodeItemType[],
@ -96,9 +66,4 @@ export const initToolNodes = (
}
}
});
};

View File

@ -139,11 +139,3 @@ export const getToolNodesByIds = ({
};
});
};
export const parseToolArgs = <T = Record<string, any>>(toolArgs: string) => {
try {
return json5.parse(sliceJsonStr(toolArgs)) as T;
} catch {
return;
}
};
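A quick usage sketch: json5.parse plus sliceJsonStr lets parseToolArgs tolerate relaxed quoting and trailing junk in model-generated argument strings, assuming sliceJsonStr trims to the outermost braces as its name suggests. The sample inputs are invented.

parseToolArgs<{ city: string }>("{ city: 'Tokyo' } trailing tokens"); // -> { city: 'Tokyo' }
parseToolArgs('not json at all'); // -> undefined rather than throwing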

View File

@ -29,8 +29,8 @@
"config_input_guide_lexicon": "Set Up Lexicon",
"config_input_guide_lexicon_title": "Set Up Lexicon",
"confirm_clear_input_value": "Are you sure to clear the form content? \nDefault values will be restored!",
"confirm_plan": "Confirm plan",
"confirm_to_clear_share_chat_history": "Are you sure you want to clear all chat history?",
"confirm_plan_agent": "Please confirm whether the change plan meets expectations. If you need to modify it, you can send the modification requirements in the input box at the bottom.",
"content_empty": "No Content",
"contextual": "{{num}} Contexts",
"contextual_preview": "Contextual Preview {{num}} Items",
@ -81,6 +81,7 @@
"not_query": "Missing query content",
"not_select_file": "No file selected",
"plan_agent": "Plan agent",
"plan_check_tip": "Please confirm your plan or enter new requirements in the input box",
"plugins_output": "Plugin Output",
"press_to_speak": "Hold down to speak",
"query_extension_IO_tokens": "Problem Optimization Input/Output Tokens",

View File

@ -29,8 +29,8 @@
"config_input_guide_lexicon": "配置词库",
"config_input_guide_lexicon_title": "配置词库",
"confirm_clear_input_value": "确认清空表单内容?将会恢复默认值!",
"confirm_plan": "确认计划",
"confirm_to_clear_share_chat_history": "确认清空所有聊天记录?",
"confirm_plan_agent": "请确认改计划是否符合预期,如需修改,可在底部输入框中发送修改要求。",
"content_empty": "内容为空",
"contextual": "{{num}}条上下文",
"contextual_preview": "上下文预览 {{num}} 条",
@ -81,6 +81,7 @@
"not_query": "缺少查询内容",
"not_select_file": "未选择文件",
"plan_agent": "任务规划",
"plan_check_tip": "请确认计划或在输入框中输入新要求",
"plugins_output": "插件输出",
"press_to_speak": "按住说话",
"query_extension_IO_tokens": "问题优化输入/输出 Tokens",

View File

@ -29,8 +29,8 @@
"config_input_guide_lexicon": "設定詞彙庫",
"config_input_guide_lexicon_title": "設定詞彙庫",
"confirm_clear_input_value": "確認清空表單內容?\n將會恢復默認值",
"confirm_plan": "確認計劃",
"confirm_to_clear_share_chat_history": "確認清空所有聊天記錄?",
"confirm_plan_agent": "請確認改計劃是否符合預期,如需修改,可在底部輸入框中發送修改要求。",
"content_empty": "無內容",
"contextual": "{{num}} 筆上下文",
"contextual_preview": "上下文預覽 {{num}} 筆",
@ -81,6 +81,7 @@
"not_query": "缺少查詢內容",
"not_select_file": "尚未選取檔案",
"plan_agent": "任務規劃",
"plan_check_tip": "請確認計劃或在輸入框中輸入新要求",
"plugins_output": "外掛程式輸出",
"press_to_speak": "按住說話",
"query_extension_IO_tokens": "問題最佳化輸入/輸出 Tokens",

View File

@ -31,6 +31,8 @@ import { addStatisticalDataToHistoryItem } from '@/global/core/chat/utils';
import dynamic from 'next/dynamic';
import { useMemoizedFn } from 'ahooks';
import ChatBoxDivider from '../../../Divider';
import { eventBus, EventNameEnum } from '@/web/common/utils/eventbus';
import { ConfirmPlanAgentText } from '@fastgpt/global/core/workflow/runtime/constants';
import { useMemoEnhance } from '@fastgpt/web/hooks/useMemoEnhance';
const ResponseTags = dynamic(() => import('./ResponseTags'));
@ -58,6 +60,7 @@ type Props = {
};
questionGuides?: string[];
children?: React.ReactNode;
hasPlanCheck?: boolean;
} & ChatControllerProps;
const RenderQuestionGuide = ({ questionGuides }: { questionGuides: string[] }) => {
@ -122,7 +125,7 @@ const AIContentCard = React.memo(function AIContentCard({
);
});
const ChatItem = (props: Props) => {
const ChatItem = ({ hasPlanCheck, ...props }: Props) => {
const { avatar, statusBoxData, children, isLastChild, questionGuides = [], chat } = props;
const { t } = useTranslation();
@ -163,7 +166,7 @@ const ChatItem = (props: Props) => {
const outLinkAuthData = useContextSelector(WorkflowRuntimeContext, (v) => v.outLinkAuthData);
const isShowReadRawSource = useContextSelector(ChatItemContext, (v) => v.isShowReadRawSource);
const { totalQuoteList: quoteList = [] } = useMemo(
const { totalQuoteList: quoteList = [] } = useMemoEnhance(
() => addStatisticalDataToHistoryItem(chat),
[chat]
);
@ -172,7 +175,7 @@ const ChatItem = (props: Props) => {
const { copyData } = useCopyData();
const chatStatusMap = useMemo(() => {
const chatStatusMap = useMemoEnhance(() => {
if (!statusBoxData?.status) return;
return colorMap[statusBoxData.status];
}, [statusBoxData?.status]);
@ -181,8 +184,12 @@ const ChatItem = (props: Props) => {
1. The interactive node is divided into n dialog boxes.
2. Auto-complete the last textnode
*/
const splitAiResponseResults = useMemo(() => {
if (chat.obj === ChatRoleEnum.Human) return [chat.value];
const { responses: splitAiResponseResults } = useMemo(() => {
if (chat.obj === ChatRoleEnum.Human) {
return {
responses: [chat.value]
};
}
if (chat.obj === ChatRoleEnum.AI) {
// Remove empty text node
@ -200,7 +207,11 @@ const ChatItem = (props: Props) => {
let currentGroup: AIChatItemValueItemType[] = [];
filterList.forEach((value) => {
// Each time an interactive node is encountered, push a brand-new group
if (value.interactive) {
if (value.interactive.type === 'agentPlanCheck') {
return;
}
if (currentGroup.length > 0) {
groupedValues.push(currentGroup);
currentGroup = [];
@ -235,10 +246,14 @@ const ChatItem = (props: Props) => {
}
}
return groupedValues;
return {
responses: groupedValues
};
}
return [];
return {
responses: []
};
}, [chat.obj, chat.value, isChatting]);
const setCiteModalData = useContextSelector(ChatItemContext, (v) => v.setCiteModalData);
@ -283,8 +298,6 @@ const ChatItem = (props: Props) => {
}
);
const aiSubApps = 'subApps' in chat ? chat.subApps : undefined;
return (
<Box
data-chat-id={chat.dataId}
@ -463,6 +476,23 @@ const ChatItem = (props: Props) => {
</Box>
);
})}
{hasPlanCheck && isLastChild && (
<Flex mt={3}>
<Button
leftIcon={<MyIcon name={'common/check'} w={'16px'} />}
variant={'primaryOutline'}
onClick={() => {
eventBus.emit(EventNameEnum.sendQuestion, {
text: ConfirmPlanAgentText,
focus: true
});
}}
>
{t('chat:confirm_plan')}
</Button>
</Flex>
)}
</Box>
);
};

View File

@ -14,7 +14,7 @@ import type {
} from '@fastgpt/global/core/chat/type.d';
import { useToast } from '@fastgpt/web/hooks/useToast';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { Box, Checkbox, Flex } from '@chakra-ui/react';
import { Box, Button, Checkbox, Flex } from '@chakra-ui/react';
import { EventNameEnum, eventBus } from '@/web/common/utils/eventbus';
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { useForm } from 'react-hook-form';
@ -249,6 +249,7 @@ const ChatBox = ({
tool,
subAppId,
interactive,
agentPlan,
variables,
nodeResponse,
durationSeconds,
@ -488,6 +489,14 @@ const ChatBox = ({
value: item.value.concat(val)
};
}
if (event === SseResponseEventEnum.agentPlan && agentPlan) {
return {
...item,
value: item.value.concat({
agentPlan
})
};
}
if (event === SseResponseEventEnum.workflowDuration && durationSeconds) {
return {
...item,
@ -1134,6 +1143,8 @@ const ChatBox = ({
}, [chatType, chatRecords.length, chatStartedWatch]);
// Chat history
const hasPlanCheck =
lastInteractive?.type === 'agentPlanCheck' && !lastInteractive.params.confirmed;
const RecordsBox = useMemo(() => {
return (
<Box id={'history'}>
@ -1166,6 +1177,7 @@ const ChatBox = ({
avatar={appAvatar}
chat={item}
isLastChild={index === chatRecords.length - 1}
hasPlanCheck={hasPlanCheck}
{...{
showVoiceIcon,
statusBoxData,
@ -1236,7 +1248,8 @@ const ChatBox = ({
t,
showMarkIcon,
itemRefs,
onCloseCustomFeedback
onCloseCustomFeedback,
hasPlanCheck
]);
// Child box

View File

@ -8,6 +8,7 @@ import type {
import { ChatSiteItemType } from '@fastgpt/global/core/chat/type';
import type { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { AgentPlanType } from '@fastgpt/service/core/workflow/dispatch/ai/agent/sub/plan/type';
export type generatingMessageProps = {
event: SseResponseEventEnum;
@ -19,6 +20,7 @@ export type generatingMessageProps = {
status?: 'running' | 'finish';
tool?: ToolModuleResponseItemType;
interactive?: WorkflowInteractiveResponseType;
agentPlan?: AgentPlanType;
variables?: Record<string, any>;
nodeResponse?: ChatHistoryItemResType;
durationSeconds?: number;

View File

@ -26,18 +26,15 @@ import type {
import { isEqual } from 'lodash';
import { useTranslation } from 'next-i18next';
import { eventBus, EventNameEnum } from '@/web/common/utils/eventbus';
import {
SelectOptionsComponent,
FormInputComponent,
AgentPlanCheckComponent
} from './Interactive/InteractiveComponents';
import { SelectOptionsComponent, FormInputComponent } from './Interactive/InteractiveComponents';
import { extractDeepestInteractive } from '@fastgpt/global/core/workflow/runtime/utils';
import { useContextSelector } from 'use-context-selector';
import { type OnOpenCiteModalProps } from '@/web/core/chat/context/chatItemContext';
import { WorkflowRuntimeContext } from '../ChatContainer/context/workflowRuntimeContext';
import { useCreation } from 'ahooks';
import { useSafeTranslation } from '@fastgpt/web/hooks/useSafeTranslation';
import { ConfirmPlanAgentText } from '@fastgpt/global/core/workflow/runtime/constants';
import type { AgentPlanType } from '@fastgpt/service/core/workflow/dispatch/ai/agent/sub/plan/type';
import MyDivider from '@fastgpt/web/components/common/MyDivider';
const accordionButtonStyle = {
w: 'auto',
@ -334,6 +331,33 @@ const RenderPaymentPauseInteractive = React.memo(function RenderPaymentPauseInte
);
});
const RenderAgentPlan = React.memo(function RenderAgentPlan({
agentPlan
}: {
agentPlan: AgentPlanType;
}) {
const { t } = useTranslation();
return (
<Box>
<Box fontSize={'xl'} color={'myGray.900'} fontWeight={'bold'}>
{agentPlan.task}
</Box>
<Box>
{agentPlan.steps.map((step, index) => (
<Box key={step.id} mt={3}>
<Box fontSize={'lg'} fontWeight={'bold'}>
{`${index + 1}. ${step.title}`}
</Box>
<Box>{step.description}</Box>
</Box>
))}
</Box>
<MyDivider />
<Box>{t('chat:plan_check_tip')}</Box>
</Box>
);
});
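// An illustrative AgentPlanType value covering exactly the fields the component
// above reads (task, steps[].id/title/description). The strings are invented and
// the real type may carry more fields.
const exampleAgentPlan = {
  task: 'Summarize Q3 sales data',
  steps: [
    { id: 'step-1', title: 'Load the report', description: 'Fetch the uploaded spreadsheet.' },
    { id: 'step-2', title: 'Aggregate totals', description: 'Group revenue by region.' }
  ]
};
// Rendered as: <RenderAgentPlan agentPlan={exampleAgentPlan as AgentPlanType} />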
const AIResponseBox = ({
chatItemDataId,
value,
@ -391,14 +415,7 @@ const AIResponseBox = ({
);
}
if (interactive.type === 'agentPlanCheck') {
return (
<AgentPlanCheckComponent
interactiveParams={interactive.params}
onConfirm={() => {
onSendPrompt(ConfirmPlanAgentText);
}}
/>
);
return null;
}
if (interactive.type === 'agentPlanAskQuery') {
return <Box>{interactive.params.content}</Box>;
@ -407,6 +424,9 @@ const AIResponseBox = ({
return <RenderPaymentPauseInteractive interactive={interactive} />;
}
}
if ('agentPlan' in value && value.agentPlan) {
return <RenderAgentPlan agentPlan={value.agentPlan} />;
}
// Abandon
if ('tools' in value && value.tools) {

View File

@ -247,23 +247,3 @@ export const FormInputComponent = React.memo(function FormInputComponent({
</Box>
);
});
// Agent interactive
export const AgentPlanCheckComponent = React.memo(function AgentPlanCheckComponent({
interactiveParams,
onConfirm
}: {
interactiveParams: AgentPlanCheckInteractive['params'];
onConfirm: () => void;
}) {
const { t } = useTranslation();
return interactiveParams?.confirmed ? (
// TODO: temporary UI
<Box></Box>
) : (
<Box>
<Box>{t('chat:confirm_plan_agent')}</Box>
<Button onClick={onConfirm}>{t('common:Confirm')}</Button>
</Box>
);
});

View File

@ -87,7 +87,7 @@ const AppListContextProvider = ({ children }: { children: ReactNode }) => {
// agent page
if (router.pathname.includes('/agent')) {
return !type || type === 'all'
? [AppTypeEnum.folder, AppTypeEnum.simple, AppTypeEnum.workflow]
? [AppTypeEnum.folder, AppTypeEnum.simple, AppTypeEnum.workflow, AppTypeEnum.agent]
: [AppTypeEnum.folder, type];
}

View File

@ -2,6 +2,10 @@ import { AppTypeEnum } from '@fastgpt/global/core/app/constants';
import { i18nT } from '@fastgpt/web/i18n/utils';
export const appTypeTagMap = {
[AppTypeEnum.agent]: {
label: 'Agent',
icon: 'core/app/type/mcpTools'
},
[AppTypeEnum.simple]: {
label: i18nT('app:type.Chat_Agent'),
icon: 'core/app/type/simple'
@ -28,6 +32,5 @@ export const appTypeTagMap = {
},
[AppTypeEnum.tool]: undefined,
[AppTypeEnum.folder]: undefined,
[AppTypeEnum.hidden]: undefined,
[AppTypeEnum.agent]: undefined
[AppTypeEnum.hidden]: undefined
};

View File

@ -12,6 +12,7 @@ import { formatTime2YMDHMW } from '@fastgpt/global/common/string/time';
import { getWebReqUrl } from '@fastgpt/web/common/system/utils';
import type { OnOptimizePromptProps } from '@/components/common/PromptEditor/OptimizerPopover';
import type { OnOptimizeCodeProps } from '@/pageComponents/app/detail/WorkflowComponents/Flow/nodes/NodeCode/Copilot';
import type { AgentPlanType } from '@fastgpt/service/core/workflow/dispatch/ai/agent/sub/plan/type';
type StreamFetchProps = {
url?: string;
@ -36,6 +37,12 @@ type ResponseQueueItemType =
event: SseResponseEventEnum.interactive;
[key: string]: any;
}
| {
responseValueId?: string;
subAppId?: string;
event: SseResponseEventEnum.agentPlan;
agentPlan: AgentPlanType;
}
| {
responseValueId?: string;
subAppId?: string;
@ -256,6 +263,13 @@ export const streamFetch = ({
event,
...rest
});
} else if (event === SseResponseEventEnum.agentPlan) {
pushDataToQueue({
responseValueId,
subAppId,
event,
agentPlan: rest.agentPlan
});
} else if (event === SseResponseEventEnum.error) {
if (rest.statusText === TeamErrEnum.aiPointsNotEnough) {
useSystemStore.getState().setNotSufficientModalType(TeamErrEnum.aiPointsNotEnough);