perf: top agent code

This commit is contained in:
archer 2025-12-08 15:34:01 +08:00
parent 3fd837c221
commit 4ff4b6e9e7
No known key found for this signature in database
GPG Key ID: 4446499B846D4A9E
29 changed files with 283 additions and 260 deletions

View File

@ -1243,7 +1243,7 @@ async function dispatchToolCall(params: {
if (toolId === SubAppIds.model) {
// 调用 Model Agent
const toolParams = parseToolArgs<{
const toolParams = parseJsonArgs<{
systemPrompt: string;
task: string;
}>(params.call.function.arguments);
@ -1258,7 +1258,7 @@ async function dispatchToolCall(params: {
if (toolId === SubAppIds.fileRead) {
// 调用文件读取工具
const toolParams = parseToolArgs<{
const toolParams = parseJsonArgs<{
file_indexes: string[];
}>(params.call.function.arguments);
@ -1285,7 +1285,7 @@ async function dispatchToolCall(params: {
};
}
const toolCallParams = parseToolArgs(params.call.function.arguments);
const toolCallParams = parseJsonArgs(params.call.function.arguments);
if (!toolCallParams) {
return {
response: 'params is not object',
@ -1363,7 +1363,7 @@ LLM 可能生成不符合 JSON Schema 的 Plan或者生成的 Plan 结构不
} {
try {
// 尝试解析为 JSON
const parsed = parseToolArgs<AgentPlanType>(answerText);
const parsed = parseJsonArgs<AgentPlanType>(answerText);
// 验证必要字段
if (parsed && parsed.task && Array.isArray(parsed.steps) && parsed.steps.length > 0) {

View File

@ -1,8 +1,11 @@
export type I18nStringType = {
'zh-CN'?: string;
'zh-Hant'?: string;
en: string;
};
import z from 'zod';
// Zod schema for an internationalised string: English is mandatory,
// Simplified-Chinese and Traditional-Chinese translations are optional.
export const I18nStringSchema = z.object({
  en: z.string(),
  'zh-CN': z.string().optional(),
  'zh-Hant': z.string().optional()
});
// Inferred from the schema so the TS type can never drift from the runtime validator.
export type I18nStringType = z.infer<typeof I18nStringSchema>;
export enum LangEnum {
'zh_CN' = 'zh-CN',

View File

@ -1,20 +1,13 @@
import { ChatCompletionRequestMessageRoleEnum } from '../../ai/constants';
import type {
ChatCompletionContentPart,
ChatCompletionFunctionMessageParam,
ChatCompletionMessageFunctionCall,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionToolMessageParam
} from '../../ai/type';
import { ChatFileTypeEnum, ChatRoleEnum } from '../constants';
import type { HelperBotChatItemType } from './type';
import { GPT2Chat, simpleUserContentPart } from '../adapt';
import type {
AIChatItemValueItemType,
SystemChatItemValueItemType,
UserChatItemValueItemType
} from '../type';
import { simpleUserContentPart } from '../adapt';
export const helperChats2GPTMessages = ({
messages,

View File

@ -1,13 +1,7 @@
import { ObjectIdSchema } from '../../../common/type/mongo';
import { z } from 'zod';
import { ChatRoleEnum } from '../constants';
import {
UserChatItemSchema,
SystemChatItemSchema,
type ChatItemObjItemType,
type ChatItemValueItemType,
ToolModuleResponseItemSchema
} from '../type';
import { UserChatItemSchema, SystemChatItemSchema, ToolModuleResponseItemSchema } from '../type';
export enum HelperBotTypeEnum {
topAgent = 'topAgent'
@ -79,21 +73,11 @@ export type HelperBotChatItemSiteType = z.infer<typeof HelperBotChatItemSiteSche
/* 具体的 bot 的特有参数 */
// AI 模型配置
export const AIModelConfigSchema = z.object({
model: z.string(),
temperature: z.number().nullish(),
maxToken: z.number().nullish(),
stream: z.boolean().nullish()
});
export type AIModelConfigType = z.infer<typeof AIModelConfigSchema>;
export const topAgentParamsSchema = z.object({
role: z.string().nullish(),
taskObject: z.string().nullish(),
selectedTools: z.array(z.string()).nullish(),
selectedDatasets: z.array(z.string()).nullish(),
fileUpload: z.boolean().nullish(),
// AI 模型配置
modelConfig: AIModelConfigSchema.nullish()
fileUpload: z.boolean().nullish()
});
export type TopAgentParamsType = z.infer<typeof topAgentParamsSchema>;

View File

@ -9,7 +9,7 @@ import { getCompressRequestMessagesPrompt } from './prompt';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { i18nT } from '../../../../../web/i18n/utils';
import { parseToolArgs } from '../../utils';
import { parseJsonArgs } from '../../utils';
/**
*
@ -102,7 +102,7 @@ export const compressRequestMessages = async ({
outputTokens: usage.outputTokens
};
const compressResult = parseToolArgs<{
const compressResult = parseJsonArgs<{
compressed_messages: ChatCompletionMessageParam[];
compression_summary: string;
}>(answerText);

View File

@ -320,9 +320,9 @@ export const parseLLMStreamResponse = () => {
};
};
export const parseToolArgs = <T = Record<string, any>>(toolArgs: string) => {
export const parseJsonArgs = <T = Record<string, any>>(str: string) => {
try {
return json5.parse(sliceJsonStr(toolArgs)) as T;
return json5.parse(sliceJsonStr(str)) as T;
} catch {
return;
}

View File

@ -70,7 +70,7 @@ export const getSystemToolsWithInstalled = async ({
}: {
teamId: string;
isRoot: boolean;
}) => {
}): Promise<(AppToolTemplateItemType & { installed: boolean })[]> => {
const [tools, { installedSet, uninstalledSet }] = await Promise.all([
getSystemTools(),
MongoTeamInstalledPlugin.find({ teamId, pluginType: 'tool' }, 'pluginId installed')

View File

@ -5,40 +5,37 @@ import { createLLMResponse } from '../../../../ai/llm/request';
import { getLLMModel } from '../../../../ai/model';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/helperBot/type';
import { getSystemToolsWithInstalled } from '../../../../app/tool/controller';
import { generateResourceList } from './utils';
import { TopAgentFormDataSchema } from './type';
import { addLog } from '../../../../../common/system/log';
import { formatAIResponse } from '../utils';
export const dispatchTopAgent = async (
props: HelperBotDispatchParamsType
): Promise<HelperBotDispatchResponseType> => {
const { query, files, metadata, histories, workflowResponseWrite, teamId, userId } = props;
const { query, files, metadata, histories, workflowResponseWrite, user } = props;
const modelConfig = metadata.data?.modelConfig;
const modelName = modelConfig?.model || global.systemDefaultModel?.llm?.model;
if (!modelName) {
throw new Error('未配置 LLM 模型,请在前端选择模型或在系统中配置默认模型');
}
const modelData = getLLMModel(modelName);
const modelData = getLLMModel();
if (!modelData) {
throw new Error(`模型 ${modelName} 未找到`);
return Promise.reject('Can not get model data');
}
const temperature = modelConfig?.temperature ?? 0.7;
const maxToken = modelConfig?.maxToken ?? 4000;
const stream = modelConfig?.stream ?? true;
const usage = {
model: modelData.model,
inputTokens: 0,
outputTokens: 0
};
const resourceList = await generateResourceList({
teamId,
userId
teamId: user.teamId,
isRoot: user.isRoot
});
const systemPrompt = getPrompt({ resourceList });
const historyMessages = helperChats2GPTMessages({
messages: histories,
reserveTool: false
});
const systemPrompt = getPrompt({ resourceList });
const conversationMessages = [
{ role: 'system' as const, content: systemPrompt },
...historyMessages,
@ -51,10 +48,8 @@ export const dispatchTopAgent = async (
const llmResponse = await createLLMResponse({
body: {
messages: conversationMessages,
model: modelName,
temperature,
stream,
max_tokens: maxToken
model: modelData,
stream: true
},
onStreaming: ({ text }) => {
workflowResponseWrite?.({
@ -69,7 +64,15 @@ export const dispatchTopAgent = async (
});
}
});
usage.inputTokens = llmResponse.usage.inputTokens;
usage.outputTokens = llmResponse.usage.outputTokens;
/*
3
1.
2. JSON { reasoning?: string; question?: string }
3.
*/
const firstPhaseAnswer = llmResponse.answerText;
const firstPhaseReasoning = llmResponse.reasoningText;
@ -83,7 +86,7 @@ export const dispatchTopAgent = async (
}
if (firstPhaseAnswer.includes('「信息收集已完成」')) {
console.log('🔄 TopAgent: 检测到信息收集完成信号,切换到计划生成阶段');
addLog.debug('🔄 TopAgent: 检测到信息收集完成信号,切换到计划生成阶段');
const newMessages = [
...conversationMessages,
@ -91,15 +94,11 @@ export const dispatchTopAgent = async (
{ role: 'user' as const, content: '请你直接生成规划方案' }
];
// console.log('📋 TopAgent 阶段 2: 计划生成');
const planResponse = await createLLMResponse({
body: {
messages: newMessages,
model: modelName,
temperature,
stream,
max_tokens: maxToken
model: modelData,
stream: true
},
onStreaming: ({ text }) => {
workflowResponseWrite?.({
@ -114,94 +113,42 @@ export const dispatchTopAgent = async (
});
}
});
usage.inputTokens = planResponse.usage.inputTokens;
usage.outputTokens = planResponse.usage.outputTokens;
let formData;
try {
const planJson = JSON.parse(planResponse.answerText);
// console.log('解析的计划 JSON:', planJson);
formData = {
role: planJson.task_analysis?.role || '',
taskObject: planJson.task_analysis?.goal || '',
tools: planJson.resources?.tools?.map((tool: any) => tool.id) || [],
const formData = TopAgentFormDataSchema.parse({
role: planJson.task_analysis?.role,
taskObject: planJson.task_analysis?.goal,
tools: planJson.resources?.tools?.map((tool: any) => tool.id),
fileUploadEnabled: planJson.resources?.system_features?.file_upload?.enabled || false
};
});
// Send formData if exists
if (formData) {
workflowResponseWrite?.({
event: SseResponseEventEnum.formData,
data: formData
});
}
} catch (e) {
console.error('解析计划 JSON 失败:', e);
addLog.warn(`[Top agent] parse answer faield`, { text: planResponse.answerText });
}
return {
aiResponse: formatAIResponse(planResponse.answerText, planResponse.reasoningText),
formData
aiResponse: formatAIResponse({
text: planResponse.answerText,
reasoning: planResponse.reasoningText
}),
usage
};
}
const displayText = parsedResponse?.question || firstPhaseAnswer;
return {
aiResponse: formatAIResponse(displayText, firstPhaseReasoning)
aiResponse: formatAIResponse({ text: displayText, reasoning: firstPhaseReasoning }),
usage
};
};
const generateResourceList = async ({
teamId,
userId
}: {
teamId: string;
userId: string;
}): Promise<string> => {
let result = '\n## 可用资源列表\n';
const tools = await getSystemToolsWithInstalled({
teamId,
isRoot: true // TODO: 需要传入实际的 isRoot 值
});
const installedTools = tools.filter((tool) => {
return tool.installed && !tool.isFolder;
});
if (installedTools.length > 0) {
result += '### 工具\n';
installedTools.forEach((tool) => {
const toolId = tool.id;
const name =
typeof tool.name === 'string'
? tool.name
: tool.name?.en || tool.name?.['zh-CN'] || '未命名';
const intro =
typeof tool.intro === 'string' ? tool.intro : tool.intro?.en || tool.intro?.['zh-CN'] || '';
const description = tool.toolDescription || intro || '暂无描述';
result += `- **${toolId}** [工具]: ${name} - ${description}\n`;
});
} else {
result += '### 工具\n暂无已安装的工具\n';
}
// TODO: 知识库
result += '\n### 知识库\n暂未配置知识库\n';
result += '\n### 系统功能\n';
result += '- **file_upload**: 文件上传功能 (enabled, purpose, file_types)\n';
return result;
};
const formatAIResponse = (text: string, reasoning?: string): AIChatItemValueItemType[] => {
const result: AIChatItemValueItemType[] = [];
if (reasoning) {
result.push({
reasoning: {
content: reasoning
}
});
}
result.push({
text: {
content: text
}
});
return result;
};

View File

@ -0,0 +1,9 @@
import { z } from 'zod';
// Form data the Top Agent emits after its plan-generation phase.
// Parsed from the LLM's plan JSON in dispatchTopAgent and pushed to the
// client through the `formData` SSE event.
export const TopAgentFormDataSchema = z.object({
  // Suggested AI role (plan JSON: task_analysis.role).
  role: z.string().optional(),
  // Task objective (plan JSON: task_analysis.goal).
  taskObject: z.string().optional(),
  // Ids of recommended tools (plan JSON: resources.tools[].id); empty list when absent.
  tools: z.array(z.string()).optional().default([]),
  // Whether the plan enables the file-upload system feature.
  fileUploadEnabled: z.boolean().optional().default(false)
});
export type TopAgentFormDataType = z.infer<typeof TopAgentFormDataSchema>;

View File

@ -0,0 +1,48 @@
import type { localeType } from '@fastgpt/global/common/i18n/type';
import { getSystemToolsWithInstalled } from '../../../../app/tool/controller';
/**
 * Build the "available resources" section of the Top Agent system prompt:
 * the team's installed tools plus the built-in system features.
 *
 * @param teamId - team whose installed tools are listed
 * @param isRoot - whether the requesting user is root (affects tool visibility)
 * @param lang - locale used to pick tool name/intro translations (default 'zh-CN')
 * @returns markdown-formatted resource list
 */
export const generateResourceList = async ({
  teamId,
  isRoot,
  lang = 'zh-CN'
}: {
  teamId: string;
  isRoot: boolean;
  lang?: localeType;
}): Promise<string> => {
  // NOTE(review): the bare `###` headings below look like heading text was
  // lost at some point — confirm against the intended prompt wording.
  const getPrompt = ({ tool }: { tool: string }) => {
    return `## 可用资源列表
###
${tool}
###
###
- **file_upload**: (enabled, purpose, file_types)
`;
  };

  const tools = await getSystemToolsWithInstalled({
    teamId,
    isRoot
  });

  const installedTools = tools
    .filter((tool) => {
      // Only concrete installed tools — folders are grouping nodes, not callable.
      return tool.installed && !tool.isFolder;
    })
    .map((tool) => {
      const toolId = tool.id;
      // Prefer the requested locale, then fall back to English.
      // Fix: the previous order (`en` first) made `lang` dead code, because
      // `en` is required by I18nStringSchema and therefore always present.
      const name =
        typeof tool.name === 'string' ? tool.name : tool.name?.[lang] || tool.name?.en || '未命名';
      const intro =
        typeof tool.intro === 'string' ? tool.intro : tool.intro?.[lang] || tool.intro?.en || '';
      const description = tool.toolDescription || intro || '暂无描述';
      return `- **${toolId}** [工具]: ${name} - ${description}`;
    });

  return getPrompt({
    tool: installedTools.length > 0 ? installedTools.join('\n') : '暂无已安装的工具'
  });
};

View File

@ -5,6 +5,7 @@ import {
HelperBotChatItemSchema
} from '@fastgpt/global/core/chat/helperBot/type';
import { WorkflowResponseFnSchema } from '../../../workflow/dispatch/type';
import { LocaleList } from '@fastgpt/global/common/i18n/type';
export const HelperBotDispatchParamsSchema = z.object({
query: z.string(),
@ -12,20 +13,23 @@ export const HelperBotDispatchParamsSchema = z.object({
metadata: HelperBotCompletionsParamsSchema.shape.metadata,
histories: z.array(HelperBotChatItemSchema),
workflowResponseWrite: WorkflowResponseFnSchema,
teamId: z.string(),
userId: z.string()
user: z.object({
teamId: z.string(),
tmbId: z.string(),
userId: z.string(),
isRoot: z.boolean(),
lang: z.enum(LocaleList)
})
});
export type HelperBotDispatchParamsType = z.infer<typeof HelperBotDispatchParamsSchema>;
export const HelperBotDispatchResponseSchema = z.object({
aiResponse: z.array(AIChatItemValueItemSchema),
formData: z
.object({
role: z.string().optional(),
taskObject: z.string().optional(),
tools: z.array(z.string()).optional(),
fileUploadEnabled: z.boolean().optional()
})
.optional()
usage: z.object({
model: z.string(),
inputTokens: z.number(),
outputTokens: z.number()
})
});
export type HelperBotDispatchResponseType = z.infer<typeof HelperBotDispatchResponseSchema>;

View File

@ -0,0 +1,27 @@
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/helperBot/type';
/**
 * Wrap raw LLM output into chat-item value entries.
 *
 * @param text - the answer text (always emitted, always last)
 * @param reasoning - optional reasoning text; when non-empty it is emitted
 *   as a separate entry *before* the answer text
 * @returns the ordered list of chat-item value entries
 */
export const formatAIResponse = ({
  text,
  reasoning
}: {
  text: string;
  reasoning?: string;
}): AIChatItemValueItemType[] => {
  const answerItem: AIChatItemValueItemType = { text: { content: text } };

  // Empty-string reasoning is treated the same as absent reasoning.
  return reasoning ? [{ reasoning: { content: reasoning } }, answerItem] : [answerItem];
};

View File

@ -16,7 +16,7 @@ import {
} from '@fastgpt/global/core/workflow/runtime/utils';
import { getWorkflowChildResponseWrite } from '../../../utils';
import { SubAppIds } from '../sub/constants';
import { parseToolArgs } from '../../../../../ai/utils';
import { parseJsonArgs } from '../../../../../ai/utils';
import { dispatchFileRead } from '../sub/file';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
@ -186,7 +186,7 @@ export const stepCall = async ({
const { response, usages = [] } = await (async () => {
try {
if (toolId === SubAppIds.fileRead) {
const params = parseToolArgs<{
const params = parseJsonArgs<{
file_indexes: string[];
}>(call.function.arguments);
if (!params) {
@ -227,7 +227,7 @@ export const stepCall = async ({
};
}
const toolCallParams = parseToolArgs(call.function.arguments);
const toolCallParams = parseJsonArgs(call.function.arguments);
if (!toolCallParams) {
return {

View File

@ -2,7 +2,7 @@ import { getLLMModel } from '../../../../../ai/model';
import type { AgentPlanStepType } from '../sub/plan/type';
import { addLog } from '../../../../../../common/system/log';
import { createLLMResponse } from '../../../../../ai/llm/request';
import { parseToolArgs } from '../../../../../ai/utils';
import { parseJsonArgs } from '../../../../../ai/utils';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { formatModelChars2Points } from '../../../../../../support/wallet/usage/utils';
@ -68,7 +68,7 @@ export const getStepDependon = async ({
stream: false
}
});
const params = parseToolArgs<{
const params = parseJsonArgs<{
needed_step_ids: string[];
reason: string;
}>(answerText);

View File

@ -1,5 +1,5 @@
import { createLLMResponse } from '../../../../../ai/llm/request';
import { parseToolArgs } from '../../../../../ai/utils';
import { parseJsonArgs } from '../../../../../ai/utils';
import { addLog } from '../../../../../../common/system/log';
import { formatModelChars2Points } from '../../../../../../support/wallet/usage/utils';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
@ -71,7 +71,7 @@ export const checkTaskComplexity = async ({
}
});
const checkResponse = parseToolArgs<{ complex: boolean; reason: string }>(checkResult);
const checkResponse = parseJsonArgs<{ complex: boolean; reason: string }>(checkResult);
const { totalPoints, modelName } = formatModelChars2Points({
model,

View File

@ -16,7 +16,7 @@ import type {
InteractiveNodeResponseType,
WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { parseToolArgs } from '../../../../../../ai/utils';
import { parseJsonArgs } from '../../../../../../ai/utils';
import { PlanAgentAskTool, type AskAgentToolParamsType } from './ask/constants';
import { PlanCheckInteractive } from './constants';
import type { AgentPlanType } from './type';
@ -148,7 +148,7 @@ export const dispatchPlanAgent = async ({
return;
}
const params = parseToolArgs<AgentPlanType>(answerText);
const params = parseJsonArgs<AgentPlanType>(answerText);
if (toolCalls.length === 0 && (!params || !params.task || !params.steps)) {
throw new Error('Plan response is not valid');
}
@ -164,7 +164,7 @@ export const dispatchPlanAgent = async ({
const tooCall = toolCalls[0];
if (tooCall) {
const params = parseToolArgs<AskAgentToolParamsType>(tooCall.function.arguments);
const params = parseJsonArgs<AskAgentToolParamsType>(tooCall.function.arguments);
if (params) {
return {
type: 'agentPlanAskQuery',
@ -321,7 +321,7 @@ export const dispatchReplanAgent = async ({
return;
}
const params = parseToolArgs<AgentPlanType>(answerText);
const params = parseJsonArgs<AgentPlanType>(answerText);
if (toolCalls.length === 0 && (!params || !params.steps)) {
throw new Error('Replan response is not valid');
}
@ -337,7 +337,7 @@ export const dispatchReplanAgent = async ({
const tooCall = toolCalls[0];
if (tooCall) {
const params = parseToolArgs<AskAgentToolParamsType>(tooCall.function.arguments);
const params = parseJsonArgs<AskAgentToolParamsType>(tooCall.function.arguments);
if (params) {
return {
type: 'agentPlanAskQuery',

View File

@ -7,7 +7,7 @@ import type { DispatchFlowResponse } from '../../type';
import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { parseToolArgs } from '../../../../ai/utils';
import { parseJsonArgs } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { toolValueTypeList, valueTypeJsonSchemaMap } from '@fastgpt/global/core/workflow/constants';
@ -193,7 +193,7 @@ export const runToolCall = async (props: DispatchToolModuleProps): Promise<RunTo
}
// Init tool params and run
const startParams = parseToolArgs(call.function.arguments);
const startParams = parseJsonArgs(call.function.arguments);
initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
initToolCallEdges(runtimeEdges, [toolNode.nodeId]);

View File

@ -23,6 +23,7 @@
"export_title": "Time,Members,Type,Project name,AI points",
"feishu": "Feishu",
"generation_time": "Generation time",
"helper_bot": "Assistive robot",
"image_index": "Image index",
"image_parse": "Image tagging",
"input_token_length": "input tokens",

View File

@ -25,6 +25,7 @@
"feishu": "飞书",
"generate_answer": "生成应用回答",
"generation_time": "生成时间",
"helper_bot": "辅助机器人",
"image_index": "图片索引",
"image_parse": "图片标注",
"input_token_length": "输入 tokens",

View File

@ -23,6 +23,7 @@
"export_title": "時間,成員,類型,項目名,AI 積分消耗",
"feishu": "飛書",
"generation_time": "生成時間",
"helper_bot": "輔助機器人",
"image_index": "圖片索引",
"image_parse": "圖片標註",
"input_token_length": "輸入 tokens",

View File

@ -7,6 +7,7 @@ import type {
} from '@fastgpt/global/core/chat/type';
import type { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { TopAgentFormDataType } from '@fastgpt/service/core/chat/HelperBot/dispatch/topAgent/type';
export type generatingMessageProps = {
event: SseResponseEventEnum;
@ -23,6 +24,9 @@ export type generatingMessageProps = {
variables?: Record<string, any>;
nodeResponse?: ChatHistoryItemResType;
durationSeconds?: number;
// Agent
formData?: TopAgentFormDataType;
};
export type StartChatFnProps = {

View File

@ -33,7 +33,6 @@ const HumanItem = ({ chat }: { chat: HelperBotChatItemSiteType }) => {
py={3}
borderRadius={'sm'}
display="inline-block"
textAlign="right"
maxW={['calc(100% - 25px)', 'calc(100% - 40px)']}
color={'myGray.900'}
bg={'primary.100'}

View File

@ -6,10 +6,8 @@ import {
type HelperBotTypeEnumType,
type TopAgentParamsType
} from '@fastgpt/global/core/chat/helperBot/type';
import type { ChatItemType } from '@fastgpt/global/core/chat/type';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import type { AppFileSelectConfigType } from '@fastgpt/global/core/app/type';
import type { TopAgentFormDataType } from '@fastgpt/service/core/chat/HelperBot/dispatch/topAgent/type';
export type HelperBotProps = {
emptyDom?: ReactNode;
@ -17,7 +15,7 @@ export type HelperBotProps = {
} & {
type: HelperBotTypeEnumType;
metadata: TopAgentParamsType;
onApply: (e: TopAgentParamsType) => void;
onApply: (e: TopAgentFormDataType) => void;
};
type HelperBotContextType = HelperBotProps & {};
@ -30,7 +28,7 @@ export const HelperBotContext = createContext<HelperBotContextType>({
selectedDatasets: [],
fileUpload: false
},
onApply: function (e: TopAgentParamsType): void {
onApply: function (e): void {
throw new Error('Function not implemented.');
}
});

View File

@ -144,19 +144,7 @@ const ChatBox = ({ type, metadata, onApply, ...props }: HelperBotProps) => {
}
);
const generatingMessage = useMemoizedFn(
({ event, text = '', reasoningText, tool, data }: generatingMessageProps & { data?: any }) => {
if (event === SseResponseEventEnum.formData && data) {
const formData = {
role: data.role || '',
taskObject: data.taskObject || '',
selectedTools: data.tools || [],
selectedDatasets: [],
fileUpload: data.fileUploadEnabled || false
};
onApply?.(formData);
return;
}
({ event, text = '', reasoningText, tool, formData }: generatingMessageProps) => {
setChatRecords((state) =>
state.map((item, index) => {
if (index !== state.length - 1) return item;
@ -165,6 +153,12 @@ const ChatBox = ({ type, metadata, onApply, ...props }: HelperBotProps) => {
const updateIndex = item.value.length - 1;
const updateValue: AIChatItemValueItemType = item.value[updateIndex];
// Special event: form data
if (event === SseResponseEventEnum.formData && formData) {
onApply?.(formData);
return item;
}
if (event === SseResponseEventEnum.answer || event === SseResponseEventEnum.fastAnswer) {
if (reasoningText) {
if (updateValue?.reasoning) {

View File

@ -27,7 +27,7 @@ import { useToast } from '@fastgpt/web/hooks/useToast';
type Props = {
appForm: AppFormEditFormType;
setAppForm?: React.Dispatch<React.SetStateAction<AppFormEditFormType>>;
setAppForm: React.Dispatch<React.SetStateAction<AppFormEditFormType>>;
setRenderEdit: React.Dispatch<React.SetStateAction<boolean>>;
form2WorkflowFn: Form2WorkflowFnType;
};
@ -137,86 +137,50 @@ const ChatTest = ({ appForm, setAppForm, setRenderEdit, form2WorkflowFn }: Props
type={HelperBotTypeEnum.topAgent}
metadata={topAgentMetadata}
onApply={async (formData) => {
if (!setAppForm) {
console.warn('⚠️ setAppForm 未传入,无法更新表单');
return;
}
// Compute tools
const existingToolIds = new Set(
appForm.selectedTools.map((tool) => tool.pluginId).filter(Boolean)
);
// console.log('📋 当前已存在的工具 pluginId:', Array.from(existingToolIds));
// console.log('📋 formData.selectedTools:', formData.selectedTools);
const newToolIds = (formData.selectedTools || []).filter(
const newToolIds = (formData.tools || []).filter(
(toolId: string) => !existingToolIds.has(toolId)
);
if (newToolIds.length === 0) {
// 没有新工具需要添加,仍然更新 role、taskObject 和文件上传配置
setAppForm((prev) => ({
...prev,
aiSettings: {
...prev.aiSettings,
aiRole: formData.role || '',
aiTaskObject: formData.taskObject || ''
},
chatConfig: {
...prev.chatConfig,
fileSelectConfig: {
...prev.chatConfig.fileSelectConfig,
canSelectFile: formData.fileUpload || false
}
}
}));
return;
}
let newTools: FlowNodeTemplateType[] = [];
const newTools: FlowNodeTemplateType[] = [];
const failedToolIds: string[] = [];
// 使用 Promise.allSettled 并行请求所有工具
const toolPromises = newToolIds.map((toolId: string) =>
getToolPreviewNode({ appId: toolId })
.then((tool) => ({ status: 'fulfilled' as const, toolId, tool }))
.catch((error) => ({ status: 'rejected' as const, toolId, error }))
const results = await Promise.all(
newToolIds.map((toolId: string) =>
getToolPreviewNode({ appId: toolId })
.then((tool) => ({ status: 'fulfilled' as const, toolId, tool }))
.catch((error) => ({ status: 'rejected' as const, toolId, error }))
)
);
const results = await Promise.allSettled(toolPromises);
results.forEach((result: any) => {
if (result.status === 'fulfilled' && result.value.status === 'fulfilled') {
newTools.push(result.value.tool);
} else if (result.status === 'fulfilled' && result.value.status === 'rejected') {
failedToolIds.push(result.value.toolId);
console.error(`❌ 工具 ${result.value.toolId} 获取失败:`, result.value.error);
results.forEach((result) => {
if (result.status === 'fulfilled') {
newTools.push(result.tool);
} else if (result.status === 'rejected') {
failedToolIds.push(result.toolId);
}
});
if (failedToolIds.length > 0) {
toast({
title: t('app:tool_load_failed'),
description: `${t('app:failed_tools')}: ${failedToolIds.join(', ')}`,
status: 'warning',
duration: 5000
});
}
setAppForm((prev) => {
const newForm: AppFormEditFormType = {
...prev,
selectedTools: [...prev.selectedTools, ...newTools],
aiSettings: {
...prev.aiSettings,
aiRole: formData.role || '',
aiTaskObject: formData.taskObject || ''
aiRole: formData.role || prev.aiSettings.aiRole,
aiTaskObject: formData.taskObject || prev.aiSettings.aiTaskObject
},
selectedTools: [...prev.selectedTools, ...newTools],
chatConfig: {
...prev.chatConfig,
fileSelectConfig: {
...prev.chatConfig.fileSelectConfig,
canSelectFile: formData.fileUpload || false
canSelectFile:
formData.fileUploadEnabled ||
prev.chatConfig.fileSelectConfig?.canSelectFile ||
false
}
}
};

View File

@ -280,6 +280,7 @@ export const validateToolConfiguration = ({
return true;
};
export const checkNeedsUserConfiguration = (toolTemplate: FlowNodeTemplateType): boolean => {
const formRenderTypesMap: Record<string, boolean> = {
[FlowNodeInputTypeEnum.input]: true,
@ -293,8 +294,8 @@ export const checkNeedsUserConfiguration = (toolTemplate: FlowNodeTemplateType):
[FlowNodeInputTypeEnum.timeRangeSelect]: true
};
return (
((toolTemplate.inputs?.length ?? 0) > 0 &&
toolTemplate.inputs?.some((input) => {
(toolTemplate.inputs.length > 0 &&
toolTemplate.inputs.some((input) => {
// 有工具描述的不需要配置
if (input.toolDescription) return false;
// 禁用流的不需要配置

View File

@ -9,7 +9,8 @@ import { MongoHelperBotChatItem } from '@fastgpt/service/core/chat/HelperBot/cha
import { getWorkflowResponseWrite } from '@fastgpt/service/core/workflow/dispatch/utils';
import { dispatchMap } from '@fastgpt/service/core/chat/HelperBot/dispatch/index';
import { pushChatRecords } from '@fastgpt/service/core/chat/HelperBot/utils';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { pushHelperBotUsage } from '@/service/support/wallet/usage/push';
import { getLocale } from '@fastgpt/service/common/middle/i18n';
export type completionsBody = HelperBotCompletionsParamsType;
@ -18,7 +19,7 @@ async function handler(req: ApiRequestProps<completionsBody>, res: ApiResponseTy
req.body
);
const { teamId, userId } = await authCert({ req, authToken: true });
const { teamId, tmbId, userId, isRoot } = await authCert({ req, authToken: true });
const histories = await MongoHelperBotChatItem.find({
userId,
@ -45,18 +46,15 @@ async function handler(req: ApiRequestProps<completionsBody>, res: ApiResponseTy
metadata,
histories,
workflowResponseWrite,
teamId,
userId
user: {
teamId,
tmbId,
userId,
isRoot,
lang: getLocale(req)
}
});
// Send formData if exists
if (result.formData) {
workflowResponseWrite?.({
event: SseResponseEventEnum.formData,
data: result.formData
});
}
// Save chat
await pushChatRecords({
type: metadata.type,
@ -68,6 +66,13 @@ async function handler(req: ApiRequestProps<completionsBody>, res: ApiResponseTy
aiResponse: result.aiResponse
});
// Push usage
pushHelperBotUsage({
teamId,
tmbId,
model: result.usage.model,
inputTokens: result.usage.inputTokens,
outputTokens: result.usage.outputTokens
});
}
export default NextAPI(handler);

View File

@ -4,6 +4,44 @@ import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/u
import { i18nT } from '@fastgpt/web/i18n/utils';
import { getDefaultTTSModel } from '@fastgpt/service/core/ai/model';
import type { UsageItemType } from '@fastgpt/global/support/wallet/usage/type';
import type { HelperBotTypeEnumType } from '@fastgpt/global/core/chat/helperBot/type';
/**
 * Record the AI-point usage produced by one helper-bot completion.
 * Converts token counts into points for the given model, then writes a
 * single usage record attributed to the team member.
 *
 * @param teamId - team to bill
 * @param tmbId - team member who triggered the completion
 * @param model - model identifier used for the completion
 * @param inputTokens - prompt tokens consumed
 * @param outputTokens - completion tokens produced
 */
export const pushHelperBotUsage = (params: {
  teamId: string;
  tmbId: string;
  model: string;
  inputTokens: number;
  outputTokens: number;
}) => {
  const { teamId, tmbId, model, inputTokens, outputTokens } = params;

  // Token counts -> billed points (and the model's display name).
  const points = formatModelChars2Points({ model, inputTokens, outputTokens });

  // NOTE(review): createUsage's result is ignored here; if it returns a
  // promise, rejections go unhandled — confirm fire-and-forget is intended.
  createUsage({
    teamId,
    tmbId,
    appName: i18nT('account_usage:helper_bot'),
    totalPoints: points.totalPoints,
    source: UsageSourceEnum.fastgpt,
    list: [
      {
        moduleName: i18nT('account_usage:helper_bot'),
        amount: points.totalPoints,
        model: points.modelName,
        inputTokens,
        outputTokens
      }
    ]
  });
};
export const pushGenerateVectorUsage = ({
usageId,

View File

@ -13,6 +13,7 @@ import { getWebReqUrl } from '@fastgpt/web/common/system/utils';
import type { OnOptimizePromptProps } from '@/components/common/PromptEditor/OptimizerPopover';
import type { OnOptimizeCodeProps } from '@/pageComponents/app/detail/WorkflowComponents/Flow/nodes/NodeCode/Copilot';
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import type { TopAgentFormDataType } from '@fastgpt/service/core/chat/HelperBot/dispatch/topAgent/type';
type StreamFetchProps = {
url?: string;
@ -52,6 +53,7 @@ type ResponseQueueItemType = CommonResponseType &
}
| {
event: SseResponseEventEnum.formData;
data: TopAgentFormDataType;
}
);
@ -276,7 +278,7 @@ export const streamFetch = ({
// Directly call onMessage for formData, no need to queue
onMessage({
event,
data: rest
formData: rest
});
} else if (event === SseResponseEventEnum.error) {
if (rest.statusText === TeamErrEnum.aiPointsNotEnough) {