V4.14.2 featured (#5922)

* fix: chat agent template create (#5912)

* doc

* template market ui (#5917)

* Compress tool (#5919)

* Compress tool (#5914)

* rename file

* feat: agent call request

* perf: Agent call  (#5916)

* fix: interactive in tool call

* doc

* fix: merge node response

* fix: test

* fix:修改 message 对话中的压缩提示词 (#5918)

Co-authored-by: xxyyh <2289112474@qq>

* perf: compress code

* perf: agent call comment

---------

Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
Co-authored-by: xxyyh <2289112474@qq>

* remove pr

* feat: auto password

* perf: app template cache

* fix template market ui (#5921)

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
Co-authored-by: xxyyh <2289112474@qq>
This commit is contained in:
Archer 2025-11-14 13:21:17 +08:00 committed by GitHub
parent 21de152fd7
commit 48c0c150eb
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
51 changed files with 1826 additions and 671 deletions

View File

@ -16,7 +16,7 @@ description: FastGPT OpenAPI 对话接口
{/* * 对话现在有`v1`和`v2`两个接口可以按需使用v2 自 4.9.4 版本新增v1 接口同时不再维护 */}
## 请求简易应用和工作流
## 请求对话 Agent 和工作流
`v1`对话接口兼容`GPT`的接口!如果你的项目使用的是标准的`GPT`官方接口,可以直接通过修改`BaseUrl`和 `Authorization`来访问 FastGpt 应用,不过需要注意下面几个规则:

View File

@ -112,6 +112,7 @@ description: FastGPT 文档目录
- [/docs/upgrading/4-13/4132](/docs/upgrading/4-13/4132)
- [/docs/upgrading/4-14/4140](/docs/upgrading/4-14/4140)
- [/docs/upgrading/4-14/4141](/docs/upgrading/4-14/4141)
- [/docs/upgrading/4-14/4142](/docs/upgrading/4-14/4142)
- [/docs/upgrading/4-8/40](/docs/upgrading/4-8/40)
- [/docs/upgrading/4-8/41](/docs/upgrading/4-8/41)
- [/docs/upgrading/4-8/42](/docs/upgrading/4-8/42)

View File

@ -0,0 +1,20 @@
---
title: 'V4.14.2(进行中)'
description: 'FastGPT V4.14.2 更新说明'
---
## 🚀 新增内容
1. 封装底层 Agent Call 方式,支持工具连续调用时上下文的压缩,以及单个工具长响应的压缩。
2. 模板市场新 UI。
## ⚙️ 优化
1. 30 分钟模板市场缓存时长。
## 🐛 修复
1. 简易应用模板未正常转化。
2. 工具调用中,包含两个以上连续用户选择时候,第二个用户选择异常。

View File

@ -1,5 +1,5 @@
{
"title": "4.14.x",
"description": "",
"pages": ["4141", "4140"]
"pages": ["4142", "4141", "4140"]
}

View File

@ -101,7 +101,7 @@
"document/content/docs/protocol/terms.en.mdx": "2025-08-03T22:37:45+08:00",
"document/content/docs/protocol/terms.mdx": "2025-08-03T22:37:45+08:00",
"document/content/docs/toc.en.mdx": "2025-08-04T13:42:36+08:00",
"document/content/docs/toc.mdx": "2025-11-06T14:47:55+08:00",
"document/content/docs/toc.mdx": "2025-11-13T13:36:41+08:00",
"document/content/docs/upgrading/4-10/4100.mdx": "2025-08-02T19:38:37+08:00",
"document/content/docs/upgrading/4-10/4101.mdx": "2025-09-08T20:07:20+08:00",
"document/content/docs/upgrading/4-11/4110.mdx": "2025-08-05T23:20:39+08:00",
@ -115,7 +115,8 @@
"document/content/docs/upgrading/4-13/4131.mdx": "2025-09-30T15:47:06+08:00",
"document/content/docs/upgrading/4-13/4132.mdx": "2025-10-21T11:46:53+08:00",
"document/content/docs/upgrading/4-14/4140.mdx": "2025-11-06T15:43:00+08:00",
"document/content/docs/upgrading/4-14/4141.mdx": "2025-11-11T14:05:02+08:00",
"document/content/docs/upgrading/4-14/4141.mdx": "2025-11-12T12:19:02+08:00",
"document/content/docs/upgrading/4-14/4142.mdx": "2025-11-13T20:49:04+08:00",
"document/content/docs/upgrading/4-8/40.mdx": "2025-08-02T19:38:37+08:00",
"document/content/docs/upgrading/4-8/41.mdx": "2025-08-02T19:38:37+08:00",
"document/content/docs/upgrading/4-8/42.mdx": "2025-08-02T19:38:37+08:00",

View File

@ -143,10 +143,7 @@ export const getRegQueryStr = (text: string, flags = 'i') => {
/* slice json str */
export const sliceJsonStr = (str: string) => {
str = str
.trim()
.replace(/(\\n|\\)/g, '')
.replace(/ /g, '');
str = str.trim();
// Find first opening bracket
let start = -1;

View File

@ -80,7 +80,8 @@ export type CompletionFinishReason =
| 'tool_calls'
| 'content_filter'
| 'function_call'
| null;
| null
| undefined;
export default openai;
export * from 'openai';

View File

@ -230,6 +230,8 @@ export type AppTemplateSchemaType = {
type: string;
author?: string;
isActive?: boolean;
isPromoted?: boolean;
recommendText?: string;
userGuide?: {
type: 'markdown' | 'link';
content?: string;
@ -237,6 +239,7 @@ export type AppTemplateSchemaType = {
};
isQuickTemplate?: boolean;
order?: number;
// TODO: 对于建议应用,是另一个格式
workflow: WorkflowTemplateBasicType;
};

View File

@ -213,21 +213,10 @@ export const getChatSourceByPublishChannel = (publishChannel: PublishChannelEnum
export const mergeChatResponseData = (
responseDataList: ChatHistoryItemResType[]
): ChatHistoryItemResType[] => {
// Merge children reponse data(Children has interactive response)
const responseWithMergedPlugins = responseDataList.map((item) => {
if (item.pluginDetail && item.pluginDetail.length > 1) {
return {
...item,
pluginDetail: mergeChatResponseData(item.pluginDetail)
};
}
return item;
});
const result: ChatHistoryItemResType[] = [];
const mergeMap = new Map<string, number>(); // mergeSignId -> result index
for (const item of responseWithMergedPlugins) {
for (const item of responseDataList) {
if (item.mergeSignId && mergeMap.has(item.mergeSignId)) {
// Merge with existing item
const existingIndex = mergeMap.get(item.mergeSignId)!;
@ -238,9 +227,18 @@ export const mergeChatResponseData = (
runningTime: +((existing.runningTime || 0) + (item.runningTime || 0)).toFixed(2),
totalPoints: (existing.totalPoints || 0) + (item.totalPoints || 0),
childTotalPoints: (existing.childTotalPoints || 0) + (item.childTotalPoints || 0),
toolDetail: [...(existing.toolDetail || []), ...(item.toolDetail || [])],
loopDetail: [...(existing.loopDetail || []), ...(item.loopDetail || [])],
pluginDetail: [...(existing.pluginDetail || []), ...(item.pluginDetail || [])]
toolDetail: mergeChatResponseData([
...(existing.toolDetail || []),
...(item.toolDetail || [])
]),
loopDetail: mergeChatResponseData([
...(existing.loopDetail || []),
...(item.loopDetail || [])
]),
pluginDetail: mergeChatResponseData([
...(existing.pluginDetail || []),
...(item.pluginDetail || [])
])
};
} else {
// Add new item

View File

@ -2,9 +2,9 @@ import type { ChatNodeUsageType } from '../../../support/wallet/bill/type';
import type {
ChatItemType,
ToolRunResponseItemType,
AIChatItemValueItemType
AIChatItemValueItemType,
ChatHistoryItemResType
} from '../../chat/type';
import { NodeOutputItemType } from '../../chat/type';
import type { FlowNodeInputItemType, FlowNodeOutputItemType } from '../type/io.d';
import type { NodeToolConfigType, StoreNodeItemType } from '../type/node';
import type { DispatchNodeResponseKeyEnum } from './constants';
@ -112,7 +112,6 @@ export type RuntimeNodeItemType = {
flowNodeType: StoreNodeItemType['flowNodeType'];
showStatus?: StoreNodeItemType['showStatus'];
isEntry?: boolean;
isStart?: boolean;
version?: string;
inputs: FlowNodeInputItemType[];

View File

@ -20,6 +20,7 @@ import type { StoreNodeItemType } from '../type/node';
import { isValidReferenceValueFormat } from '../utils';
import type { RuntimeEdgeItemType, RuntimeNodeItemType } from './type';
import { isSecretValue } from '../../../common/secret/utils';
import { isChildInteractive } from '../template/system/interactive/constants';
export const extractDeepestInteractive = (
interactive: WorkflowInteractiveResponseType
@ -28,11 +29,7 @@ export const extractDeepestInteractive = (
let current = interactive;
let depth = 0;
while (
depth < MAX_DEPTH &&
(current?.type === 'childrenInteractive' || current?.type === 'loopInteractive') &&
current.params?.childrenResponse
) {
while (depth < MAX_DEPTH && 'childrenResponse' in current.params) {
current = current.params.childrenResponse;
depth++;
}
@ -181,10 +178,7 @@ export const getLastInteractiveValue = (
return;
}
if (
lastValue.interactive.type === 'childrenInteractive' ||
lastValue.interactive.type === 'loopInteractive'
) {
if (isChildInteractive(lastValue.interactive.type)) {
return lastValue.interactive;
}
@ -297,7 +291,6 @@ export const checkNodeRunStatus = ({
node: RuntimeNodeItemType;
runtimeEdges: RuntimeEdgeItemType[];
}) => {
const filterRuntimeEdges = filterWorkflowEdges(runtimeEdges);
const isStartNode = (nodeType: string) => {
const map: Record<any, boolean> = {
[FlowNodeTypeEnum.workflowStart]: true,
@ -310,7 +303,7 @@ export const checkNodeRunStatus = ({
const commonEdges: RuntimeEdgeItemType[] = [];
const recursiveEdgeGroupsMap = new Map<string, RuntimeEdgeItemType[]>();
const sourceEdges = filterRuntimeEdges.filter((item) => item.target === targetNode.nodeId);
const sourceEdges = runtimeEdges.filter((item) => item.target === targetNode.nodeId);
sourceEdges.forEach((sourceEdge) => {
const stack: Array<{
@ -333,7 +326,7 @@ export const checkNodeRunStatus = ({
const sourceNode = nodesMap.get(edge.source);
if (!sourceNode) continue;
if (isStartNode(sourceNode.flowNodeType) || sourceNode.isStart) {
if (isStartNode(sourceNode.flowNodeType) || sourceEdge.sourceHandle === 'selectedTools') {
commonEdges.push(sourceEdge);
continue;
}
@ -355,7 +348,7 @@ export const checkNodeRunStatus = ({
newVisited.add(edge.source);
// 查找目标节点的 source edges 并加入栈中
const nextEdges = filterRuntimeEdges.filter((item) => item.target === edge.source);
const nextEdges = runtimeEdges.filter((item) => item.target === edge.source);
for (const nextEdge of nextEdges) {
stack.push({

View File

@ -0,0 +1,12 @@
import type { InteractiveNodeResponseType } from './type';
/**
 * Whether an interactive response type is a child-type interactive
 * (children / tool-children / loop), i.e. one that wraps a nested response.
 */
export const isChildInteractive = (type: InteractiveNodeResponseType['type']) => {
  const childInteractiveTypes: InteractiveNodeResponseType['type'][] = [
    'childrenInteractive',
    'toolChildrenInteractive',
    'loopInteractive'
  ];
  return childInteractiveTypes.includes(type);
};

View File

@ -9,11 +9,6 @@ type InteractiveBasicType = {
memoryEdges: RuntimeEdgeItemType[];
nodeOutputs: NodeOutputItemType[];
skipNodeQueue?: { id: string; skippedNodeIdList: string[] }[]; // 需要记录目前在 queue 里的节点
toolParams?: {
entryNodeIds: string[]; // 记录工具中,交互节点的 Id而不是起始工作流的入口
memoryMessages: ChatCompletionMessageParam[]; // 这轮工具中,产生的新的 messages
toolCallId: string; // 记录对应 tool 的id用于后续交互节点可以替换掉 tool 的 response
};
usageId?: string;
};
@ -27,7 +22,17 @@ type InteractiveNodeType = {
type ChildrenInteractive = InteractiveNodeType & {
type: 'childrenInteractive';
params: {
childrenResponse?: WorkflowInteractiveResponseType;
childrenResponse: WorkflowInteractiveResponseType;
};
};
type ToolCallChildrenInteractive = InteractiveNodeType & {
type: 'toolChildrenInteractive';
params: {
childrenResponse: WorkflowInteractiveResponseType;
toolParams: {
memoryRequestMessages: ChatCompletionMessageParam[]; // 这轮工具中,产生的新的 messages
toolCallId: string; // 记录对应 tool 的id用于后续交互节点可以替换掉 tool 的 response
};
};
};
@ -94,6 +99,7 @@ export type InteractiveNodeResponseType =
| UserSelectInteractive
| UserInputInteractive
| ChildrenInteractive
| ToolCallChildrenInteractive
| LoopInteractive
| PaymentPauseInteractive;

View File

@ -0,0 +1,313 @@
import type {
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionMessageToolCall,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type {
ToolCallChildrenInteractive,
WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { CreateLLMResponseProps, ResponseEvents } from '../request';
import { createLLMResponse } from '../request';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { compressRequestMessages } from '../compress';
import { computedMaxToken } from '../../utils';
import { filterGPTMessageByMaxContext } from '../utils';
import { getLLMModel } from '../../model';
import { filterEmptyAssistantMessages } from './utils';
type RunAgentCallProps = {
maxRunAgentTimes: number;
compressTaskDescription?: string;
body: CreateLLMResponseProps['body'] & {
tools: ChatCompletionTool[];
temperature?: number;
top_p?: number;
stream?: boolean;
};
userKey?: CreateLLMResponseProps['userKey'];
isAborted?: CreateLLMResponseProps['isAborted'];
childrenInteractiveParams?: ToolCallChildrenInteractive['params'];
handleInteractiveTool: (e: ToolCallChildrenInteractive['params']) => Promise<{
response: string;
assistantMessages: ChatCompletionMessageParam[];
usages: ChatNodeUsageType[];
interactive?: WorkflowInteractiveResponseType;
stop?: boolean;
}>;
handleToolResponse: (e: {
call: ChatCompletionMessageToolCall;
messages: ChatCompletionMessageParam[];
}) => Promise<{
response: string;
assistantMessages: ChatCompletionMessageParam[];
usages: ChatNodeUsageType[];
interactive?: WorkflowInteractiveResponseType;
stop?: boolean;
}>;
} & ResponseEvents;
type RunAgentResponse = {
completeMessages: ChatCompletionMessageParam[]; // Step request complete messages
assistantMessages: ChatCompletionMessageParam[]; // Step assistant response messages
interactiveResponse?: ToolCallChildrenInteractive;
// Usage
inputTokens: number;
outputTokens: number;
subAppUsages: ChatNodeUsageType[];
finish_reason: CompletionFinishReason | undefined;
};
/*
  Agent call loop message bookkeeping.

  AssistantMessages (recorded as the AI's visible response):
  1. AI messages
  2. tool messages produced inside tools
  3. tool results (role=tool, content=tool response)

  RequestMessages (sent back to the LLM on the next round):
  1. prior conversation history
  2. AI messages
  3. tool results (role=tool, content=tool response)

  On an interactive interruption, memoryRequestMessages snapshots requestMessages
  so the run can be resumed later.

  NOTE(review): the original comment was partially garbled during extraction;
  reconstructed from the surrounding code — verify against upstream.
*/
/**
 * Run the agent call loop: repeatedly query the LLM, execute the tool calls it
 * requests via `handleToolResponse`, and feed the tool results back — until the
 * model stops calling tools, an interactive pause is raised, a tool asks to
 * stop, or `maxRunAgentTimes` rounds are exhausted.
 *
 * When `childrenInteractiveParams` is provided, first resumes a previously
 * interrupted run (an interactive tool that the user has now answered) by
 * restoring the snapshotted request messages and injecting the user's response
 * into the matching tool message.
 *
 * @returns complete request messages, the assistant messages produced by this
 *   step, token usage, sub-app usages, the finish reason, and — when a tool
 *   raised an interactive pause — an interactive response the caller must
 *   persist in order to resume later.
 */
export const runAgentCall = async ({
  maxRunAgentTimes,
  body: { model, messages, max_tokens, tools, ...body },
  userKey,
  isAborted,
  childrenInteractiveParams,
  handleInteractiveTool,
  handleToolResponse,
  onReasoning,
  onStreaming,
  onToolCall,
  onToolParam
}: RunAgentCallProps): Promise<RunAgentResponse> => {
  const modelData = getLLMModel(model);
  let runTimes = 0;
  let interactiveResponse: ToolCallChildrenInteractive | undefined;
  // Init messages
  const maxTokens = computedMaxToken({
    model: modelData,
    maxToken: max_tokens || 8000,
    min: 100
  });
  // Assistant messages produced during this step (including those produced inside tools)
  const assistantMessages: ChatCompletionMessageParam[] = [];
  // Request messages, reused and extended across loop rounds
  let requestMessages = (
    await filterGPTMessageByMaxContext({
      messages,
      maxContext: modelData.maxContext - (maxTokens || 0) // filter token. not response maxToken
    })
  ).map((item) => {
    // Keep only id/type/function on historical tool_calls so no extra fields are sent upstream
    if (item.role === 'assistant' && item.tool_calls) {
      return {
        ...item,
        tool_calls: item.tool_calls.map((tool) => ({
          id: tool.id,
          type: tool.type,
          function: tool.function
        }))
      };
    }
    return item;
  });
  let inputTokens: number = 0;
  let outputTokens: number = 0;
  let finish_reason: CompletionFinishReason | undefined;
  const subAppUsages: ChatNodeUsageType[] = [];
  // Resume an interactive pause that was raised inside a tool
  if (childrenInteractiveParams) {
    const {
      response,
      assistantMessages: toolAssistantMessages,
      usages,
      interactive,
      stop
    } = await handleInteractiveTool(childrenInteractiveParams);
    // Restore requestMessages to the snapshot taken at interruption, filling in the tool response
    requestMessages = childrenInteractiveParams.toolParams.memoryRequestMessages.map((item) =>
      item.role === 'tool' && item.tool_call_id === childrenInteractiveParams.toolParams.toolCallId
        ? {
            ...item,
            content: response
          }
        : item
    );
    // Only push the assistant messages produced by this resume round
    assistantMessages.push(...filterEmptyAssistantMessages(toolAssistantMessages));
    subAppUsages.push(...usages);
    // The same tool may trigger several interactions in a row; the toolCallId is treated as unchanged
    if (interactive) {
      // console.dir(interactive, { depth: null });
      interactiveResponse = {
        type: 'toolChildrenInteractive',
        params: {
          childrenResponse: interactive,
          toolParams: {
            memoryRequestMessages: requestMessages,
            toolCallId: childrenInteractiveParams.toolParams.toolCallId
          }
        }
      };
    }
    if (interactiveResponse || stop) {
      return {
        inputTokens: 0,
        outputTokens: 0,
        subAppUsages,
        completeMessages: requestMessages,
        assistantMessages,
        interactiveResponse,
        finish_reason: 'stop'
      };
    }
    // Tool response completed normally; fall through and continue the tool-call loop
  }
  // Main self-loop
  while (runTimes < maxRunAgentTimes) {
    // TODO: cost/points check
    runTimes++;
    // 1. Compress request messages
    const result = await compressRequestMessages({
      messages: requestMessages,
      model: modelData
    });
    requestMessages = result.messages;
    inputTokens += result.usage?.inputTokens || 0;
    outputTokens += result.usage?.outputTokens || 0;
    // 2. Request LLM
    let {
      reasoningText: reasoningContent,
      answerText: answer,
      toolCalls = [],
      usage,
      getEmptyResponseTip,
      assistantMessage: llmAssistantMessage,
      finish_reason: finishReason
    } = await createLLMResponse({
      body: {
        ...body,
        model,
        messages: requestMessages,
        tool_choice: 'auto',
        toolCallMode: modelData.toolChoice ? 'toolChoice' : 'prompt',
        tools,
        parallel_tool_calls: true
      },
      userKey,
      isAborted,
      onReasoning,
      onStreaming,
      onToolCall,
      onToolParam
    });
    finish_reason = finishReason;
    if (!answer && !reasoningContent && !toolCalls.length) {
      return Promise.reject(getEmptyResponseTip());
    }
    // 3. Update messages
    const cloneRequestMessages = requestMessages.slice();
    // Push the assistant messages generated by the LLM
    assistantMessages.push(...llmAssistantMessage);
    requestMessages.push(...llmAssistantMessage);
    // 4. Call tools
    let toolCallStep = false;
    for await (const tool of toolCalls) {
      const {
        response,
        assistantMessages: toolAssistantMessages,
        usages,
        interactive,
        stop
      } = await handleToolResponse({
        call: tool,
        messages: cloneRequestMessages
      });
      const toolMessage: ChatCompletionMessageParam = {
        tool_call_id: tool.id,
        role: ChatCompletionRequestMessageRoleEnum.Tool,
        content: response
      };
      // 5. Add tool response to messages
      assistantMessages.push(toolMessage);
      assistantMessages.push(...filterEmptyAssistantMessages(toolAssistantMessages)); // toolAssistantMessages must also be recorded as AI responses, so they are pushed here
      requestMessages.push(toolMessage); // The next request only needs the tool response, not the assistant content produced inside the tool, so toolAssistantMessages are not pushed
      subAppUsages.push(...usages);
      if (interactive) {
        interactiveResponse = {
          type: 'toolChildrenInteractive',
          params: {
            childrenResponse: interactive,
            toolParams: {
              memoryRequestMessages: [],
              toolCallId: tool.id
            }
          }
        };
      }
      if (stop) {
        toolCallStep = true;
      }
    }
    // 6 Record usage
    inputTokens += usage.inputTokens;
    outputTokens += usage.outputTokens;
    if (toolCalls.length === 0 || !!interactiveResponse || toolCallStep) {
      break;
    }
  }
  if (interactiveResponse) {
    // Snapshot the final request messages so the interrupted run can be resumed later
    interactiveResponse.params.toolParams.memoryRequestMessages = requestMessages;
  }
  return {
    inputTokens,
    outputTokens,
    subAppUsages,
    completeMessages: requestMessages,
    assistantMessages,
    interactiveResponse,
    finish_reason
  };
};

View File

@ -0,0 +1,11 @@
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
/**
 * Drop assistant messages whose content is empty (missing, '' or []).
 * Messages with any other role pass through untouched.
 *
 * NOTE(review): an assistant message carrying only tool_calls (content null)
 * is also removed by this filter — confirm that is intended at call sites.
 */
export const filterEmptyAssistantMessages = (messages: ChatCompletionMessageParam[]) => {
  return messages.filter((message) => {
    if (message.role !== 'assistant') return true;
    if (!message.content) return false;
    return message.content.length !== 0;
  });
};

View File

@ -0,0 +1,103 @@
/**
 * Agent context-compression configuration.
 *
 * All values are ratios of the model's maxContext; they are converted into
 * absolute token counts by `calculateCompressionThresholds`.
 *
 * NOTE(review): the original header and inline comments were garbled during
 * extraction; the figures below are restated directly from the constant
 * values — confirm intent against the upstream source.
 */
export const COMPRESSION_CONFIG = {
  // Depends-on responses: compress once they exceed 15% of maxContext,
  // down to a 12% target (3% buffer).
  DEPENDS_ON_THRESHOLD: 0.15,
  DEPENDS_ON_TARGET: 0.12,

  // Conversation history (user/assistant/tool): compress once it exceeds
  // 80% of maxContext, down to 50% of its current size.
  MESSAGE_THRESHOLD: 0.8,
  MESSAGE_TARGET_RATIO: 0.5,

  // A single tool response: compress once it exceeds 50% of maxContext,
  // down to a 25% target.
  SINGLE_TOOL_MAX: 0.5,
  SINGLE_TOOL_TARGET: 0.25,

  // Maximum size of one chunk handed to the LLM during compression
  // (50% of maxContext).
  CHUNK_SIZE_RATIO: 0.5
} as const;

/**
 * Convert the ratio-based compression config into absolute token thresholds
 * for a given model context window.
 *
 * @param maxContext - model context window size, in tokens
 * @returns token thresholds for each compression category
 */
export const calculateCompressionThresholds = (maxContext: number) => {
  const toTokens = (ratio: number) => Math.floor(maxContext * ratio);

  return {
    // Depends-on compression thresholds
    dependsOn: {
      threshold: toTokens(COMPRESSION_CONFIG.DEPENDS_ON_THRESHOLD),
      target: toTokens(COMPRESSION_CONFIG.DEPENDS_ON_TARGET)
    },
    // Conversation-history compression thresholds
    messages: {
      threshold: toTokens(COMPRESSION_CONFIG.MESSAGE_THRESHOLD),
      targetRatio: COMPRESSION_CONFIG.MESSAGE_TARGET_RATIO
    },
    // Single tool-response compression thresholds
    singleTool: {
      threshold: toTokens(COMPRESSION_CONFIG.SINGLE_TOOL_MAX),
      target: toTokens(COMPRESSION_CONFIG.SINGLE_TOOL_TARGET)
    },
    // Chunk size used while compressing
    chunkSize: toTokens(COMPRESSION_CONFIG.CHUNK_SIZE_RATIO)
  };
};

View File

@ -0,0 +1,140 @@
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { countGptMessagesTokens } from '../../../../common/string/tiktoken';
import { addLog } from '../../../../common/system/log';
import { calculateCompressionThresholds } from './constants';
import { createLLMResponse } from '../request';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { getCompressRequestMessagesPrompt } from './prompt';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { i18nT } from '../../../../../web/i18n/utils';
import { parseToolArgs } from '../../utils';
/**
 * Compress an agent conversation's request messages.
 *
 * When the non-system messages exceed the model's compression threshold, an
 * LLM is asked to rewrite the history into a shorter, logically-equivalent
 * message list. On any failure (empty answer, unparsable JSON, thrown error)
 * the original messages are returned unchanged — compression is strictly
 * best-effort.
 *
 * @returns the (possibly compressed) messages, plus the LLM usage incurred
 *   by the compression call when one was made.
 */
export const compressRequestMessages = async ({
  messages,
  model
}: {
  messages: ChatCompletionMessageParam[];
  model: LLMModelItemType;
}): Promise<{
  messages: ChatCompletionMessageParam[];
  usage?: ChatNodeUsageType;
}> => {
  if (!messages || messages.length === 0) {
    return {
      messages
    };
  }

  // System messages are never compressed: split them out and re-attach at the end.
  const [systemMessages, otherMessages]: [
    ChatCompletionMessageParam[],
    ChatCompletionMessageParam[]
  ] = [[], []];
  messages.forEach((message) => {
    if (message.role === ChatCompletionRequestMessageRoleEnum.System) {
      systemMessages.push(message);
    } else {
      otherMessages.push(message);
    }
  });

  const messageTokens = await countGptMessagesTokens(otherMessages);
  const thresholds = calculateCompressionThresholds(model.maxContext).messages;
  if (messageTokens < thresholds.threshold) {
    return {
      messages
    };
  }

  addLog.info('[Compression messages] Start', {
    tokens: messageTokens
  });

  const compressPrompt = await getCompressRequestMessagesPrompt({
    messages: otherMessages,
    rawTokens: messageTokens,
    model
  });
  const userPrompt = '请执行压缩操作严格按照JSON格式返回结果。';

  try {
    const { answerText, usage } = await createLLMResponse({
      body: {
        model,
        messages: [
          {
            role: ChatCompletionRequestMessageRoleEnum.System,
            content: compressPrompt
          },
          {
            role: ChatCompletionRequestMessageRoleEnum.User,
            content: userPrompt
          }
        ],
        temperature: 0.1, // low temperature: faithful rewriting, not creativity
        stream: true
      }
    });

    if (!answerText) {
      addLog.warn('[Compression messages] failed: empty response, return original messages');
      return { messages };
    }

    const { totalPoints, modelName } = formatModelChars2Points({
      model: model.model,
      inputTokens: usage.inputTokens,
      outputTokens: usage.outputTokens
    });
    const compressedUsage = {
      moduleName: i18nT('account_usage:compress_llm_messages'),
      model: modelName,
      totalPoints,
      inputTokens: usage.inputTokens,
      outputTokens: usage.outputTokens
    };

    const compressResult = parseToolArgs<{
      compressed_messages: ChatCompletionMessageParam[];
      compression_summary: string;
    }>(answerText);

    // Bug fix: validate `compressed_messages` itself. The previous check
    // `!Array.isArray(compressResult)` was always true for the parsed object,
    // so every successful compression result was silently discarded.
    if (
      !compressResult ||
      !Array.isArray(compressResult.compressed_messages) ||
      compressResult.compressed_messages.length === 0
    ) {
      addLog.warn('[Compression messages] failed: cannot parse JSON, return original messages', {
        messages: compressResult?.compressed_messages
      });
      return { messages, usage: compressedUsage };
    }

    // NOTE(review): this is the compression call's output token count, used as
    // an approximation of the compressed history size — not an exact recount.
    const compressedTokens = usage.outputTokens;
    addLog.info('[Compression messages] successfully', {
      originalTokens: messageTokens,
      compressedTokens,
      actualRatio: (compressedTokens / messageTokens).toFixed(2),
      summary: compressResult.compression_summary
    });

    // Re-attach the system messages extracted above.
    const finalMessages = [...systemMessages, ...compressResult.compressed_messages];

    return {
      messages: finalMessages,
      usage: compressedUsage
    };
  } catch (error) {
    addLog.error('[Compression messages] failed', error);
    return { messages };
  }
};

View File

@ -0,0 +1,296 @@
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { calculateCompressionThresholds } from './constants';
export const getCompressRequestMessagesPrompt = async ({
rawTokens,
messages,
model
}: {
messages: ChatCompletionMessageParam[];
rawTokens: number;
model: LLMModelItemType;
}) => {
const thresholds = calculateCompressionThresholds(model.maxContext);
const targetTokens = Math.round(rawTokens * thresholds.messages.targetRatio);
return `你是 Agent 对话历史压缩专家。你的任务是将对话历史压缩到目标 token 数,同时确保对话逻辑连贯性和工具调用的 ID 映射关系完全正确。
##
###
****
****
-
-
-
- tool_call 使
****
- tool_call
- content
-
-
-
**** ID
---
##
- ** token **: ${rawTokens} tokens
- ** token **: ${targetTokens} tokens (压缩比例: ${Math.round(thresholds.messages.targetRatio * 100)}%)
- ****: JSON ${targetTokens} tokens
---
##
###
1. ** ID **
- assistant tool_calls tool_call id
- tool tool_call_id
-
\`\`\`
call_abc123 tool #5
call_def456 tool #7
\`\`\`
2. ****
** 1**
- **[]**:
- **[]**:
- **[]**:
- **[]**:
** 2**
- **[]**:
- **[]**:
- **[]**:
- **[]**:
** 3**
- **[]**:
- **[]**: /
** 4**
- **[]**: tool
- **[]**:
- **[]**:
- **[]**: content null "无结果""未找到"
****
tool
- content "失败""错误""Error""Exception""超时""Timeout"
- content null"无结果""未找到""No results"
- assistant
-
- **** tool_call
3. ****
- **tool_call **
- **** 70-90%
- **** + 40-60%
- **** + + 10-20%
---
###
****
1. ****:
- ****assistant tool_calls tool ****
- **** assistant tool_call tool
- ****
- tool_call tool
- tool tool_call
- tool_call id tool tool_call_id
- tool_call id id tool
2. **ID **: tool_call id tool_call_id
3. ****: tool_call \`id\`, \`type\`, \`function\` 字段
4. ****: assistant tool_calls tool
5. ****:
6. ** content**:
- tool content
- user/assistant
- tool_call_id
****
- ****
- ****
- ****
- "核心发现A、B、C"
- "报错:具体错误"
- "已完成操作X"
---
###
1. **ID **
- assistant tool_calls[i].id tool
- tool tool_call_id assistant
- ID ID
2. ****
-
-
-
3. ****
- JSON ${targetTokens} tokens
- content
4. ****
- tool_call \`id\`, \`type\`, \`function\` 字段?
- JSON
---
##
JSON 使 \`\`\`json 代码块):
\`\`\`json
{
"compressed_messages": [
{"role": "user", "content": "用户请求(精简表达)"},
{
"role": "assistant",
"content": "分析说明(精简但保留逻辑)",
"tool_calls": [
{
"id": "call_原始ID",
"type": "function",
"function": {
"name": "工具名",
"arguments": "{\\"param\\":\\"\\"}"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_原始ID",
"content": "工具返回的核心结果(已大幅精简,只保留关键信息)"
},
{"role": "assistant", "content": "基于工具结果的结论(精简表达)"}
],
"compression_summary": "原始${rawTokens}tokens → 约X tokens (压缩比例Y%)。操作删除了Z条低价值消息精简了N个工具响应M条用户/助手消息。对话逻辑保持完整ID映射关系已验证正确。"
}
\`\`\`
---
##
** 1**
500 tokens
\`\`\`json
[
{"role": "user", "content": "你好,我想了解一下 Python 性能优化的相关技术和最佳实践,能帮我搜索一些资料吗?"},
{"role": "assistant", "content": "当然可以!我会帮您搜索 Python 性能优化相关的资料。让我先搜索相关文章和教程。"},
{"role": "assistant", "tool_calls": [{"id": "call_abc", "type": "function", "function": {"name": "search", "arguments": "{\\"query\\":\\"Python性能优化完整指南\\",\\"max_results\\":10}"}}]},
{"role": "tool", "tool_call_id": "call_abc", "content": "找到10篇文章\\n1. 标题Python性能优化完整指南\\n 作者:张三\\n 发布时间2024-01-15\\n 摘要本文详细介绍了Python性能优化的各种技巧包括使用Cython进行编译优化NumPy向量化计算以及内存优化技术...此处省略400字详细内容\\n URL: https://example.com/article1\\n\\n2. 标题高性能Python编程实战\\n 作者:李四\\n ..."},
{"role": "assistant", "content": "根据搜索结果我为您总结了Python性能优化的主要技术..."}
]
\`\`\`
200 tokens
\`\`\`json
[
{"role": "user", "content": "我想了解 Python 性能优化的相关技术和最佳实践"},
{"role": "assistant", "tool_calls": [{"id": "call_abc", "type": "function", "function": {"name": "search", "arguments": "{\\"query\\":\\"Python性能优化完整指南\\",\\"max_results\\":10}"}}]},
{"role": "tool", "tool_call_id": "call_abc", "content": "找到10篇文章\\n1. 标题Python性能优化完整指南\\n 摘要使用Cython进行编译优化NumPy向量化计算以及内存优化技术"},
{"role": "assistant", "content": "根据搜索结果我为您总结了Python性能优化的主要技术"}
]
\`\`\`
****"你好""能帮我搜索""作者""发布时间"
** 2**
600 tokens
\`\`\`json
[
{"role": "user", "content": "搜索北京的五星级酒店"},
{"role": "assistant", "tool_calls": [{"id": "call_fail1", "type": "function", "function": {"name": "search", "arguments": "{\\"query\\":\\"\\",\\"location\\":\\"Beijing\\"}"}}]},
{"role": "tool", "tool_call_id": "call_fail1", "content": "Error: 网络超时,请重试"},
{"role": "assistant", "content": "搜索遇到网络问题,让我重试"},
{"role": "assistant", "tool_calls": [{"id": "call_fail2", "type": "function", "function": {"name": "search", "arguments": "{\\"query\\":\\"\\"}"}}]},
{"role": "tool", "tool_call_id": "call_fail2", "content": "未找到相关结果"},
{"role": "assistant", "content": "没找到结果,我换个搜索方式"},
{"role": "assistant", "tool_calls": [{"id": "call_ok", "type": "function", "function": {"name": "search", "arguments": "{\\"query\\":\\"\\"}"}}]},
{"role": "tool", "tool_call_id": "call_ok", "content": "找到5家酒店1. 北京王府半岛酒店 2. 北京四季酒店..."},
{"role": "assistant", "content": "为您找到了5家五星级酒店推荐"}
]
\`\`\`
120 tokens
\`\`\`json
[
{"role": "user", "content": "搜索北京的五星级酒店"},
{"role": "assistant", "tool_calls": [{"id": "call_ok", "type": "function", "function": {"name": "search", "arguments": "{\\"query\\":\\"\\"}"}}]},
{"role": "tool", "tool_call_id": "call_ok", "content": "找到5家酒店1. 北京王府半岛酒店 2. 北京四季酒店..."},
{"role": "assistant", "content": "为您找到5家五星级酒店"}
]
\`\`\`
** 3**
400 tokens
\`\`\`json
[
{"role": "user", "content": "帮我创建一个新文件"},
{"role": "assistant", "content": "好的,我需要知道文件名和内容。请问文件名是什么?"},
{"role": "user", "content": "文件名叫 test.txt"},
{"role": "assistant", "content": "明白了,文件名是 test.txt。那么您想在文件中写入什么内容呢"},
{"role": "user", "content": "写入 'Hello World'"},
{"role": "assistant", "content": "收到!我现在帮您创建文件 test.txt并写入内容 'Hello World'"},
{"role": "assistant", "tool_calls": [{"id": "call_xyz", "type": "function", "function": {"name": "write_file", "arguments": "{\\"path\\":\\"test.txt\\",\\"content\\":\\"Hello World\\"}"}}]},
{"role": "tool", "tool_call_id": "call_xyz", "content": "文件创建成功。文件路径:/workspace/test.txt。文件大小11 bytes。创建时间2024-01-15 10:30:00"},
{"role": "assistant", "content": "太好了!文件 test.txt 已经成功创建,内容为 'Hello World'。"}
]
\`\`\`
150 tokens
\`\`\`json
[
{"role": "user", "content": "帮我创建一个新文件"},
{"role": "user", "content": "文件名叫 test.txt"},
{"role": "user", "content": "写入 'Hello World'"},
{"role": "assistant", "tool_calls": [{"id": "call_xyz", "type": "function", "function": {"name": "write_file", "arguments": "{\\"path\\":\\"test.txt\\",\\"content\\":\\"Hello World\\"}"}}]},
{"role": "tool", "tool_call_id": "call_xyz", "content": "文件创建成功。文件路径:/workspace/test.txt。文件大小11 bytes"},
{"role": "assistant", "content": "文件 test.txt 已经成功创建,内容为 'Hello World'"}
]
\`\`\`
**** assistant user
---
##
${JSON.stringify(messages, null, 2)}
---
ID token `;
};

View File

@ -15,7 +15,7 @@ import { removeDatasetCiteText } from '@fastgpt/global/core/ai/llm/utils';
import { getAIApi } from '../config';
import type { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { parsePromptToolCall, promptToolCallMessageRewrite } from './promptToolCall';
import { parsePromptToolCall, promptToolCallMessageRewrite } from './promptCall';
import { getLLMModel } from '../model';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
@ -26,14 +26,14 @@ import { i18nT } from '../../../../web/i18n/utils';
import { getErrText } from '@fastgpt/global/common/error/utils';
import json5 from 'json5';
type ResponseEvents = {
export type ResponseEvents = {
onStreaming?: ({ text }: { text: string }) => void;
onReasoning?: ({ text }: { text: string }) => void;
onToolCall?: ({ call }: { call: ChatCompletionMessageToolCall }) => void;
onToolParam?: ({ tool, params }: { tool: ChatCompletionMessageToolCall; params: string }) => void;
};
type CreateLLMResponseProps<T extends CompletionsBodyType> = {
export type CreateLLMResponseProps<T extends CompletionsBodyType = CompletionsBodyType> = {
userKey?: OpenaiAccountType;
body: LLMRequestBodyType<T>;
isAborted?: () => boolean | undefined;
@ -86,7 +86,7 @@ export const createLLMResponse = async <T extends CompletionsBodyType>(
messages: rewriteMessages
});
// console.log(JSON.stringify(requestBody, null, 2));
// console.dir(requestBody, { depth: null });
const { response, isStreamResponse, getEmptyResponseTip } = await createChatCompletion({
body: requestBody,
userKey,

View File

@ -2,6 +2,8 @@ import { type LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import type { CompletionFinishReason, CompletionUsage } from '@fastgpt/global/core/ai/type';
import { getLLMDefaultUsage } from '@fastgpt/global/core/ai/constants';
import { removeDatasetCiteText } from '@fastgpt/global/core/ai/llm/utils';
import json5 from 'json5';
import { sliceJsonStr } from '@fastgpt/global/common/string/tools';
/*
Count response max token
@ -317,3 +319,11 @@ export const parseLLMStreamResponse = () => {
updateFinishReason
};
};
export const parseToolArgs = <T = Record<string, any>>(toolArgs: string) => {
try {
return json5.parse(sliceJsonStr(toolArgs)) as T;
} catch {
return;
}
};

View File

@ -3,6 +3,7 @@ import { AppToolSourceEnum } from '@fastgpt/global/core/app/tool/constants';
import { type AppTemplateSchemaType } from '@fastgpt/global/core/app/type';
import { MongoAppTemplate } from './templateSchema';
import { pluginClient } from '../../../thirdProvider/fastgptPlugin';
import { addMinutes } from 'date-fns';
const getFileTemplates = async (): Promise<AppTemplateSchemaType[]> => {
const res = await pluginClient.workflow.getTemplateList();
@ -11,9 +12,15 @@ const getFileTemplates = async (): Promise<AppTemplateSchemaType[]> => {
};
const getAppTemplates = async () => {
const communityTemplates = await getFileTemplates();
const originCommunityTemplates = await getFileTemplates();
const communityTemplates = originCommunityTemplates.map((template) => {
return {
...template,
templateId: `${AppToolSourceEnum.community}-${template.templateId.split('.')[0]}`
};
});
const dbTemplates = await MongoAppTemplate.find();
const dbTemplates = await MongoAppTemplate.find().lean();
// Merge db data to community templates
const communityTemplateConfig = communityTemplates.map((template) => {
@ -22,17 +29,12 @@ const getAppTemplates = async () => {
if (config) {
return {
...template,
isActive: config.isActive ?? template.isActive,
tags: config.tags ?? template.tags,
userGuide: config.userGuide ?? template.userGuide,
isQuickTemplate: config.isQuickTemplate ?? template.isQuickTemplate,
order: config.order ?? template.order
...config
};
}
return template;
});
const res = [
...communityTemplateConfig,
...dbTemplates.filter((t) => isCommercialTemaplte(t.templateId))
@ -42,20 +44,31 @@ const getAppTemplates = async () => {
};
export const getAppTemplatesAndLoadThem = async (refresh = false) => {
if (isProduction && global.appTemplates && global.appTemplates.length > 0 && !refresh)
return global.appTemplates;
// 首次强制刷新
if (!global.templatesRefreshTime) {
global.templatesRefreshTime = Date.now() - 10000;
}
if (!global.appTemplates) {
global.appTemplates = [];
}
if (
isProduction &&
// 有模板缓存
global.appTemplates.length > 0 &&
// 缓存时间未过期
global.templatesRefreshTime > Date.now() &&
!refresh
) {
return global.appTemplates;
}
try {
const appTemplates = await getAppTemplates();
global.appTemplates = appTemplates;
global.templatesRefreshTime = addMinutes(new Date(), 30).getTime(); // 缓存30分钟
return appTemplates;
} catch (error) {
// @ts-ignore
global.appTemplates = undefined;
return [];
}
};
@ -66,4 +79,5 @@ export const isCommercialTemaplte = (templateId: string) => {
declare global {
var appTemplates: AppTemplateSchemaType[];
var templatesRefreshTime: number;
}

View File

@ -19,6 +19,8 @@ const AppTemplateSchema = new Schema({
},
type: String,
isActive: Boolean,
isPromoted: Boolean,
recommendText: String,
userGuide: Object,
isQuickTemplate: Boolean,
order: {

View File

@ -178,12 +178,11 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
const {
toolWorkflowInteractiveResponse,
dispatchFlowResponse, // tool flow response
toolDispatchFlowResponses, // tool flow response
toolCallInputTokens,
toolCallOutputTokens,
completeMessages = [], // The actual message sent to AI(just save text)
assistantResponses = [], // FastGPT system store assistant.value response
runTimes,
finish_reason
} = await (async () => {
const adaptMessages = chats2GPTMessages({
@ -191,22 +190,20 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
reserveId: false
// reserveTool: !!toolModel.toolChoice
});
const requestParams = {
return runToolCall({
...props,
runtimeNodes,
runtimeEdges,
toolNodes,
toolModel,
messages: adaptMessages,
interactiveEntryToolParams: lastInteractive?.toolParams
};
return runToolCall({
...props,
...requestParams,
maxRunToolTimes: 100
childrenInteractiveParams:
lastInteractive?.type === 'toolChildrenInteractive' ? lastInteractive.params : undefined
});
})();
// Usage computed
const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({
model,
inputTokens: toolCallInputTokens,
@ -214,12 +211,13 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
});
const modelUsage = externalProvider.openaiAccount?.key ? 0 : modelTotalPoints;
const toolUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
const toolUsages = toolDispatchFlowResponses.map((item) => item.flowUsages).flat();
const toolTotalPoints = toolUsages.reduce((sum, item) => sum + item.totalPoints, 0);
// concat tool usage
const totalPointsUsage = modelUsage + toolTotalPoints;
// Preview assistant responses
const previewAssistantResponses = filterToolResponseToPreview(assistantResponses);
return {
@ -229,7 +227,10 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
.map((item) => item.text?.content || '')
.join('')
},
[DispatchNodeResponseKeyEnum.runTimes]: runTimes,
[DispatchNodeResponseKeyEnum.runTimes]: toolDispatchFlowResponses.reduce(
(sum, item) => sum + item.runTimes,
0
),
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
// 展示的积分消耗
@ -244,7 +245,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
10000,
useVision
),
toolDetail: dispatchFlowResponse.map((item) => item.flowResponses).flat(),
toolDetail: toolDispatchFlowResponses.map((item) => item.flowResponses).flat(),
mergeSignId: nodeId,
finishReason: finish_reason
},

View File

@ -1,85 +1,22 @@
import { filterGPTMessageByMaxContext } from '../../../../ai/llm/utils';
import type {
ChatCompletionToolMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { runWorkflow } from '../../index';
import type { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type';
import json5 from 'json5';
import type { DispatchFlowResponse } from '../../type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { computedMaxToken } from '../../../../ai/utils';
import { parseToolArgs } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { createLLMResponse } from '../../../../ai/llm/request';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { toolValueTypeList, valueTypeJsonSchemaMap } from '@fastgpt/global/core/workflow/constants';
import { runAgentCall } from '../../../../ai/llm/agentCall';
type ToolRunResponseType = {
toolRunResponse?: DispatchFlowResponse;
toolMsgParams: ChatCompletionToolMessageParam;
}[];
/*
Check
1.
2.
3.
-
-
1. tools
2. messages
3. Load request llm messages: system prompt, histories, human question, assistant responses, tool responses, assistant responses....)
4. LLM
-
1.
2.
3. assistants tool
4. request llm response messages tokens
5. requestllm response tool response
6. assistant responses: history assistant + tool assistant + tool child assistant
7.
-
-
-
1.
2. completeMessages tokens
1. id
2. toolCallId: 本次工具调用的 ID id
3. messagesassistants responses tool responses
*/
export const runToolCall = async (
props: DispatchToolModuleProps & {
maxRunToolTimes: number;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
export const runToolCall = async (props: DispatchToolModuleProps): Promise<RunToolResponse> => {
const { messages, toolNodes, toolModel, childrenInteractiveParams, ...workflowProps } = props;
const {
messages,
toolNodes,
toolModel,
maxRunToolTimes,
interactiveEntryToolParams,
...workflowProps
} = props;
let {
res,
requestOrigin,
runtimeNodes,
@ -100,101 +37,7 @@ export const runToolCall = async (
}
} = workflowProps;
if (maxRunToolTimes <= 0 && response) {
return response;
}
// Interactive
if (interactiveEntryToolParams) {
initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
// Run entry tool
const toolRunResponse = await runWorkflow({
...workflowProps,
usageId: undefined,
isToolCall: true
});
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
// Response to frontend
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: interactiveEntryToolParams.toolCallId,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
// Check stop signal
const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
const requestMessages = [
...messages,
...interactiveEntryToolParams.memoryMessages.map((item) =>
item.role === 'tool' && item.tool_call_id === interactiveEntryToolParams.toolCallId
? {
...item,
content: stringToolResponse
}
: item
)
];
if (hasStopSignal || workflowInteractiveResponse) {
// Get interactive tool data
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: interactiveEntryToolParams.toolCallId,
memoryMessages: interactiveEntryToolParams.memoryMessages
}
}
: undefined;
return {
dispatchFlowResponse: [toolRunResponse],
toolCallInputTokens: 0,
toolCallOutputTokens: 0,
completeMessages: requestMessages,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes,
toolWorkflowInteractiveResponse
};
}
return runToolCall(
{
...props,
interactiveEntryToolParams: undefined,
maxRunToolTimes: maxRunToolTimes - 1,
// Rewrite toolCall messages
messages: requestMessages
},
{
dispatchFlowResponse: [toolRunResponse],
toolCallInputTokens: 0,
toolCallOutputTokens: 0,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes
}
);
}
// ------------------------------------------------------------
const assistantResponses = response?.assistantResponses || [];
// 构建 tools 参数
const toolNodesMap = new Map<string, ToolNodeItemType>();
const tools: ChatCompletionTool[] = toolNodes.map((item) => {
toolNodesMap.set(item.nodeId, item);
@ -246,64 +89,44 @@ export const runToolCall = async (
}
};
});
const getToolInfo = (name: string) => {
const toolNode = toolNodesMap.get(name);
return {
name: toolNode?.name || '',
avatar: toolNode?.avatar || ''
};
};
const max_tokens = computedMaxToken({
model: toolModel,
maxToken,
min: 100
});
// Filter histories by maxToken
const filterMessages = (
await filterGPTMessageByMaxContext({
messages,
maxContext: toolModel.maxContext - (max_tokens || 0) // filter token. not response maxToken
})
).map((item) => {
if (item.role === 'assistant' && item.tool_calls) {
return {
...item,
tool_calls: item.tool_calls.map((tool) => ({
id: tool.id,
type: tool.type,
function: tool.function
}))
};
}
return item;
});
// SSE 响应实例
const write = res ? responseWriteController({ res, readStream: stream }) : undefined;
// 工具响应原始值
const toolRunResponses: DispatchFlowResponse[] = [];
let {
reasoningText: reasoningContent,
answerText: answer,
toolCalls = [],
finish_reason,
usage,
getEmptyResponseTip,
assistantMessage,
completeMessages
} = await createLLMResponse({
const {
inputTokens,
outputTokens,
completeMessages,
assistantMessages,
interactiveResponse,
finish_reason
} = await runAgentCall({
maxRunAgentTimes: 50,
body: {
model: toolModel.model,
stream,
messages: filterMessages,
tool_choice: 'auto',
toolCallMode: toolModel.toolChoice ? 'toolChoice' : 'prompt',
messages,
tools,
parallel_tool_calls: true,
model: toolModel.model,
max_tokens: maxToken,
stream,
temperature,
max_tokens,
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: {
type: aiChatResponseFormat as any,
type: aiChatResponseFormat,
json_schema: aiChatJsonSchema
},
requestOrigin,
retainDatasetCite,
useVision: aiChatVision,
requestOrigin
useVision: aiChatVision
},
isAborted: () => res?.closed,
userKey: externalProvider.openaiAccount,
@ -358,52 +181,39 @@ export const runToolCall = async (
}
}
});
}
});
},
handleToolResponse: async ({ call, messages }) => {
const toolNode = toolNodesMap.get(call.function?.name);
if (!answer && !reasoningContent && !toolCalls.length) {
return Promise.reject(getEmptyResponseTip());
}
/* Run the selected tool by LLM.
Since only reference parameters are passed, if the same tool is run in parallel, it will get the same run parameters
*/
const toolsRunResponse: ToolRunResponseType = [];
for await (const tool of toolCalls) {
try {
const toolNode = toolNodesMap.get(tool.function?.name);
if (!toolNode) continue;
const startParams = (() => {
try {
return json5.parse(tool.function.arguments);
} catch (error) {
return {};
}
})();
if (!toolNode) {
return {
response: 'Call tool not found',
assistantMessages: [],
usages: [],
interactive: undefined
};
}
// Init tool params and run
const startParams = parseToolArgs(call.function.arguments);
initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
initToolCallEdges(runtimeEdges, [toolNode.nodeId]);
const toolRunResponse = await runWorkflow({
...workflowProps,
runtimeNodes,
usageId: undefined,
isToolCall: true
});
// Format tool response
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
const toolMsgParams: ChatCompletionToolMessageParam = {
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
name: tool.function.name,
content: stringToolResponse
};
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: tool.id,
id: call.id,
toolName: '',
toolAvatar: '',
params: '',
@ -412,166 +222,91 @@ export const runToolCall = async (
}
});
toolsRunResponse.push({
toolRunResponse,
toolMsgParams
toolRunResponses.push(toolRunResponse);
const assistantMessages = chats2GPTMessages({
messages: [
{
obj: ChatRoleEnum.AI,
value: toolRunResponse.assistantResponses
}
],
reserveId: false
});
} catch (error) {
const err = getErrText(error);
return {
response: stringToolResponse,
assistantMessages,
usages: toolRunResponse.flowUsages,
interactive: toolRunResponse.workflowInteractiveResponse,
stop: toolRunResponse.flowResponses?.some((item) => item.toolStop)
};
},
childrenInteractiveParams,
handleInteractiveTool: async ({ childrenResponse, toolParams }) => {
initToolNodes(runtimeNodes, childrenResponse.entryNodeIds);
initToolCallEdges(runtimeEdges, childrenResponse.entryNodeIds);
const toolRunResponse = await runWorkflow({
...workflowProps,
lastInteractive: childrenResponse,
runtimeNodes,
runtimeEdges,
usageId: undefined,
isToolCall: true
});
// console.dir(runtimeEdges, { depth: null });
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: tool.id,
id: toolParams.toolCallId,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(err, 5000, 5000)
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
toolsRunResponse.push({
toolRunResponse: undefined,
toolMsgParams: {
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
name: tool.function.name,
content: sliceStrStartEnd(err, 5000, 5000)
}
toolRunResponses.push(toolRunResponse);
const assistantMessages = chats2GPTMessages({
messages: [
{
obj: ChatRoleEnum.AI,
value: toolRunResponse.assistantResponses
}
],
reserveId: false
});
}
}
const flatToolsResponseData = toolsRunResponse
.map((item) => item.toolRunResponse)
.flat()
.filter(Boolean) as DispatchFlowResponse[];
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
const inputTokens = response
? response.toolCallInputTokens + usage.inputTokens
: usage.inputTokens;
const outputTokens = response
? response.toolCallOutputTokens + usage.outputTokens
: usage.outputTokens;
if (toolCalls.length > 0) {
/*
...
user
assistant: tool data
tool: tool response
*/
const nextRequestMessages: ChatCompletionMessageParam[] = [
...completeMessages,
...toolsRunResponse.map((item) => item?.toolMsgParams)
];
/*
Get tool node assistant response
- history assistant
- current tool assistant
- tool child assistant
*/
const toolNodeAssistant = GPTMessages2Chats({
messages: [...assistantMessage, ...toolsRunResponse.map((item) => item?.toolMsgParams)],
getToolInfo: (id) => {
const toolNode = toolNodesMap.get(id);
return {
name: toolNode?.name || '',
avatar: toolNode?.avatar || ''
};
}
})[0] as AIChatItemType;
const toolChildAssistants = flatToolsResponseData
.map((item) => item.assistantResponses)
.flat()
.filter((item) => item.type !== ChatItemValueTypeEnum.interactive); // 交互节点留着下次记录
const concatAssistantResponses = [
...assistantResponses,
...toolNodeAssistant.value,
...toolChildAssistants
];
const runTimes =
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
// Check stop signal
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponseItem = toolsRunResponse.find(
(item) => item.toolRunResponse?.workflowInteractiveResponse
);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.toolRunResponse?.workflowInteractiveResponse;
// Flashback traverses completeMessages, intercepting messages that know the first user
const firstUserIndex = nextRequestMessages.findLastIndex((item) => item.role === 'user');
const newMessages = nextRequestMessages.slice(firstUserIndex + 1);
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id,
memoryMessages: newMessages
}
}
: undefined;
return {
dispatchFlowResponse,
toolCallInputTokens: inputTokens,
toolCallOutputTokens: outputTokens,
completeMessages: nextRequestMessages,
assistantResponses: concatAssistantResponses,
toolWorkflowInteractiveResponse,
runTimes,
finish_reason
response: stringToolResponse,
assistantMessages,
usages: toolRunResponse.flowUsages,
interactive: toolRunResponse.workflowInteractiveResponse,
stop: toolRunResponse.flowResponses?.some((item) => item.toolStop)
};
}
});
return runToolCall(
{
...props,
maxRunToolTimes: maxRunToolTimes - 1,
messages: nextRequestMessages
},
{
dispatchFlowResponse,
toolCallInputTokens: inputTokens,
toolCallOutputTokens: outputTokens,
assistantResponses: concatAssistantResponses,
runTimes,
finish_reason
}
);
} else {
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats({
messages: assistantMessage
})[0] as AIChatItemType;
const assistantResponses = GPTMessages2Chats({
messages: assistantMessages,
reserveTool: true,
getToolInfo
})
.map((item) => item.value as AIChatItemValueItemType[])
.flat();
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
toolCallInputTokens: inputTokens,
toolCallOutputTokens: outputTokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1,
finish_reason
};
}
return {
toolDispatchFlowResponses: toolRunResponses,
toolCallInputTokens: inputTokens,
toolCallOutputTokens: outputTokens,
completeMessages,
assistantResponses,
finish_reason,
toolWorkflowInteractiveResponse: interactiveResponse
};
};

View File

@ -14,7 +14,11 @@ import type { DispatchFlowResponse } from '../../type';
import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import type { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type {
ToolCallChildrenInteractive,
InteractiveNodeResponseType,
WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model';
import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
@ -37,18 +41,17 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
childrenInteractiveParams?: ToolCallChildrenInteractive['params'];
};
export type RunToolResponse = {
dispatchFlowResponse: DispatchFlowResponse[];
toolDispatchFlowResponses: DispatchFlowResponse[];
toolCallInputTokens: number;
toolCallOutputTokens: number;
completeMessages?: ChatCompletionMessageParam[];
assistantResponses?: AIChatItemValueItemType[];
toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.runTimes]: number;
finish_reason?: CompletionFinishReason;
completeMessages: ChatCompletionMessageParam[];
assistantResponses: AIChatItemValueItemType[];
finish_reason: CompletionFinishReason;
toolWorkflowInteractiveResponse?: ToolCallChildrenInteractive;
};
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];

View File

@ -62,12 +62,9 @@ export const initToolNodes = (
nodes.forEach((node) => {
if (entryNodeIds.includes(node.nodeId)) {
node.isEntry = true;
node.isStart = true;
if (startParams) {
node.inputs = updateToolInputValue({ params: startParams, inputs: node.inputs });
}
} else {
node.isStart = false;
}
});
};

View File

@ -747,6 +747,7 @@ export const runWorkflow = async (data: RunWorkflowProps): Promise<DispatchFlowR
// Get next source edges and update status
const skipHandleId = result[DispatchNodeResponseKeyEnum.skipHandleId] || [];
const targetEdges = filterWorkflowEdges(runtimeEdges).filter(
(item) => item.source === node.nodeId
);
@ -957,6 +958,7 @@ export const runWorkflow = async (data: RunWorkflowProps): Promise<DispatchFlowR
entryNodeIds,
memoryEdges: runtimeEdges.map((edge) => ({
...edge,
// 入口前面的边全部激活,保证下次进来一定能执行。
status: entryNodeIds.includes(edge.target) ? 'active' : edge.status
})),
nodeOutputs,

View File

@ -36,6 +36,7 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
[DispatchNodeResponseKeyEnum.answerText]: responseText,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
textOutput: formatText
}
},
[DispatchNodeResponseKeyEnum.toolResponses]: responseText
};
};

View File

@ -4,6 +4,7 @@
"app_name": "Application name",
"auto_index": "Auto index",
"billing_module": "Deduction module",
"compress_llm_messages": "AI history compression",
"confirm_export": "A total of {{total}} pieces of data were filtered out. Are you sure to export?",
"count": "Number of runs",
"current_filter_conditions": "Current filter conditions",

View File

@ -319,13 +319,14 @@
"template.hard_strict_des": "Based on the question and answer template, stricter requirements are imposed on the model's answers.",
"template.qa_template": "Q&A template",
"template.qa_template_des": "A knowledge base suitable for QA question and answer structure, which allows AI to answer strictly according to preset content",
"template.recommended": "Promoted",
"template.simple_robot": "Simple robot",
"template.standard_strict": "Standard strict template",
"template.standard_strict_des": "Based on the standard template, stricter requirements are imposed on the model's answers.",
"template.standard_template": "Standard template",
"template.standard_template_des": "Standard prompt words for knowledge bases with unfixed structures.",
"templateMarket.Search_template": "Search Template",
"templateMarket.Use": "Use",
"templateMarket.Use": "Build now",
"templateMarket.no_intro": "No introduction yet~",
"templateMarket.templateTags.Recommendation": "Recommendation",
"templateMarket.template_guide": "Guide",

View File

@ -5,6 +5,7 @@
"app_name": "应用名",
"auto_index": "索引增强",
"billing_module": "扣费模块",
"compress_llm_messages": "AI 历史记录压缩",
"confirm_export": "共筛选出 {{total}} 条数据,是否确认导出?",
"count": "运行次数",
"current_filter_conditions": "当前筛选条件:",

View File

@ -333,16 +333,17 @@
"template.hard_strict_des": "在问答模板基础上,对模型的回答做更严格的要求。",
"template.qa_template": "问答模板",
"template.qa_template_des": "适合 QA 问答结构的知识库可以让AI较为严格的按预设内容回答",
"template.recommended": "精选",
"template.simple_robot": "简易机器人",
"template.standard_strict": "标准严格模板",
"template.standard_strict_des": "在标准模板基础上,对模型的回答做更严格的要求。",
"template.standard_template": "标准模板",
"template.standard_template_des": "标准提示词,用于结构不固定的知识库。",
"templateMarket.Search_template": "搜索模板",
"templateMarket.Use": "使用",
"templateMarket.Use": "立即搭建",
"templateMarket.no_intro": "还没有介绍~",
"templateMarket.templateTags.Recommendation": "推荐",
"templateMarket.template_guide": "模板说明",
"templateMarket.template_guide": "说明",
"template_market": "模板市场",
"template_market_description": "在模板市场探索更多玩法,配置教程与使用引导,带你理解并上手各种应用",
"template_market_empty_data": "找不到合适的模板",

View File

@ -4,6 +4,7 @@
"app_name": "應用程式名",
"auto_index": "索引增強",
"billing_module": "扣費模組",
"compress_llm_messages": "AI 歷史記錄壓縮",
"confirm_export": "共篩選出 {{total}} 條資料,是否確認匯出?",
"count": "運行次數",
"current_filter_conditions": "目前篩選條件:",

View File

@ -318,16 +318,17 @@
"template.hard_strict_des": "在問答範本基礎上,對模型的回答做出更嚴格的要求。",
"template.qa_template": "問答範本",
"template.qa_template_des": "適合問答結構的知識庫,可以讓 AI 較為嚴格地按照預設內容回答",
"template.recommended": "精選",
"template.simple_robot": "簡易機器人",
"template.standard_strict": "標準嚴格範本",
"template.standard_strict_des": "在標準範本基礎上,對模型的回答做出更嚴格的要求。",
"template.standard_template": "標準範本",
"template.standard_template_des": "標準提示詞,用於結構不固定的知識庫。",
"templateMarket.Search_template": "搜尋範本",
"templateMarket.Use": "使用",
"templateMarket.Use": "立即搭建",
"templateMarket.no_intro": "還沒有介紹~",
"templateMarket.templateTags.Recommendation": "推薦",
"templateMarket.template_guide": "範本說明",
"templateMarket.template_guide": "說明",
"template_market": "範本市集",
"template_market_description": "在範本市集探索更多玩法,設定教學與使用指引,帶您理解並上手各種應用程式",
"template_market_empty_data": "找不到合適的範本",

View File

@ -0,0 +1,57 @@
<svg width="366" height="190" viewBox="0 0 366 190" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_29654_18158)">
<path d="M0 8C0 3.58172 3.58172 0 8 0H357.667C362.085 0 365.667 3.58172 365.667 8V182C365.667 186.418 362.085 190 357.667 190H7.99999C3.58171 190 0 186.418 0 182V8Z" fill="white"/>
<path d="M0 8C0 3.58172 3.58172 0 8 0H357.667C362.085 0 365.667 3.58172 365.667 8V182C365.667 186.418 362.085 190 357.667 190H7.99999C3.58171 190 0 186.418 0 182V8Z" fill="url(#paint0_linear_29654_18158)" fill-opacity="0.16"/>
<path d="M0 8C0 3.58172 3.58172 0 8 0H357.667C362.085 0 365.667 3.58172 365.667 8V182C365.667 186.418 362.085 190 357.667 190H7.99999C3.58171 190 0 186.418 0 182V8Z" fill="url(#paint1_linear_29654_18158)" fill-opacity="0.07"/>
<path d="M0 8C0 3.58172 3.58172 0 8 0H357.667C362.085 0 365.667 3.58172 365.667 8V182C365.667 186.418 362.085 190 357.667 190H7.99999C3.58171 190 0 186.418 0 182V8Z" fill="url(#paint2_radial_29654_18158)" fill-opacity="0.1"/>
<path d="M0 8C0 3.58172 3.58172 0 8 0H357.667C362.085 0 365.667 3.58172 365.667 8V182C365.667 186.418 362.085 190 357.667 190H7.99999C3.58171 190 0 186.418 0 182V8Z" fill="url(#paint3_radial_29654_18158)"/>
<path d="M271.62 84.9425C272.043 84.3774 272.94 84.7573 272.829 85.4544C272.353 88.4299 273.565 91.4239 275.978 93.2296C276.543 93.6527 276.163 94.5499 275.466 94.4384C272.49 93.9627 269.496 95.1753 267.691 97.5875C267.267 98.1526 266.37 97.7727 266.482 97.0756C266.957 94.1002 265.745 91.1062 263.333 89.3004C262.768 88.8774 263.147 87.9802 263.845 88.0917C266.82 88.5674 269.814 87.3548 271.62 84.9425Z" fill="url(#paint4_linear_29654_18158)"/>
<path d="M337.858 44.5751C337.895 44.4268 338.105 44.4268 338.142 44.5751C338.298 45.2078 338.792 45.7018 339.425 45.858C339.573 45.8946 339.573 46.1054 339.425 46.142C338.792 46.2982 338.298 46.7922 338.142 47.4249C338.105 47.5732 337.895 47.5732 337.858 47.4249C337.702 46.7922 337.208 46.2982 336.575 46.142C336.427 46.1054 336.427 45.8946 336.575 45.858C337.208 45.7018 337.702 45.2078 337.858 44.5751Z" fill="url(#paint5_linear_29654_18158)"/>
<path d="M273.262 34.6282C273.245 34.1841 273.835 34.0146 274.056 34.3999C275.002 36.0444 276.781 37.0292 278.677 36.9574C279.121 36.9406 279.291 37.5301 278.905 37.7517C277.261 38.6975 276.276 40.4769 276.348 42.3726C276.365 42.8167 275.775 42.9862 275.554 42.6009C274.608 40.9564 272.829 39.9716 270.933 40.0434C270.489 40.0603 270.319 39.4707 270.704 39.2492C272.349 38.3033 273.334 36.524 273.262 34.6282Z" fill="url(#paint6_linear_29654_18158)" fill-opacity="0.69"/>
<path d="M321.716 19.1501C321.789 18.8536 322.211 18.8536 322.284 19.1501C322.596 20.4156 323.584 21.4037 324.85 21.7161C325.146 21.7893 325.146 22.2107 324.85 22.2839C323.584 22.5963 322.596 23.5844 322.284 24.8499C322.211 25.1464 321.789 25.1464 321.716 24.8499C321.404 23.5844 320.416 22.5963 319.15 22.2839C318.854 22.2107 318.854 21.7893 319.15 21.7161C320.416 21.4037 321.404 20.4156 321.716 19.1501Z" fill="url(#paint7_linear_29654_18158)" fill-opacity="0.69"/>
<path d="M310.645 61.4376C310.737 61.067 311.263 61.067 311.355 61.4376C311.745 63.0195 312.981 64.2546 314.562 64.6451C314.933 64.7366 314.933 65.2634 314.562 65.3549C312.981 65.7454 311.745 66.9805 311.355 68.5624C311.263 68.933 310.737 68.933 310.645 68.5624C310.255 66.9805 309.019 65.7454 307.438 65.3549C307.067 65.2634 307.067 64.7366 307.438 64.6451C309.019 64.2546 310.255 63.0195 310.645 61.4376Z" fill="url(#paint8_linear_29654_18158)" fill-opacity="0.71"/>
<circle cx="278.5" cy="68.5" r="1.5" fill="white"/>
<circle cx="250.5" cy="52.5" r="0.5" fill="white"/>
</g>
<defs>
<linearGradient id="paint0_linear_29654_18158" x1="232.697" y1="103.407" x2="331.64" y2="-64.2216" gradientUnits="userSpaceOnUse">
<stop stop-color="#2643FF" stop-opacity="0"/>
<stop offset="1" stop-color="#2643FF"/>
</linearGradient>
<linearGradient id="paint1_linear_29654_18158" x1="256.071" y1="66.4159" x2="297.946" y2="-40.2204" gradientUnits="userSpaceOnUse">
<stop stop-color="#26FFF4" stop-opacity="0"/>
<stop offset="1" stop-color="#2643FF"/>
</linearGradient>
<radialGradient id="paint2_radial_29654_18158" cx="0" cy="0" r="1" gradientTransform="matrix(-160.499 125.686 -44.4503 -37.1762 356.837 -5.4646)" gradientUnits="userSpaceOnUse">
<stop stop-color="#E39DFF"/>
<stop offset="1" stop-color="white" stop-opacity="0"/>
</radialGradient>
<radialGradient id="paint3_radial_29654_18158" cx="0" cy="0" r="1" gradientTransform="matrix(204.129 -86.5929 22.7656 35.1486 182.833 68.0973)" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white" stop-opacity="0"/>
</radialGradient>
<linearGradient id="paint4_linear_29654_18158" x1="273.261" y1="82.7503" x2="266.049" y2="99.7798" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white"/>
</linearGradient>
<linearGradient id="paint5_linear_29654_18158" x1="338" y1="44" x2="338" y2="48" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white"/>
</linearGradient>
<linearGradient id="paint6_linear_29654_18158" x1="273.197" y1="32.9054" x2="276.413" y2="44.0955" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white"/>
</linearGradient>
<linearGradient id="paint7_linear_29654_18158" x1="322" y1="18" x2="322" y2="26" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="#EFF2FF"/>
</linearGradient>
<linearGradient id="paint8_linear_29654_18158" x1="311" y1="60" x2="311" y2="70" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white"/>
</linearGradient>
<clipPath id="clip0_29654_18158">
<path d="M0 8C0 3.58172 3.58172 0 8 0H357.667C362.085 0 365.667 3.58172 365.667 8V182C365.667 186.418 362.085 190 357.667 190H7.99999C3.58171 190 0 186.418 0 182V8Z" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 5.8 KiB

View File

@ -0,0 +1,45 @@
<svg width="308" height="98" viewBox="0 0 308 98" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_29676_18182)">
<path d="M0 8C0 3.58172 3.58172 0 8 0H299.667C304.085 0 307.667 3.58172 307.667 8V90C307.667 94.4183 304.085 98 299.667 98H8C3.58172 98 0 94.4183 0 90V8Z" fill="white"/>
<path d="M0 8C0 3.58172 3.58172 0 8 0H299.667C304.085 0 307.667 3.58172 307.667 8V90C307.667 94.4183 304.085 98 299.667 98H8C3.58172 98 0 94.4183 0 90V8Z" fill="url(#paint0_linear_29676_18182)" fill-opacity="0.16"/>
<path d="M0 8C0 3.58172 3.58172 0 8 0H299.667C304.085 0 307.667 3.58172 307.667 8V90C307.667 94.4183 304.085 98 299.667 98H8C3.58172 98 0 94.4183 0 90V8Z" fill="url(#paint1_linear_29676_18182)" fill-opacity="0.07"/>
<path d="M0 8C0 3.58172 3.58172 0 8 0H299.667C304.085 0 307.667 3.58172 307.667 8V90C307.667 94.4183 304.085 98 299.667 98H8C3.58172 98 0 94.4183 0 90V8Z" fill="url(#paint2_radial_29676_18182)" fill-opacity="0.1"/>
<path d="M0 8C0 3.58172 3.58172 0 8 0H299.667C304.085 0 307.667 3.58172 307.667 8V90C307.667 94.4183 304.085 98 299.667 98H8C3.58172 98 0 94.4183 0 90V8Z" fill="url(#paint3_radial_29676_18182)"/>
<path d="M259.341 12.4335C259.592 12.0989 260.123 12.3239 260.057 12.7367C259.775 14.4987 260.493 16.2718 261.922 17.3411C262.256 17.5917 262.031 18.123 261.619 18.057C259.857 17.7752 258.084 18.4933 257.014 19.9218C256.764 20.2565 256.232 20.0315 256.298 19.6187C256.58 17.8567 255.862 16.0836 254.433 15.0143C254.099 14.7637 254.324 14.2324 254.737 14.2984C256.499 14.5802 258.272 13.8621 259.341 12.4335Z" fill="url(#paint4_linear_29676_18182)"/>
<path d="M246.816 38.1572C246.698 37.649 247.34 37.3226 247.681 37.7174C249.137 39.4027 251.407 40.1422 253.576 39.6378C254.085 39.5196 254.411 40.1615 254.016 40.5026C252.331 41.9586 251.592 44.2288 252.096 46.3982C252.214 46.9064 251.572 47.2329 251.231 46.838C249.775 45.1527 247.505 44.4132 245.336 44.9177C244.827 45.0358 244.501 44.3939 244.896 44.0528C246.581 42.5968 247.321 40.3266 246.816 38.1572Z" fill="url(#paint5_linear_29676_18182)"/>
<path d="M289.294 18.7755C289.253 18.5976 289.477 18.4832 289.597 18.6215C290.107 19.2117 290.902 19.4707 291.661 19.294C291.839 19.2526 291.954 19.4774 291.816 19.5969C291.225 20.1068 290.966 20.9018 291.143 21.6615C291.184 21.8395 290.96 21.9538 290.84 21.8156C290.33 21.2253 289.535 20.9664 288.776 21.143C288.598 21.1844 288.483 20.9596 288.621 20.8402C289.212 20.3303 289.471 19.5352 289.294 18.7755Z" fill="url(#paint6_linear_29676_18182)"/>
</g>
<defs>
<linearGradient id="paint0_linear_29676_18182" x1="195.788" y1="53.3363" x2="233.088" y2="-49.7506" gradientUnits="userSpaceOnUse">
<stop stop-color="#2643FF" stop-opacity="0"/>
<stop offset="1" stop-color="#2643FF"/>
</linearGradient>
<linearGradient id="paint1_linear_29676_18182" x1="215.454" y1="34.2566" x2="229.899" y2="-25.7495" gradientUnits="userSpaceOnUse">
<stop stop-color="#26FFF4" stop-opacity="0"/>
<stop offset="1" stop-color="#2643FF"/>
</linearGradient>
<radialGradient id="paint2_radial_29676_18182" cx="0" cy="0" r="1" gradientTransform="matrix(-135.041 64.8274 -37.3999 -19.1751 300.237 -2.81858)" gradientUnits="userSpaceOnUse">
<stop stop-color="#E39DFF"/>
<stop offset="1" stop-color="white" stop-opacity="0"/>
</radialGradient>
<radialGradient id="paint3_radial_29676_18182" cx="0" cy="0" r="1" gradientTransform="matrix(171.751 -44.6637 19.1547 18.1293 153.833 35.1239)" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white" stop-opacity="0"/>
</radialGradient>
<linearGradient id="paint4_linear_29676_18182" x1="260.313" y1="11.1353" x2="256.042" y2="21.2201" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white"/>
</linearGradient>
<linearGradient id="paint5_linear_29676_18182" x1="246.358" y1="36.1857" x2="252.554" y2="48.3697" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white"/>
</linearGradient>
<linearGradient id="paint6_linear_29676_18182" x1="289.133" y1="18.0851" x2="291.304" y2="22.352" gradientUnits="userSpaceOnUse">
<stop stop-color="white"/>
<stop offset="1" stop-color="white"/>
</linearGradient>
<clipPath id="clip0_29676_18182">
<path d="M0 8C0 3.58172 3.58172 0 8 0H299.667C304.085 0 307.667 3.58172 307.667 8V90C307.667 94.4183 304.085 98 299.667 98H8C3.58172 98 0 94.4183 0 90V8Z" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 4.3 KiB

View File

@ -123,7 +123,7 @@ const Navbar = ({ unread }: { unread: number }) => {
w={'100%'}
userSelect={'none'}
pb={2}
bg={isSecondNavbarPage ? 'myGray.50' : 'transparent'}
bg={isSecondNavbarPage ? 'white' : 'transparent'}
>
{/* logo */}
<Box flex={'0 0 auto'} mb={3}>

View File

@ -232,14 +232,13 @@ const DashboardContainer = ({
position={'fixed'}
left={isPc ? navbarWidth : 0}
top={0}
bg={'myGray.25'}
bg={'white'}
w={`220px`}
h={'full'}
borderLeft={'1px solid'}
borderRight={'1px solid'}
borderColor={'myGray.200'}
pt={4}
px={2.5}
pb={2.5}
zIndex={100}
userSelect={'none'}
@ -247,7 +246,12 @@ const DashboardContainer = ({
flexDirection={'column'}
justifyContent={'space-between'}
>
<Box>
<Box
flex={1}
overflowY={'auto'}
px={2.5}
sx={{ '&::-webkit-scrollbar': { width: '4px' } }}
>
{groupList.map((group) => {
const selected = currentTab === group.groupId;
@ -339,7 +343,7 @@ const DashboardContainer = ({
</MyBox>
)}
<Box h={'100%'} pl={isPc ? `220px` : 0} position={'relative'} bg={'myGray.25'}>
<Box h={'100%'} pl={isPc ? `220px` : 0} position={'relative'} bg={'white'}>
{children({
templateTags,
templateList,

View File

@ -11,7 +11,7 @@ import {
SkeletonCircle,
useBreakpointValue
} from '@chakra-ui/react';
import type { AppTypeEnum } from '@fastgpt/global/core/app/constants';
import { AppTypeEnum } from '@fastgpt/global/core/app/constants';
import Avatar from '@fastgpt/web/components/common/Avatar';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import MyIcon from '@fastgpt/web/components/common/Icon';
@ -22,13 +22,16 @@ import MyBox from '@fastgpt/web/components/common/MyBox';
import { useLocalStorageState } from 'ahooks';
import { useState } from 'react';
import { getWebReqUrl } from '@fastgpt/web/common/system/utils';
import { form2AppWorkflow } from '@/web/core/app/utils';
import { webPushTrack } from '@/web/common/middle/tracks/utils';
import { appTypeTagMap } from '../constant';
const TemplateCreatePanel = ({ type }: { type: AppTypeEnum | 'all' }) => {
const { t } = useTranslation();
const router = useRouter();
const randomNumber =
useBreakpointValue({ base: 3, sm: 3, md: 4, lg: 4, xl: 5 }, { ssr: false }) || 4;
useBreakpointValue({ base: 2, sm: 2, md: 3, lg: 3, xl: 4 }, { ssr: false }) || 3;
const [isHoverMoreButton, setIsHoverMoreButton] = useState(false);
const [isCollapsed, setIsCollapsed] = useLocalStorageState<boolean>(
@ -43,12 +46,20 @@ const TemplateCreatePanel = ({ type }: { type: AppTypeEnum | 'all' }) => {
data: templateData,
loading: isFetchingTemplates
} = useRequest2(
(excludeIds?: string[]) =>
getTemplateMarketItemList({
(ids?: string[]) => {
const excludeIds = (() => {
try {
return JSON.stringify(ids);
} catch (error) {
return '';
}
})();
return getTemplateMarketItemList({
type,
randomNumber,
excludeIds
}),
});
},
{
manual: false,
refreshDeps: [type, randomNumber]
@ -62,6 +73,11 @@ const TemplateCreatePanel = ({ type }: { type: AppTypeEnum | 'all' }) => {
setCreatingTemplateId(templateId);
const templateDetail = await getTemplateMarketItemDetail(templateId);
if (templateDetail.type === AppTypeEnum.simple) {
const completeWorkflow = form2AppWorkflow(templateDetail.workflow, t);
templateDetail.workflow = completeWorkflow;
}
return postCreateApp({
avatar: templateDetail.avatar,
name: templateDetail.name,
@ -70,6 +86,13 @@ const TemplateCreatePanel = ({ type }: { type: AppTypeEnum | 'all' }) => {
edges: templateDetail.workflow.edges || [],
chatConfig: templateDetail.workflow.chatConfig || {},
templateId: templateDetail.templateId
}).then((res) => {
webPushTrack.useAppTemplate({
id: res,
name: templateDetail.name
});
return res;
});
},
{
@ -141,66 +164,167 @@ const TemplateCreatePanel = ({ type }: { type: AppTypeEnum | 'all' }) => {
in={!isCollapsed}
animateOpacity
transition={{ enter: { duration: 0.2 }, exit: { duration: 0.2 } }}
style={{ overflow: 'visible' }}
>
<Box
display={'grid'}
gridTemplateColumns={[
'repeat(2, 1fr) 160px',
'repeat(2, 1fr) 160px',
'repeat(3, 1fr) 160px',
'repeat(3, 1fr) 160px',
'repeat(4, 1fr) 160px',
'repeat(4, 1fr) 160px',
'repeat(5, 1fr) 160px'
'repeat(4, 1fr) 160px'
]}
gap={4}
gap={5}
>
{isFetchingTemplates && !templateData?.list?.length
? Array.from({ length: randomNumber }).map((_, index) => (
<Box
key={`skeleton-${index}`}
bg={'white'}
p={'19px'}
p={6}
borderRadius={'10px'}
border={'1px solid'}
borderColor={'myGray.200'}
>
<Flex alignItems={'center'} gap={2} mb={2}>
<SkeletonCircle size={'24px'} />
<Skeleton height={'16px'} flex={1} />
<SkeletonCircle size={'40px'} />
<Flex flexDirection={'column'} gap={2} flex={1}>
<Skeleton height={4} />
<Skeleton height={4} />
</Flex>
</Flex>
<Skeleton height={'12px'} />
</Box>
))
: templateData?.list.map((item, index) => (
<MyBox
key={index}
bg={'white'}
p={4}
borderRadius={'10px'}
border={'1px solid'}
borderColor={'myGray.200'}
cursor={'pointer'}
_hover={{
borderColor: 'primary.500',
boxShadow: 'md'
}}
isLoading={creatingTemplateId === item.templateId}
onClick={() => {
if (!creatingTemplateId) {
handleCreateFromTemplate(item.templateId);
}
}}
>
<Flex alignItems={'center'} gap={2} mb={2}>
<Avatar src={item.avatar} w={'24px'} h={'24px'} borderRadius={'4px'} />
<Box fontSize={'16px'} fontWeight={'medium'} color={'myGray.900'} noOfLines={1}>
{item.name}
</Box>
</Flex>
<Box fontSize={'12px'} color={'myGray.500'} noOfLines={1}>
{item.intro || ''}
</Box>
</MyBox>
))}
: templateData?.list.map((item, index) => {
return (
<MyBox
key={index}
bg={'white'}
p={6}
borderRadius={'10px'}
border={'1px solid'}
borderColor={'myGray.200'}
boxShadow={'none'}
cursor={'pointer'}
position={'relative'}
overflow={'hidden'}
bgImage={item.isPromoted ? "url('/imgs/app/templateCreateBg.svg')" : 'none'}
bgSize={'105% auto'}
bgPosition={'top'}
bgRepeat={'no-repeat'}
_hover={{
boxShadow:
'0 1px 2px 0 rgba(19, 51, 107, 0.10), 0 0 1px 0 rgba(19, 51, 107, 0.15)',
'& .template-content': {
filter: 'blur(5px)'
},
'& .hover-text': {
opacity: 1
}
}}
isLoading={creatingTemplateId === item.templateId}
onClick={() => {
if (!creatingTemplateId) {
handleCreateFromTemplate(item.templateId);
}
}}
display={'flex'}
gap={2}
alignItems={'center'}
>
<Flex
className="template-content"
gap={2}
alignItems={'center'}
transition={'filter 0.1s ease-in-out'}
w={'full'}
>
<Avatar src={item.avatar} w={10} h={10} borderRadius={'4px'} />
<Box flex={1} minW={0} h={12}>
<Flex
fontSize={'16px'}
fontWeight={'medium'}
color={'myGray.900'}
alignItems={'center'}
gap={1}
justifyContent={'space-between'}
>
<Flex alignItems={'center'} gap={'7px'} flex={1} minW={0}>
<Box className="textEllipsis2" whiteSpace={'nowrap'}>
{item.name}
</Box>
{item.isPromoted && (
<Box
p={'1px'}
bgGradient={'linear(201deg, #E6B3FF 13.74%, #006AFF 89.76%)'}
borderRadius={'full'}
flexShrink={0}
>
<Box
px={1.5}
fontSize={'10px'}
bg={'myGray.25'}
borderRadius={'full'}
color={'myGray.900'}
>
{t('app:template.recommended')}
</Box>
</Box>
)}
</Flex>
<MyIcon
name={
appTypeTagMap[item.type as keyof typeof appTypeTagMap]?.icon as any
}
w={4}
color={'myGray.900'}
flexShrink={0}
/>
</Flex>
<Box
fontSize={item.isPromoted ? '16px' : '14px'}
fontWeight={item.isPromoted ? 'medium' : 'normal'}
color={'myGray.500'}
noOfLines={1}
mt={0.5}
>
{(item.isPromoted ? item.recommendText || item.intro : item.intro) ||
t('app:templateMarket.no_intro')}
</Box>
</Box>
</Flex>
<Flex
className="hover-text"
position={'absolute'}
top={0}
left={0}
right={0}
bottom={0}
alignItems={'center'}
justifyContent={'center'}
opacity={0}
bg={' linear-gradient(180deg, rgba(255, 255, 255, 0.00) 0%, #FFF 100%)'}
transition={'opacity 0.1s ease-in-out'}
cursor={'pointer'}
>
<Flex
fontSize={'14px'}
fontWeight={'medium'}
color={'primary.700'}
rounded={'sm'}
px={5}
py={2.5}
_hover={{
bg: 'rgba(17, 24, 36, 0.05)'
}}
>
{t('app:templateMarket.Use')}
</Flex>
</Flex>
</MyBox>
);
})}
<Box
borderRadius={'10px'}
overflow={'hidden'}
@ -218,6 +342,8 @@ const TemplateCreatePanel = ({ type }: { type: AppTypeEnum | 'all' }) => {
p={0}
onMouseEnter={() => setIsHoverMoreButton(true)}
onMouseLeave={() => setIsHoverMoreButton(false)}
minH={20}
maxW={160}
>
<Box
as="img"

View File

@ -1,44 +1,14 @@
import React, { useRef } from 'react';
import { AppTypeEnum } from '@fastgpt/global/core/app/constants';
import type { AppTypeEnum } from '@fastgpt/global/core/app/constants';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { Box, Flex } from '@chakra-ui/react';
import { useTranslation } from 'next-i18next';
import { appTypeTagMap } from '../constant';
const AppTypeTag = ({ type }: { type: AppTypeEnum }) => {
const { t } = useTranslation();
const map = useRef({
[AppTypeEnum.simple]: {
label: t('app:type.Chat_Agent'),
icon: 'core/app/type/simple'
},
[AppTypeEnum.workflow]: {
label: t('app:type.Workflow bot'),
icon: 'core/app/type/workflow'
},
[AppTypeEnum.workflowTool]: {
label: t('app:toolType_workflow'),
icon: 'core/app/type/plugin'
},
[AppTypeEnum.httpPlugin]: {
label: t('app:type.Http plugin'),
icon: 'core/app/type/httpPlugin'
},
[AppTypeEnum.httpToolSet]: {
label: t('app:toolType_http'),
icon: 'core/app/type/httpPlugin'
},
[AppTypeEnum.mcpToolSet]: {
label: t('app:toolType_mcp'),
icon: 'core/app/type/mcpTools'
},
[AppTypeEnum.tool]: undefined,
[AppTypeEnum.folder]: undefined,
[AppTypeEnum.hidden]: undefined,
[AppTypeEnum.agent]: undefined
});
const data = map.current[type as keyof typeof map.current];
const data = appTypeTagMap[type as keyof typeof appTypeTagMap];
return data ? (
<Flex
@ -53,7 +23,7 @@ const AppTypeTag = ({ type }: { type: AppTypeEnum }) => {
>
<MyIcon name={data.icon as any} w={'14px'} color={'myGray.500'} />
<Box ml={1} fontSize={'mini'}>
{data.label}
{t(data.label)}
</Box>
</Flex>
) : null;

View File

@ -0,0 +1,33 @@
import { AppTypeEnum } from '@fastgpt/global/core/app/constants';
import { i18nT } from '@fastgpt/web/i18n/utils';
export const appTypeTagMap = {
[AppTypeEnum.simple]: {
label: i18nT('app:type.Chat_Agent'),
icon: 'core/app/type/simple'
},
[AppTypeEnum.workflow]: {
label: i18nT('app:type.Workflow bot'),
icon: 'core/app/type/workflow'
},
[AppTypeEnum.workflowTool]: {
label: i18nT('app:toolType_workflow'),
icon: 'core/app/type/plugin'
},
[AppTypeEnum.httpPlugin]: {
label: i18nT('app:type.Http plugin'),
icon: 'core/app/type/httpPlugin'
},
[AppTypeEnum.httpToolSet]: {
label: i18nT('app:toolType_http'),
icon: 'core/app/type/httpPlugin'
},
[AppTypeEnum.mcpToolSet]: {
label: i18nT('app:toolType_mcp'),
icon: 'core/app/type/mcpTools'
},
[AppTypeEnum.tool]: undefined,
[AppTypeEnum.folder]: undefined,
[AppTypeEnum.hidden]: undefined,
[AppTypeEnum.agent]: undefined
};

View File

@ -1,4 +1,4 @@
import React, { type Dispatch } from 'react';
import React, { useEffect, type Dispatch } from 'react';
import { FormControl, Flex, Input, Button, Box } from '@chakra-ui/react';
import { useForm } from 'react-hook-form';
import { LoginPageTypeEnum } from '@/web/support/user/login/constants';
@ -10,6 +10,10 @@ import { useTranslation } from 'next-i18next';
import FormLayout from './FormLayout';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import PolicyTip from './PolicyTip';
import { useSearchParams } from 'next/navigation';
import { UserErrEnum } from '@fastgpt/global/common/error/code/user';
import { useRouter } from 'next/router';
import { useMount } from 'ahooks';
interface Props {
setPageType: Dispatch<`${LoginPageTypeEnum}`>;
@ -25,6 +29,9 @@ const LoginForm = ({ setPageType, loginSuccess }: Props) => {
const { t } = useTranslation();
const { toast } = useToast();
const { feConfigs } = useSystemStore();
const query = useSearchParams();
const router = useRouter();
const {
register,
handleSubmit,
@ -41,13 +48,28 @@ const LoginForm = ({ setPageType, loginSuccess }: Props) => {
code
})
);
toast({
title: t('login:login_success'),
status: 'success'
});
},
{
refreshDeps: [loginSuccess]
refreshDeps: [loginSuccess],
successToast: t('login:login_success'),
onError: (error: any) => {
// 密码错误,需要清空 query 参数
if (error.statusText === UserErrEnum.account_psw_error) {
router.replace(
router.pathname,
{
query: {
...router.query,
u: '',
p: ''
}
},
{
shallow: false
}
);
}
}
}
);
@ -71,6 +93,17 @@ const LoginForm = ({ setPageType, loginSuccess }: Props) => {
.join('/');
})();
useMount(() => {
const username = query.get('u');
const password = query.get('p');
if (username && password) {
onclickLogin({
username,
password
});
}
});
return (
<FormLayout setPageType={setPageType} pageType={LoginPageTypeEnum.passwordLogin}>
<Box

View File

@ -11,7 +11,6 @@ import {
import { LoginPageTypeEnum } from '@/web/support/user/login/constants';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import type { LoginSuccessResponse } from '@/global/support/api/userRes.d';
import { useUserStore } from '@/web/support/user/useUserStore';
import { useChatStore } from '@/web/core/chat/context/useChatStore';
import dynamic from 'next/dynamic';
import Script from 'next/script';

View File

@ -10,7 +10,7 @@ export type ListParams = {
isQuickTemplate?: boolean;
randomNumber?: number;
type?: AppTypeEnum | 'all';
excludeIds?: string[];
excludeIds?: string;
};
export type ListResponse = {
@ -24,20 +24,30 @@ async function handler(
): Promise<ListResponse> {
await authCert({ req, authToken: true });
const { isQuickTemplate = false, randomNumber = 0, type = 'all', excludeIds = [] } = req.query;
const { isQuickTemplate = false, randomNumber = 0, type = 'all', excludeIds } = req.query;
const parsedExcludeIds: string[] = (() => {
if (!excludeIds) return [];
try {
return JSON.parse(excludeIds);
} catch (error) {
console.error('Failed to parse excludeIds:', error);
return [];
}
})();
const templateMarketItems = await getAppTemplatesAndLoadThem();
let filteredItems = templateMarketItems.filter((item) => {
if (!item.isActive) return false;
if (type === 'all' && !ToolTypeList.includes(item.type as AppTypeEnum)) return true;
if (type === 'all' && !(ToolTypeList.includes(item.type as AppTypeEnum) && randomNumber > 0))
return true;
if (item.type === type) return true;
return false;
});
const total = filteredItems.length;
if (excludeIds && excludeIds.length > 0) {
filteredItems = filteredItems.filter((item) => !excludeIds.includes(item.templateId));
if (parsedExcludeIds && parsedExcludeIds.length > 0) {
filteredItems = filteredItems.filter((item) => !parsedExcludeIds.includes(item.templateId));
}
if (isQuickTemplate) {
@ -63,6 +73,8 @@ async function handler(
templateId: item.templateId,
name: item.name,
intro: item.intro,
recommendText: item.recommendText,
isPromoted: item.isPromoted,
avatar: item.avatar,
tags: item.tags,
type: item.type,

View File

@ -146,7 +146,7 @@ const ToolKitProvider = ({ MenuIcon }: { MenuIcon: JSX.Element }) => {
}, [tools, searchText, selectedTagIds, installedFilter, tags, i18n.language]);
return (
<Box h={'full'} pr={6}>
<Box h={'full'}>
<MyBox
bg={'white'}
h={'full'}

View File

@ -12,19 +12,23 @@ import {
type AppTemplateSchemaType,
type TemplateTypeSchemaType
} from '@fastgpt/global/core/app/type';
import { appWorkflow2Form } from '@fastgpt/global/core/app/utils';
import { form2AppWorkflow } from '@/web/core/app/utils';
import MyBox from '@fastgpt/web/components/common/MyBox';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import { getTemplateMarketItemDetail } from '@/web/core/app/api/template';
import { postCreateApp } from '@/web/core/app/api';
import { webPushTrack } from '@/web/common/middle/tracks/utils';
import Avatar from '@fastgpt/web/components/common/Avatar';
import AppTypeTag from '@/pageComponents/dashboard/agent/TypeTag';
import dynamic from 'next/dynamic';
import SearchInput from '@fastgpt/web/components/common/Input/SearchInput';
import MySelect from '@fastgpt/web/components/common/MySelect';
import EmptyTip from '@fastgpt/web/components/common/EmptyTip';
import { useSystem } from '@fastgpt/web/hooks/useSystem';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { appTypeTagMap } from '@/pageComponents/dashboard/constant';
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
const UseGuideModal = dynamic(() => import('@/components/common/Modal/UseGuideModal'), {
ssr: false
});
@ -40,7 +44,6 @@ const TemplateMarket = ({
}) => {
const router = useRouter();
const { t } = useTranslation();
const { feConfigs } = useSystemStore();
const { isPc } = useSystem();
const containerRef = useRef<HTMLDivElement>(null);
@ -51,7 +54,7 @@ const TemplateMarket = ({
} = router.query as { parentId?: ParentIdType; type?: string; appType?: AppTypeEnum | 'all' };
const [searchKey, setSearchKey] = useState('');
const filterTemplateTags = useMemo(() => {
const tagsWithTemplates = useMemo(() => {
return templateTags
.map((tag) => {
const templates = templateList.filter((template) => template.tags.includes(tag.typeId));
@ -67,6 +70,11 @@ const TemplateMarket = ({
async (template: AppTemplateSchemaType) => {
const templateDetail = await getTemplateMarketItemDetail(template.templateId);
if (template.type === AppTypeEnum.simple) {
const completeWorkflow = form2AppWorkflow(templateDetail.workflow, t);
templateDetail.workflow = completeWorkflow;
}
return postCreateApp({
parentId,
avatar: template.avatar,
@ -97,99 +105,132 @@ const TemplateMarket = ({
const TemplateCard = useCallback(
({ item }: { item: AppTemplateSchemaType }) => {
const { t } = useTranslation();
const icon = appTypeTagMap[item.type as keyof typeof appTypeTagMap]?.icon;
return (
<MyBox
key={item.templateId}
lineHeight={1.5}
h="100%"
pt={4}
pb={3}
px={4}
border={'base'}
boxShadow={'2'}
bg={'white'}
borderRadius={'10px'}
position={'relative'}
w={'100%'}
minWidth={0}
py={3}
px={6}
border={'1px solid'}
borderColor={'myGray.250'}
borderRadius={'lg'}
display={'flex'}
flexDirection={'column'}
gap={4}
position={'relative'}
overflow={'hidden'}
bgImage={item.isPromoted ? "url('/imgs/app/templateBg.svg')" : 'none'}
bgSize={'105% auto'}
bgPosition={'top'}
bgRepeat={'no-repeat'}
_hover={{
borderColor: 'primary.300',
boxShadow: '1.5',
'& .buttons': {
display: 'flex'
}
boxShadow: '0 1px 2px 0 rgba(19, 51, 107, 0.10), 0 0 1px 0 rgba(19, 51, 107, 0.15)'
}}
>
<HStack>
<Avatar src={item.avatar} borderRadius={'sm'} w={'1.5rem'} h={'1.5rem'} />
<Box flex={'1 0 0'} color={'myGray.900'} fontWeight={500}>
{item.name}
</Box>
<Box mr={'-1rem'}>
<AppTypeTag type={item.type as AppTypeEnum} />
</Box>
<Avatar src={item.avatar} borderRadius={'4px'} w={10} h={10} />
<Box flex={1} />
<Flex w={10} h={10} justifyContent={'center'} alignItems={'center'}>
<MyIcon name={icon as any} w={4} color={'myGray.900'} />
</Flex>
</HStack>
<Box
flex={['1 0 48px', '1 0 56px']}
mt={3}
pr={1}
textAlign={'justify'}
wordBreak={'break-all'}
fontSize={'xs'}
color={'myGray.500'}
>
<Box className={'textEllipsis2'}>{item.intro || t('app:templateMarket.no_intro')}</Box>
<Box w={'100%'} minWidth={0}>
<Flex
color={'myGray.900'}
fontWeight={'medium'}
fontSize={'18px'}
alignItems={'center'}
gap={'7px'}
>
{item.name}
{item.isPromoted && (
<Box
p={'1px'}
bgGradient={'linear(201deg, #E6B3FF 13.74%, #006AFF 89.76%)'}
borderRadius={'full'}
flexShrink={0}
>
<Box
px={1.5}
fontSize={'10px'}
bg={'myGray.25'}
borderRadius={'full'}
color={'myGray.900'}
>
{t('app:template.recommended')}
</Box>
</Box>
)}
</Flex>
<MyTooltip
label={item.isPromoted ? item.recommendText || item.intro : item.intro}
shouldWrapChildren={false}
placement={'top'}
hasArrow={false}
offset={[0, 3]}
>
<Box
w={'100%'}
minWidth={0}
color={'myGray.500'}
fontSize={item.isPromoted ? '16px' : '14px'}
fontWeight={item.isPromoted ? 'medium' : 'normal'}
overflow={'hidden'}
textOverflow={'ellipsis'}
whiteSpace={'nowrap'}
>
{(item.isPromoted ? item.recommendText || item.intro : item.intro) ||
t('app:templateMarket.no_intro')}
</Box>
</MyTooltip>
</Box>
<Box w={'full'} fontSize={'mini'}>
<Box color={'myGray.500'}>{`by ${item.author || feConfigs.systemTitle}`}</Box>
<Flex
className="buttons"
display={'none'}
justifyContent={'center'}
alignItems={'center'}
position={'absolute'}
borderRadius={'lg'}
w={'full'}
h={'full'}
left={0}
right={0}
bottom={1}
height={'40px'}
bg={'white'}
zIndex={1}
gap={2}
>
{((item.userGuide?.type === 'markdown' && item.userGuide?.content) ||
(item.userGuide?.type === 'link' && item.userGuide?.link)) && (
<UseGuideModal
title={item.name}
iconSrc={item.avatar}
text={item.userGuide?.content}
link={item.userGuide?.link}
>
{({ onClick }) => (
<Button variant={'whiteBase'} h={6} rounded={'sm'} onClick={onClick}>
{t('app:templateMarket.template_guide')}
</Button>
)}
</UseGuideModal>
)}
<Button
variant={'whiteBase'}
h={6}
rounded={'sm'}
onClick={() => onUseTemplate(item)}
<Flex justifyContent={'space-between'} alignItems={'center'}>
{(item.userGuide?.type === 'markdown' && item.userGuide?.content) ||
(item.userGuide?.type === 'link' && item.userGuide?.link) ? (
<UseGuideModal
title={item.name}
iconSrc={item.avatar}
text={item.userGuide?.content}
link={item.userGuide?.link}
>
{t('app:templateMarket.Use')}
</Button>
</Flex>
</Box>
{({ onClick }) => (
<Flex
cursor={'pointer'}
color={'myGray.500'}
gap={1}
fontSize={'14px'}
onClick={onClick}
_hover={{
color: 'primary.600'
}}
>
<MyIcon name="book" w={4} />
{t('app:templateMarket.template_guide')}
</Flex>
)}
</UseGuideModal>
) : (
<Box></Box>
)}
<Button
variant={'transparentBase'}
px={5}
py={2.5}
rounded={'sm'}
color={'primary.700'}
onClick={() => onUseTemplate(item)}
>
{t('app:templateMarket.Use')}
</Button>
</Flex>
</MyBox>
);
},
[onUseTemplate, feConfigs.systemTitle]
[onUseTemplate]
);
// Scroll to the selected template type
@ -204,20 +245,20 @@ const TemplateMarket = ({
return (
<MyBox ref={containerRef} h={'100%'} isLoading={isCreating}>
<Flex flexDirection={'column'} h={'100%'} py={5}>
<Flex mb={4} alignItems={'center'} px={5}>
<Flex flexDirection={'column'} h={'100%'} py={6}>
<Flex alignItems={'center'} px={6} mb={5}>
{isPc ? (
<Box fontSize={'lg'} color={'myGray.900'} fontWeight={500}>
<Box fontSize={'lg'} color={'myGray.900'} fontWeight={'medium'}>
{t('app:template_market')}
</Box>
) : (
MenuIcon
)}
<Box flex={1} />
<Box mr={3}>
<Box mr={2}>
<SearchInput
h={'34px'}
h={9}
w={240}
bg={'white'}
placeholder={t('app:templateMarket.Search_template')}
value={searchKey}
@ -225,7 +266,8 @@ const TemplateMarket = ({
/>
</Box>
<MySelect
h={'34px'}
h={9}
w={124}
bg={'white'}
value={appType}
list={[
@ -258,7 +300,7 @@ const TemplateMarket = ({
/>
</Flex>
<Box flex={'1 0 0'} px={5} overflow={'auto'}>
<Box flex={'1 0 0'} px={6} overflow={'auto'}>
{searchKey ? (
<>
<Box fontSize={'lg'} color={'myGray.900'} mb={4}>
@ -294,11 +336,17 @@ const TemplateMarket = ({
})()}
</>
) : (
<>
{filterTemplateTags.map((item) => {
<Flex flexDirection={'column'} gap={5}>
{tagsWithTemplates.map((item) => {
return (
<Box key={item.typeId}>
<Box id={item.typeId} color={'myGray.900'} mb={4} fontWeight={500} pt={2}>
<Box
id={item.typeId}
color={'myGray.900'}
mb={4}
fontWeight={'medium'}
fontSize={'14px'}
>
{t(item.typeName as any)}
</Box>
<Grid
@ -307,11 +355,10 @@ const TemplateMarket = ({
'repeat(2,1fr)',
'repeat(3,1fr)',
'repeat(3,1fr)',
'repeat(4,1fr)'
'repeat(4,1fr)',
'repeat(5,1fr)'
]}
gridGap={4}
alignItems={'stretch'}
pb={5}
>
{item.templates.map((item) => (
<TemplateCard key={item.templateId} item={item} />
@ -320,7 +367,7 @@ const TemplateMarket = ({
</Box>
);
})}
</>
</Flex>
)}
</Box>
</Flex>

View File

@ -1,5 +1,5 @@
import type { ListParams, ListResponse } from '@/pages/api/core/app/template/list';
import { GET } from '@/web/common/api/request';
import { GET, POST } from '@/web/common/api/request';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import type { AppTemplateSchemaType, TemplateTypeSchemaType } from '@fastgpt/global/core/app/type';
import { defaultTemplateTypes } from '@fastgpt/web/core/workflow/constants';

View File

@ -1,7 +1,7 @@
import {
parsePromptToolCall,
promptToolCallMessageRewrite
} from '@fastgpt/service/core/ai/llm/promptToolCall';
} from '@fastgpt/service/core/ai/llm/promptCall/index';
import type { ChatCompletionMessageParam, ChatCompletionTool } from '@fastgpt/global/core/ai/type';
import { describe, expect, it } from 'vitest';

View File

@ -1187,3 +1187,167 @@ describe('checkNodeRunStatus - 边界情况测试', () => {
expect(checkNodeRunStatus({ nodesMap, node: nodeA, runtimeEdges: edges })).toBe('run');
});
});
// Test suite: tool-call scenarios for checkNodeRunStatus — verifies run/wait/skip
// decisions when tool / toolSet / agent nodes participate in the edge graph.
describe('checkNodeRunStatus - 工具调用场景测试', () => {
it('工具调用1: Tool节点作为入口节点 (无workflowStart时)', () => {
// Scenario: when the workflow has no workflowStart/pluginInput node, a tool node may act as an entry node
// Tool → Process → End
const toolNode = createNode('tool1', FlowNodeTypeEnum.tool);
const processNode = createNode('process');
const endNode = createNode('end');
const nodesMap = new Map<string, RuntimeNodeItemType>([
['tool1', toolNode],
['process', processNode],
['end', endNode]
]);
// Case 1: the tool node is the entry (it has no incoming edges)
const edges1: RuntimeEdgeItemType[] = [
createEdge('tool1', 'process', 'waiting'),
createEdge('process', 'end', 'waiting')
];
// As an entry node, the tool node should be runnable
expect(checkNodeRunStatus({ nodesMap, node: toolNode, runtimeEdges: edges1 })).toBe('run');
// NOTE: since the tool node has no incoming edges (it is the entry), the process node also has no edge traceable back to a start node;
// so in this scenario the process node returns 'run' as well, because both commonEdges and recursiveEdgeGroups are empty
expect(checkNodeRunStatus({ nodesMap, node: processNode, runtimeEdges: edges1 })).toBe('run');
// Case 2: after the tool node finishes, process can run but end must still wait
const edges2: RuntimeEdgeItemType[] = [
createEdge('tool1', 'process', 'active'),
createEdge('process', 'end', 'waiting')
];
expect(checkNodeRunStatus({ nodesMap, node: processNode, runtimeEdges: edges2 })).toBe('run');
// The end node's incoming edge is still 'waiting', so it must wait for process to finish
expect(checkNodeRunStatus({ nodesMap, node: endNode, runtimeEdges: edges2 })).toBe('wait');
// Case 2.1: once process finishes, end can run
const edges2_1: RuntimeEdgeItemType[] = [
createEdge('tool1', 'process', 'active'),
createEdge('process', 'end', 'active')
];
expect(checkNodeRunStatus({ nodesMap, node: endNode, runtimeEdges: edges2_1 })).toBe('run');
// Case 3: with a workflowStart present, the tool node is no longer an entry node
const startNode = createNode('start', FlowNodeTypeEnum.workflowStart);
const nodesMapWithStart = new Map<string, RuntimeNodeItemType>([
['start', startNode],
['tool1', toolNode],
['process', processNode],
['end', endNode]
]);
const edges3: RuntimeEdgeItemType[] = [
createEdge('start', 'tool1', 'active'),
createEdge('tool1', 'process', 'waiting'),
createEdge('process', 'end', 'waiting')
];
// Now the tool node is not an entry node: it runs only after start activates it
expect(
checkNodeRunStatus({ nodesMap: nodesMapWithStart, node: toolNode, runtimeEdges: edges3 })
).toBe('run');
expect(
checkNodeRunStatus({ nodesMap: nodesMapWithStart, node: processNode, runtimeEdges: edges3 })
).toBe('wait');
// After the tool finishes, process can run
const edges4: RuntimeEdgeItemType[] = [
createEdge('start', 'tool1', 'active'),
createEdge('tool1', 'process', 'active'),
createEdge('process', 'end', 'waiting')
];
expect(
checkNodeRunStatus({ nodesMap: nodesMapWithStart, node: processNode, runtimeEdges: edges4 })
).toBe('run');
});
it('工具调用2: ToolSet节点与条件分支和循环组合 (Agent → ToolSet → Tool1/Tool2 → Result → Agent)', () => {
// Scenario: the agent calls a tool set; the tool set conditionally selects a tool to execute, and loop calls are supported
// Start → Agent → ToolSet → (Tool1 | Tool2) → Result → Agent (loop)
const nodeStart = createNode('start', FlowNodeTypeEnum.workflowStart);
const agentNode = createNode('agent', FlowNodeTypeEnum.agent);
const toolSetNode = createNode('toolSet', FlowNodeTypeEnum.toolSet);
const tool1Node = createNode('tool1', FlowNodeTypeEnum.tool);
const tool2Node = createNode('tool2', FlowNodeTypeEnum.tool);
const resultNode = createNode('result');
const nodesMap = new Map<string, RuntimeNodeItemType>([
['start', nodeStart],
['agent', agentNode],
['toolSet', toolSetNode],
['tool1', tool1Node],
['tool2', tool2Node],
['result', resultNode]
]);
// Case 1: first execution — the agent selects Tool1
const edges1: RuntimeEdgeItemType[] = [
createEdge('start', 'agent', 'active'),
createEdge('agent', 'toolSet', 'active'),
createEdge('toolSet', 'tool1', 'active'), // Tool1 selected
createEdge('toolSet', 'tool2', 'skipped'), // Tool2 not selected
createEdge('tool1', 'result', 'waiting'),
createEdge('tool2', 'result', 'skipped'),
createEdge('result', 'agent', 'waiting') // loop edge still waiting
];
expect(checkNodeRunStatus({ nodesMap, node: agentNode, runtimeEdges: edges1 })).toBe('wait');
expect(checkNodeRunStatus({ nodesMap, node: toolSetNode, runtimeEdges: edges1 })).toBe('run');
expect(checkNodeRunStatus({ nodesMap, node: tool1Node, runtimeEdges: edges1 })).toBe('run');
expect(checkNodeRunStatus({ nodesMap, node: tool2Node, runtimeEdges: edges1 })).toBe('skip');
expect(checkNodeRunStatus({ nodesMap, node: resultNode, runtimeEdges: edges1 })).toBe('wait');
// Case 2: Tool1 has finished; the result node processes its output
const edges2: RuntimeEdgeItemType[] = [
createEdge('start', 'agent', 'active'),
createEdge('agent', 'toolSet', 'active'),
createEdge('toolSet', 'tool1', 'active'),
createEdge('toolSet', 'tool2', 'skipped'),
createEdge('tool1', 'result', 'active'), // Tool1 done
createEdge('tool2', 'result', 'skipped'),
createEdge('result', 'agent', 'waiting')
];
expect(checkNodeRunStatus({ nodesMap, node: resultNode, runtimeEdges: edges2 })).toBe('run');
// Case 3: loop back to the agent; the second call selects Tool2
const edges3: RuntimeEdgeItemType[] = [
createEdge('start', 'agent', 'active'),
createEdge('agent', 'toolSet', 'active'),
createEdge('toolSet', 'tool1', 'skipped'), // Tool1 not selected
createEdge('toolSet', 'tool2', 'active'), // Tool2 selected
createEdge('tool1', 'result', 'skipped'),
createEdge('tool2', 'result', 'active'), // Tool2 done
createEdge('result', 'agent', 'active') // loop edge activated
];
// The agent has two active incoming edges, from start and from result
expect(checkNodeRunStatus({ nodesMap, node: agentNode, runtimeEdges: edges3 })).toBe('run');
expect(checkNodeRunStatus({ nodesMap, node: tool1Node, runtimeEdges: edges3 })).toBe('skip');
expect(checkNodeRunStatus({ nodesMap, node: tool2Node, runtimeEdges: edges3 })).toBe('run');
expect(checkNodeRunStatus({ nodesMap, node: resultNode, runtimeEdges: edges3 })).toBe('run');
// Case 4: the loop exits; no further tool calls are made
const edges4: RuntimeEdgeItemType[] = [
createEdge('start', 'agent', 'active'),
createEdge('agent', 'toolSet', 'skipped'), // tool set no longer called
createEdge('toolSet', 'tool1', 'skipped'),
createEdge('toolSet', 'tool2', 'skipped'),
createEdge('tool1', 'result', 'skipped'),
createEdge('tool2', 'result', 'skipped'),
createEdge('result', 'agent', 'skipped') // loop exited
];
expect(checkNodeRunStatus({ nodesMap, node: agentNode, runtimeEdges: edges4 })).toBe('run');
expect(checkNodeRunStatus({ nodesMap, node: toolSetNode, runtimeEdges: edges4 })).toBe('skip');
expect(checkNodeRunStatus({ nodesMap, node: tool1Node, runtimeEdges: edges4 })).toBe('skip');
expect(checkNodeRunStatus({ nodesMap, node: tool2Node, runtimeEdges: edges4 })).toBe('skip');
expect(checkNodeRunStatus({ nodesMap, node: resultNode, runtimeEdges: edges4 })).toBe('skip');
});
});