mirror of https://github.com/labring/FastGPT.git
synced 2025-12-26 04:32:50 +00:00

App run node update (#2542)

* feat(workflow): allow apps to be invoked like plugins (#2521)
* fix type
* Encapsulate SSE response methods (#2530)
* perf: sse response fn
* perf: sse response
* fix: ts
* perf: not ssl copy
* perf: myselect auto scroll
* perf: run app code
* fix: app plugin (#2538)

Co-authored-by: heheer <heheer@sealos.io>

This commit is contained in:
parent 67445b40bc
commit 450167c951

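The thread running through nearly every hunk below is the SSE refactor: dispatchers no longer call responseWrite with their own stream/detail guards, they receive an optional workflowStreamResponse callback and fire typed events through it. A minimal TypeScript sketch of that pattern follows, assuming the event names visible at the call sites in this diff; the createWorkflowStreamResponse factory is a hypothetical illustration, not the repository's actual helper:

    import type { NextApiResponse } from 'next';

    // Event names as they appear at the call sites in this diff.
    // The string values are an assumption; only the member names are shown.
    enum SseResponseEventEnum {
      answer = 'answer',
      fastAnswer = 'fastAnswer',
      toolCall = 'toolCall',
      toolParams = 'toolParams',
      toolResponse = 'toolResponse',
      flowNodeStatus = 'flowNodeStatus',
      interactive = 'interactive'
    }

    type WorkflowResponseType = (args: {
      write?: (text: string) => void; // optional pre-built writer, see the streamResponse helpers
      event: SseResponseEventEnum;
      data: Record<string, any>;
    }) => void;

    // Hypothetical factory: one place for the res/stream/detail guards that
    // every dispatcher previously repeated around responseWrite.
    function createWorkflowStreamResponse({
      res,
      stream,
      detail
    }: {
      res?: NextApiResponse;
      stream: boolean;
      detail: boolean;
    }): WorkflowResponseType | undefined {
      if (!res || !stream) return undefined;
      return ({ event, data }) => {
        // Non-detail clients only receive plain answer chunks.
        if (!detail && event !== SseResponseEventEnum.answer) return;
        res.write(`event: ${event}\ndata: ${JSON.stringify(data)}\n\n`);
      };
    }

Because the callback is optional, call sites degrade to a no-op (`workflowStreamResponse?.(...)`) when no client is streaming, which is why the per-dispatcher `if (res && stream && detail)` blocks disappear throughout this commit.
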
@@ -18,4 +18,6 @@ weight: 813

1.
2. New - Plugin custom inputs support radio (single-select) boxes
3. New - Plugin outputs can designate specific fields as tool-call results
4. New - Plugins support configuring usage guides, global variables, and file input
+5. Improved - SSE response code.
+6. Improved - Copying works in non-HTTPS environments (unless textarea copy is also unsupported)

@@ -128,6 +128,7 @@ export enum NodeInputKeyEnum {

  // read files
+  fileUrlList = 'fileUrlList',

  // user select
  userSelectOptions = 'userSelectOptions'
}

@@ -106,6 +106,7 @@ export enum FlowNodeTypeEnum {
  contentExtract = 'contentExtract',
  httpRequest468 = 'httpRequest468',
  runApp = 'app',
+  appModule = 'appModule',
  pluginModule = 'pluginModule',
  pluginInput = 'pluginInput',
  pluginOutput = 'pluginOutput',

@@ -19,6 +19,7 @@ import { RuntimeNodeItemType } from '../runtime/type';
import { RuntimeEdgeItemType } from './edge';
import { ReadFileNodeResponse } from '../template/system/readFiles/type';
import { UserSelectOptionType } from '../template/system/userSelect/type';
+import { WorkflowResponseType } from '../../../../service/core/workflow/dispatch/type';

/* workflow props */
export type ChatDispatchProps = {

@@ -36,9 +37,9 @@ export type ChatDispatchProps = {
  query: UserChatItemValueItemType[]; // trigger query
  chatConfig: AppSchema['chatConfig'];
  stream: boolean;
  detail: boolean; // response detail
  maxRunTimes: number;
  isToolCall?: boolean;
+  workflowStreamResponse?: WorkflowResponseType;
};

export type ModuleDispatchProps<T> = ChatDispatchProps & {

@@ -236,7 +236,7 @@ export const textAdaptGptResponse = ({
  finish_reason?: null | 'stop';
  extraData?: Object;
}) => {
-  return JSON.stringify({
+  return {
    ...extraData,
    id: '',
    object: '',

@@ -252,7 +252,7 @@ export const textAdaptGptResponse = ({
        finish_reason
      }
    ]
-  });
+  };
};

/* Update runtimeNode's outputs with interactive data from history */

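The two hunks above change textAdaptGptResponse from returning a pre-serialized JSON string to returning the chunk object itself; serialization is left to the shared SSE writer. A hedged sketch of the resulting call-site shape, building on the sketch at the top of this page (the writer comment is illustrative, not the library's exact code):

    // textAdaptGptResponse now yields the chunk object; the stream callback
    // serializes it once, at write time, instead of every caller pre-stringifying.
    const chunk = textAdaptGptResponse({ text: 'Hello' });

    workflowStreamResponse?.({
      event: SseResponseEventEnum.answer,
      data: chunk // written out as `data: ${JSON.stringify(chunk)}` by the callback
    });
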
@@ -16,6 +16,7 @@ import { RunAppModule } from './system/runApp/index';
import { PluginInputModule } from './system/pluginInput';
import { PluginOutputModule } from './system/pluginOutput';
import { RunPluginModule } from './system/runPlugin';
+import { RunAppPluginModule } from './system/runAppPlugin';
import { AiQueryExtension } from './system/queryExtension';

import type { FlowNodeTemplateType } from '../type/node';

@@ -44,8 +45,8 @@ const systemNodes: FlowNodeTemplateType[] = [
  LafModule,
  IfElseNode,
  VariableUpdateNode,
-  CodeNode,
-  RunAppModule
+  CodeNode
+  // RunAppModule
];
/* app flow module templates */
export const appSystemModuleTemplates: FlowNodeTemplateType[] = [

@@ -70,5 +71,6 @@ export const moduleTemplatesFlat: FlowNodeTemplateType[] = [
-  )
+  ),
  EmptyNode,
-  RunPluginModule
+  RunPluginModule,
+  RunAppPluginModule
];

@@ -73,3 +73,12 @@ export const Input_Template_Text_Quote: FlowNodeInputItemType = {
  description: i18nT('app:document_quote_tip'),
  valueType: WorkflowIOValueTypeEnum.string
};
+export const Input_Template_File_Link: FlowNodeInputItemType = {
+  key: NodeInputKeyEnum.fileUrlList,
+  renderTypeList: [FlowNodeInputTypeEnum.reference],
+  required: true,
+  label: i18nT('app:workflow.user_file_input'),
+  debugLabel: i18nT('app:workflow.user_file_input'),
+  description: i18nT('app:workflow.user_file_input_desc'),
+  valueType: WorkflowIOValueTypeEnum.arrayString
+};

@@ -0,0 +1,19 @@
+import { FlowNodeTemplateTypeEnum } from '../../constants';
+import { FlowNodeTypeEnum } from '../../node/constant';
+import { FlowNodeTemplateType } from '../../type/node';
+import { getHandleConfig } from '../utils';
+
+export const RunAppPluginModule: FlowNodeTemplateType = {
+  id: FlowNodeTypeEnum.appModule,
+  templateType: FlowNodeTemplateTypeEnum.other,
+  flowNodeType: FlowNodeTypeEnum.appModule,
+  sourceHandle: getHandleConfig(true, true, true, true),
+  targetHandle: getHandleConfig(true, true, true, true),
+  intro: '',
+  name: '',
+  showStatus: false,
+  isTool: false,
+  version: '481',
+  inputs: [], // [{key:'pluginId'},...]
+  outputs: []
+};

@@ -22,5 +22,3 @@ type UserSelectInteractive = {
};

export type InteractiveNodeResponseItemType = InteractiveBasicType & UserSelectInteractive;

-export type UserInteractiveType = UserSelectInteractive;

@@ -1,10 +1,16 @@
-import { FlowNodeInputTypeEnum, FlowNodeOutputTypeEnum, FlowNodeTypeEnum } from './node/constant';
+import {
+  chatHistoryValueDesc,
+  FlowNodeInputTypeEnum,
+  FlowNodeOutputTypeEnum,
+  FlowNodeTypeEnum
+} from './node/constant';
import {
  WorkflowIOValueTypeEnum,
  NodeInputKeyEnum,
  VariableInputEnum,
  variableMap,
-  VARIABLE_NODE_ID
+  VARIABLE_NODE_ID,
+  NodeOutputKeyEnum
} from './constants';
import { FlowNodeInputItemType, FlowNodeOutputItemType, ReferenceValueProps } from './type/io.d';
import { StoreNodeItemType } from './type/node';

@@ -25,6 +31,7 @@ import {
import { IfElseResultEnum } from './template/system/ifElse/constant';
import { RuntimeNodeItemType } from './runtime/type';
import { getReferenceVariableValue } from './runtime/utils';
+import { Input_Template_History, Input_Template_UserChatInput } from './template/input';

export const getHandleId = (nodeId: string, type: 'source' | 'target', key: string) => {
  return `${nodeId}-${type}-${key}`;

@@ -147,9 +154,11 @@ export const getModuleInputUiField = (input: FlowNodeInputItemType) => {
  return {};
};

-export const pluginData2FlowNodeIO = (
-  nodes: StoreNodeItemType[]
-): {
+export const pluginData2FlowNodeIO = ({
+  nodes
+}: {
+  nodes: StoreNodeItemType[];
+}): {
  inputs: FlowNodeInputItemType[];
  outputs: FlowNodeOutputItemType[];
} => {

@@ -180,6 +189,80 @@ export const pluginData2FlowNodeIO = (
    };
};

+export const appData2FlowNodeIO = ({
+  chatConfig
+}: {
+  chatConfig?: AppChatConfigType;
+}): {
+  inputs: FlowNodeInputItemType[];
+  outputs: FlowNodeOutputItemType[];
+} => {
+  const variableInput = !chatConfig?.variables
+    ? []
+    : chatConfig.variables.map((item) => {
+        const renderTypeMap = {
+          [VariableInputEnum.input]: [FlowNodeInputTypeEnum.input, FlowNodeInputTypeEnum.reference],
+          [VariableInputEnum.textarea]: [
+            FlowNodeInputTypeEnum.textarea,
+            FlowNodeInputTypeEnum.reference
+          ],
+          [VariableInputEnum.select]: [FlowNodeInputTypeEnum.select],
+          [VariableInputEnum.custom]: [
+            FlowNodeInputTypeEnum.input,
+            FlowNodeInputTypeEnum.reference
+          ],
+          default: [FlowNodeInputTypeEnum.reference]
+        };
+
+        return {
+          key: item.key,
+          renderTypeList: renderTypeMap[item.type] || renderTypeMap.default,
+          label: item.label,
+          debugLabel: item.label,
+          description: '',
+          valueType: WorkflowIOValueTypeEnum.any,
+          required: item.required,
+          list: item.enums.map((enumItem) => ({
+            label: enumItem.value,
+            value: enumItem.value
+          }))
+        };
+      });
+
+  // const showFileLink =
+  //   chatConfig?.fileSelectConfig?.canSelectFile || chatConfig?.fileSelectConfig?.canSelectImg;
+
+  return {
+    inputs: [
+      Input_Template_History,
+      Input_Template_UserChatInput,
+      // ...(showFileLink ? [Input_Template_File_Link] : []),
+      ...variableInput
+    ],
+    outputs: [
+      {
+        id: NodeOutputKeyEnum.history,
+        key: NodeOutputKeyEnum.history,
+        required: true,
+        label: 'core.module.output.label.New context',
+        description: 'core.module.output.description.New context',
+        valueType: WorkflowIOValueTypeEnum.chatHistory,
+        valueDesc: chatHistoryValueDesc,
+        type: FlowNodeOutputTypeEnum.static
+      },
+      {
+        id: NodeOutputKeyEnum.answerText,
+        key: NodeOutputKeyEnum.answerText,
+        required: false,
+        label: 'core.module.output.label.Ai response content',
+        description: 'core.module.output.description.Ai response content',
+        valueType: WorkflowIOValueTypeEnum.string,
+        type: FlowNodeOutputTypeEnum.static
+      }
+    ]
+  };
+};
+
export const formatEditorVariablePickerIcon = (
  variables: { key: string; label: string; type?: `${VariableInputEnum}`; required?: boolean }[]
): EditorVariablePickerType[] => {

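appData2FlowNodeIO is the app-side counterpart of pluginData2FlowNodeIO: rather than reading pluginInput/pluginOutput nodes, it derives the node's inputs from the app's chat config variables and always exposes history and answerText outputs. A usage sketch under an invented chat config (the literal below is illustrative only):

    // Hypothetical chat config with one required select variable.
    const { inputs, outputs } = appData2FlowNodeIO({
      chatConfig: {
        variables: [
          {
            key: 'language',
            label: 'Language',
            type: VariableInputEnum.select,
            required: true,
            enums: [{ value: 'en' }, { value: 'zh' }]
          }
        ]
      }
    });
    // inputs:  [Input_Template_History, Input_Template_UserChatInput, <language select input>]
    // outputs: [history (chatHistory), answerText (string)]
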
@@ -1,6 +1,6 @@
import { FlowNodeTemplateType } from '@fastgpt/global/core/workflow/type/node.d';
import { FlowNodeTypeEnum, defaultNodeVersion } from '@fastgpt/global/core/workflow/node/constant';
-import { pluginData2FlowNodeIO } from '@fastgpt/global/core/workflow/utils';
+import { appData2FlowNodeIO, pluginData2FlowNodeIO } from '@fastgpt/global/core/workflow/utils';
import { PluginSourceEnum } from '@fastgpt/global/core/plugin/constants';
import type { PluginRuntimeType } from '@fastgpt/global/core/workflow/runtime/type';
import { FlowNodeTemplateTypeEnum } from '@fastgpt/global/core/workflow/constants';

@@ -52,10 +52,10 @@ const getPluginTemplateById = async (
  showStatus: true,
  workflow: {
    nodes: item.modules,
-    edges: item.edges
+    edges: item.edges,
+    chatConfig: item.chatConfig
  },
  templateType: FlowNodeTemplateTypeEnum.teamApp,
  isTool: true,
  version: item?.pluginData?.nodeVersion || defaultNodeVersion,
  originCost: 0,
  currentCost: 0

@@ -71,22 +71,27 @@ const getPluginTemplateById = async (
/* format plugin modules to plugin preview module */
export async function getPluginPreviewNode({ id }: { id: string }): Promise<FlowNodeTemplateType> {
  const plugin = await getPluginTemplateById(id);
+  const isPlugin = !!plugin.workflow.nodes.find(
+    (node) => node.flowNodeType === FlowNodeTypeEnum.pluginInput
+  );

  return {
    id: getNanoid(),
    pluginId: plugin.id,
    templateType: plugin.templateType,
-    flowNodeType: FlowNodeTypeEnum.pluginModule,
+    flowNodeType: isPlugin ? FlowNodeTypeEnum.pluginModule : FlowNodeTypeEnum.appModule,
    avatar: plugin.avatar,
    name: plugin.name,
    intro: plugin.intro,
    inputExplanationUrl: plugin.inputExplanationUrl,
    showStatus: plugin.showStatus,
-    isTool: plugin.isTool,
+    isTool: isPlugin,
    version: plugin.version,
    sourceHandle: getHandleConfig(true, true, true, true),
    targetHandle: getHandleConfig(true, true, true, true),
-    ...pluginData2FlowNodeIO(plugin.workflow.nodes)
+    ...(isPlugin
+      ? pluginData2FlowNodeIO({ nodes: plugin.workflow.nodes })
+      : appData2FlowNodeIO({ chatConfig: plugin.workflow.chatConfig }))
  };
}

@@ -1,85 +1,88 @@
// @ts-nocheck
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { SelectAppItemType } from '@fastgpt/global/core/workflow/template/system/runApp/type';
-import { dispatchWorkFlowV1 } from '../index';
-import { MongoApp } from '../../../../core/app/schema';
-import { responseWrite } from '../../../../common/response';
+import { dispatchWorkFlow } from '../index';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
+import {
+  getWorkflowEntryNodeIds,
+  initWorkflowEdgeStatus,
+  storeNodes2RuntimeNodes,
+  textAdaptGptResponse
+} from '@fastgpt/global/core/workflow/runtime/utils';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { getHistories, setEntryEntries } from '../utils';
+import { getHistories } from '../utils';
import { chatValue2RuntimePrompt, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
+import { authAppByTmbId } from '../../../../support/permission/app/auth';
+import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.userChatInput]: string;
  [NodeInputKeyEnum.history]?: ChatItemType[] | number;
-  app: SelectAppItemType;
+  [NodeInputKeyEnum.fileUrlList]?: string[];
}>;
type Response = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]: string;
  [NodeOutputKeyEnum.history]: ChatItemType[];
}>;

-export const dispatchAppRequest = async (props: Props): Promise<Response> => {
+export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
  const {
    res,
    teamId,
    stream,
    detail,
    app: workflowApp,
    histories,
-    inputFiles,
-    params: { userChatInput, history, app }
+    query,
+    node: { pluginId },
+    workflowStreamResponse,
+    params
  } = props;
  let start = Date.now();

+  const { userChatInput, history, ...variables } = params;
  if (!userChatInput) {
    return Promise.reject('Input is empty');
  }
+  if (!pluginId) {
+    return Promise.reject('pluginId is empty');
+  }

-  const appData = await MongoApp.findOne({
-    _id: app.id,
-    teamId
+  // Auth the app by tmbId(Not the user, but the workflow user)
+  const { app: appData } = await authAppByTmbId({
+    appId: pluginId,
+    tmbId: workflowApp.tmbId,
+    per: ReadPermissionVal
  });

-  if (!appData) {
-    return Promise.reject('App not found');
-  }

-  if (stream) {
-    responseWrite({
-      res,
-      event: detail ? SseResponseEventEnum.answer : undefined,
-      data: textAdaptGptResponse({
-        text: '\n'
-      })
-    });
-  }
+  // Auto line
+  workflowStreamResponse?.({
+    event: SseResponseEventEnum.answer,
+    data: textAdaptGptResponse({
+      text: '\n'
+    })
+  });

  const chatHistories = getHistories(history, histories);
+  const { files } = chatValue2RuntimePrompt(query);

-  const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlowV1({
+  const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
    ...props,
-    appId: app.id,
-    modules: setEntryEntries(appData.modules),
-    runtimeModules: undefined, // must reset
+    app: appData,
+    runtimeNodes: storeNodes2RuntimeNodes(
+      appData.modules,
+      getWorkflowEntryNodeIds(appData.modules)
+    ),
+    runtimeEdges: initWorkflowEdgeStatus(appData.edges),
    histories: chatHistories,
-    inputFiles,
-    startParams: {
-      userChatInput
-    }
+    query: runtimePrompt2ChatsValue({
+      files,
+      text: userChatInput
+    }),
+    variables: variables
  });

  const completeMessages = chatHistories.concat([
    {
      obj: ChatRoleEnum.Human,
-      value: runtimePrompt2ChatsValue({
-        files: inputFiles,
-        text: userChatInput
-      })
+      value: query
    },
    {
      obj: ChatRoleEnum.AI,

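The hunk above shows the "app invoked like a plugin" mechanic end to end: the target app id arrives as node.pluginId, access is authorized with the workflow owner's tmbId rather than the end user's, and the child app runs as a nested dispatchWorkFlow with its stored modules converted to runtime nodes and edges. A condensed sketch of that sequence, using only the names visible in the hunk (error handling and response assembly omitted):

    // Condensed from the hunk above; not the full implementation.
    const { app: appData } = await authAppByTmbId({
      appId: pluginId, // the selected app's id, carried on the node
      tmbId: workflowApp.tmbId, // the hosting workflow's owner, not the end user
      per: ReadPermissionVal
    });

    const { assistantResponses } = await dispatchWorkFlow({
      ...props,
      app: appData,
      runtimeNodes: storeNodes2RuntimeNodes(
        appData.modules,
        getWorkflowEntryNodeIds(appData.modules)
      ),
      runtimeEdges: initWorkflowEdgeStatus(appData.edges),
      histories: chatHistories,
      query: runtimePrompt2ChatsValue({ files, text: userChatInput }),
      variables // app-level variables forwarded from the node's params
    });
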
@@ -11,18 +11,14 @@ import {
  ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type.d';
import { NextApiResponse } from 'next';
-import {
-  responseWrite,
-  responseWriteController,
-  responseWriteNodeStatus
-} from '../../../../../common/response';
+import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type.d';
import json5 from 'json5';
-import { DispatchFlowResponse } from '../../type';
+import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';

@@ -50,9 +46,9 @@ export const runToolWithFunctionCall = async (
    res,
    requestOrigin,
    runtimeNodes,
-    detail = false,
    node,
    stream,
+    workflowStreamResponse,
    params: { temperature = 0, maxToken = 4000, aiChatVision }
  } = props;
  const assistantResponses = response?.assistantResponses || [];

@@ -143,9 +139,9 @@ export const runToolWithFunctionCall = async (
    if (res && stream) {
      return streamResponse({
        res,
-        detail,
        toolNodes,
-        stream: aiResponse
+        stream: aiResponse,
+        workflowStreamResponse
      });
    } else {
      const result = aiResponse as ChatCompletion;

@@ -216,21 +212,18 @@ export const runToolWithFunctionCall = async (
          content: stringToolResponse
        };

-        if (stream && detail) {
-          responseWrite({
-            res,
-            event: SseResponseEventEnum.toolResponse,
-            data: JSON.stringify({
-              tool: {
-                id: tool.id,
-                toolName: '',
-                toolAvatar: '',
-                params: '',
-                response: sliceStrStartEnd(stringToolResponse, 500, 500)
-              }
-            })
-          });
-        }
+        workflowStreamResponse?.({
+          event: SseResponseEventEnum.toolResponse,
+          data: {
+            tool: {
+              id: tool.id,
+              toolName: '',
+              toolAvatar: '',
+              params: '',
+              response: sliceStrStartEnd(stringToolResponse, 500, 500)
+            }
+          }
+        });

        return {
          toolRunResponse,

@@ -260,12 +253,14 @@ export const runToolWithFunctionCall = async (
  ];
  // console.log(tokens, 'tool');

-  if (stream && detail) {
-    responseWriteNodeStatus({
-      res,
-      name: node.name
-    });
-  }
+  // Run tool status
+  workflowStreamResponse?.({
+    event: SseResponseEventEnum.flowNodeStatus,
+    data: {
+      status: 'running',
+      name: node.name
+    }
+  });

  // tool assistant
  const toolAssistants = toolsRunResponse

@@ -337,14 +332,14 @@ export const runToolWithFunctionCall = async (

async function streamResponse({
  res,
-  detail,
  toolNodes,
-  stream
+  stream,
+  workflowStreamResponse
}: {
  res: NextApiResponse;
-  detail: boolean;
  toolNodes: ToolNodeItemType[];
  stream: StreamChatType;
+  workflowStreamResponse?: WorkflowResponseType;
}) {
  const write = responseWriteController({
    res,

@@ -367,9 +362,9 @@ async function streamResponse({
    const content = responseChoice?.content || '';
    textAnswer += content;

-    responseWrite({
+    workflowStreamResponse?.({
      write,
-      event: detail ? SseResponseEventEnum.answer : undefined,
+      event: SseResponseEventEnum.answer,
      data: textAdaptGptResponse({
        text: content
      })

@@ -397,22 +392,20 @@ async function streamResponse({
        toolAvatar: toolNode.avatar
      });

-      if (detail) {
-        responseWrite({
-          write,
-          event: SseResponseEventEnum.toolCall,
-          data: JSON.stringify({
-            tool: {
-              id: functionId,
-              toolName: toolNode.name,
-              toolAvatar: toolNode.avatar,
-              functionName: functionCall.name,
-              params: functionCall.arguments,
-              response: ''
-            }
-          })
-        });
-      }
+      workflowStreamResponse?.({
+        write,
+        event: SseResponseEventEnum.toolCall,
+        data: {
+          tool: {
+            id: functionId,
+            toolName: toolNode.name,
+            toolAvatar: toolNode.avatar,
+            functionName: functionCall.name,
+            params: functionCall.arguments,
+            response: ''
+          }
+        }
+      });
    }

    continue;

@@ -424,21 +417,19 @@ async function streamResponse({
      if (currentTool) {
        currentTool.arguments += arg;

-        if (detail) {
-          responseWrite({
-            write,
-            event: SseResponseEventEnum.toolParams,
-            data: JSON.stringify({
-              tool: {
-                id: functionId,
-                toolName: '',
-                toolAvatar: '',
-                params: arg,
-                response: ''
-              }
-            })
-          });
-        }
+        workflowStreamResponse?.({
+          write,
+          event: SseResponseEventEnum.toolParams,
+          data: {
+            tool: {
+              id: functionId,
+              toolName: '',
+              toolAvatar: '',
+              params: arg,
+              response: ''
+            }
+          }
+        });
      }
    }
  }

@@ -8,11 +8,7 @@ import {
  ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
-import {
-  responseWrite,
-  responseWriteController,
-  responseWriteNodeStatus
-} from '../../../../../common/response';
+import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';

@@ -30,6 +26,7 @@ import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { WorkflowResponseType } from '../../type';

type FunctionCallCompletion = {
  id: string;

@@ -56,9 +53,9 @@ export const runToolWithPromptCall = async (
    res,
    requestOrigin,
    runtimeNodes,
-    detail = false,
    node,
    stream,
+    workflowStreamResponse,
    params: { temperature = 0, maxToken = 4000, aiChatVision }
  } = props;
  const assistantResponses = response?.assistantResponses || [];

@@ -143,9 +140,9 @@ export const runToolWithPromptCall = async (
  if (res && stream) {
    const { answer } = await streamResponse({
      res,
-      detail,
      toolNodes,
-      stream: aiResponse
+      stream: aiResponse,
+      workflowStreamResponse
    });

    return answer;

@@ -159,9 +156,8 @@ export const runToolWithPromptCall = async (
  const { answer: replaceAnswer, toolJson } = parseAnswer(answer);
  // No tools
  if (!toolJson) {
-    if (replaceAnswer === ERROR_TEXT && stream && detail) {
-      responseWrite({
-        res,
+    if (replaceAnswer === ERROR_TEXT) {
+      workflowStreamResponse?.({
        event: SseResponseEventEnum.answer,
        data: textAdaptGptResponse({
          text: replaceAnswer

@@ -206,22 +202,19 @@ export const runToolWithPromptCall = async (
  })();

  // SSE response to client
-  if (stream && detail) {
-    responseWrite({
-      res,
-      event: SseResponseEventEnum.toolCall,
-      data: JSON.stringify({
-        tool: {
-          id: toolJson.id,
-          toolName: toolNode.name,
-          toolAvatar: toolNode.avatar,
-          functionName: toolJson.name,
-          params: toolJson.arguments,
-          response: ''
-        }
-      })
-    });
-  }
+  workflowStreamResponse?.({
+    event: SseResponseEventEnum.toolCall,
+    data: {
+      tool: {
+        id: toolJson.id,
+        toolName: toolNode.name,
+        toolAvatar: toolNode.avatar,
+        functionName: toolJson.name,
+        params: toolJson.arguments,
+        response: ''
+      }
+    }
+  });

  const moduleRunResponse = await dispatchWorkFlow({
    ...props,

@@ -245,21 +238,18 @@ export const runToolWithPromptCall = async (
    return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
  })();

-  if (stream && detail) {
-    responseWrite({
-      res,
-      event: SseResponseEventEnum.toolResponse,
-      data: JSON.stringify({
-        tool: {
-          id: toolJson.id,
-          toolName: '',
-          toolAvatar: '',
-          params: '',
-          response: sliceStrStartEnd(stringToolResponse, 500, 500)
-        }
-      })
-    });
-  }
+  workflowStreamResponse?.({
+    event: SseResponseEventEnum.toolResponse,
+    data: {
+      tool: {
+        id: toolJson.id,
+        toolName: '',
+        toolAvatar: '',
+        params: '',
+        response: sliceStrStartEnd(stringToolResponse, 500, 500)
+      }
+    }
+  });

  return {
    moduleRunResponse,

@@ -267,12 +257,14 @@ export const runToolWithPromptCall = async (
    };
  })();

-  if (stream && detail) {
-    responseWriteNodeStatus({
-      res,
-      name: node.name
-    });
-  }
+  // Run tool status
+  workflowStreamResponse?.({
+    event: SseResponseEventEnum.flowNodeStatus,
+    data: {
+      status: 'running',
+      name: node.name
+    }
+  });

  // Merge the tool-call results, stored in functionCall format.
  const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {

@@ -340,13 +332,13 @@ ANSWER: `;

async function streamResponse({
  res,
-  detail,
-  stream
+  stream,
+  workflowStreamResponse
}: {
  res: NextApiResponse;
-  detail: boolean;
  toolNodes: ToolNodeItemType[];
  stream: StreamChatType;
+  workflowStreamResponse?: WorkflowResponseType;
}) {
  const write = responseWriteController({
    res,

@@ -370,9 +362,9 @@ async function streamResponse({
    textAnswer += content;

    if (startResponseWrite) {
-      responseWrite({
+      workflowStreamResponse?.({
        write,
-        event: detail ? SseResponseEventEnum.answer : undefined,
+        event: SseResponseEventEnum.answer,
        data: textAdaptGptResponse({
          text: content
        })

@@ -384,9 +376,9 @@ async function streamResponse({
      // find first : index
      const firstIndex = textAnswer.indexOf(':');
      textAnswer = textAnswer.substring(firstIndex + 1).trim();
-      responseWrite({
+      workflowStreamResponse?.({
        write,
-        event: detail ? SseResponseEventEnum.answer : undefined,
+        event: SseResponseEventEnum.answer,
        data: textAdaptGptResponse({
          text: textAnswer
        })

@@ -12,24 +12,21 @@ import {
  ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
-import {
-  responseWrite,
-  responseWriteController,
-  responseWriteNodeStatus
-} from '../../../../../common/response';
+import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type.d';
import json5 from 'json5';
-import { DispatchFlowResponse } from '../../type';
+import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { updateToolInputValue } from './utils';
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
+import { addLog } from '../../../../../common/system/log';

type ToolRunResponseType = {
  toolRunResponse: DispatchFlowResponse;

@@ -58,9 +55,9 @@ export const runToolWithToolChoice = async (
    res,
    requestOrigin,
    runtimeNodes,
-    detail = false,
    node,
    stream,
+    workflowStreamResponse,
    params: { temperature = 0, maxToken = 4000, aiChatVision }
  } = props;
  const assistantResponses = response?.assistantResponses || [];

@@ -145,91 +142,91 @@ export const runToolWithToolChoice = async (
  const ai = getAIApi({
    timeout: 480000
  });
  const aiResponse = await ai.chat.completions.create(requestBody, {
    headers: {
      Accept: 'application/json, text/plain, */*'
    }
  });

  const { answer, toolCalls } = await (async () => {
    if (res && stream) {
      return streamResponse({
        res,
        detail,
        toolNodes,
        stream: aiResponse
      });
    } else {
      const result = aiResponse as ChatCompletion;
      const calls = result.choices?.[0]?.message?.tool_calls || [];
  try {
    const aiResponse = await ai.chat.completions.create(requestBody, {
      headers: {
        Accept: 'application/json, text/plain, */*'
      }
    });

      // Attach the tool name and avatar
      const toolCalls = calls.map((tool) => {
        const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
        return {
          ...tool,
          toolName: toolNode?.name || '',
          toolAvatar: toolNode?.avatar || ''
        };
      });
    const { answer, toolCalls } = await (async () => {
      if (res && stream) {
        return streamResponse({
          res,
          workflowStreamResponse,
          toolNodes,
          stream: aiResponse
        });
      } else {
        const result = aiResponse as ChatCompletion;
        const calls = result.choices?.[0]?.message?.tool_calls || [];

      return {
        answer: result.choices?.[0]?.message?.content || '',
        toolCalls: toolCalls
      };
    }
  })();

  // Run the selected tool by LLM.
  const toolsRunResponse = (
    await Promise.all(
      toolCalls.map(async (tool) => {
        const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);

        if (!toolNode) return;

        const startParams = (() => {
          try {
            return json5.parse(tool.function.arguments);
          } catch (error) {
            return {};
          }
        })();

        const toolRunResponse = await dispatchWorkFlow({
          ...props,
          isToolCall: true,
          runtimeNodes: runtimeNodes.map((item) =>
            item.nodeId === toolNode.nodeId
              ? {
                  ...item,
                  isEntry: true,
                  inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
                }
              : item
          )
        // Attach the tool name and avatar
        const toolCalls = calls.map((tool) => {
          const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
          return {
            ...tool,
            toolName: toolNode?.name || '',
            toolAvatar: toolNode?.avatar || ''
          };
        });

        const stringToolResponse = (() => {
          if (typeof toolRunResponse.toolResponses === 'object') {
            return JSON.stringify(toolRunResponse.toolResponses, null, 2);
          }

          return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
        })();

        const toolMsgParams: ChatCompletionToolMessageParam = {
          tool_call_id: tool.id,
          role: ChatCompletionRequestMessageRoleEnum.Tool,
          name: tool.function.name,
          content: stringToolResponse
        return {
          answer: result.choices?.[0]?.message?.content || '',
          toolCalls: toolCalls
        };
      }
    })();

    if (stream && detail) {
      responseWrite({
        res,
    // Run the selected tool by LLM.
    const toolsRunResponse = (
      await Promise.all(
        toolCalls.map(async (tool) => {
          const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);

          if (!toolNode) return;

          const startParams = (() => {
            try {
              return json5.parse(tool.function.arguments);
            } catch (error) {
              return {};
            }
          })();

          const toolRunResponse = await dispatchWorkFlow({
            ...props,
            isToolCall: true,
            runtimeNodes: runtimeNodes.map((item) =>
              item.nodeId === toolNode.nodeId
                ? {
                    ...item,
                    isEntry: true,
                    inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
                  }
                : item
            )
          });

          const stringToolResponse = (() => {
            if (typeof toolRunResponse.toolResponses === 'object') {
              return JSON.stringify(toolRunResponse.toolResponses, null, 2);
            }

            return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
          })();

          const toolMsgParams: ChatCompletionToolMessageParam = {
            tool_call_id: tool.id,
            role: ChatCompletionRequestMessageRoleEnum.Tool,
            name: tool.function.name,
            content: stringToolResponse
          };

          workflowStreamResponse?.({
            event: SseResponseEventEnum.toolResponse,
            data: JSON.stringify({
            data: {
              tool: {
                id: tool.id,
                toolName: '',

@@ -237,123 +234,130 @@ export const runToolWithToolChoice = async (
                params: '',
                response: sliceStrStartEnd(stringToolResponse, 500, 500)
              }
            })
          }
        });

        return {
          toolRunResponse,
          toolMsgParams
        };
      })
    )
  ).filter(Boolean) as ToolRunResponseType;

  const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
  if (toolCalls.length > 0 && !res?.closed) {
    // Run the tool, combine its results, and perform another round of AI calls
    const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      tool_calls: toolCalls
    };
    const concatToolMessages = [
      ...requestMessages,
      assistantToolMsgParams
    ] as ChatCompletionMessageParam[];
    const tokens = await countGptMessagesTokens(concatToolMessages, tools);
    const completeMessages = [
      ...concatToolMessages,
      ...toolsRunResponse.map((item) => item?.toolMsgParams)
    ];

    // console.log(tokens, 'tool');

    // Run tool status
    workflowStreamResponse?.({
      event: SseResponseEventEnum.flowNodeStatus,
      data: {
        status: 'running',
        name: node.name
      }

        return {
          toolRunResponse,
          toolMsgParams
        };
      })
    )
  ).filter(Boolean) as ToolRunResponseType;

  const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
  if (toolCalls.length > 0 && !res?.closed) {
    // Run the tool, combine its results, and perform another round of AI calls
    const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      tool_calls: toolCalls
    };
    const concatToolMessages = [
      ...requestMessages,
      assistantToolMsgParams
    ] as ChatCompletionMessageParam[];
    const tokens = await countGptMessagesTokens(concatToolMessages, tools);
    const completeMessages = [
      ...concatToolMessages,
      ...toolsRunResponse.map((item) => item?.toolMsgParams)
    ];

    // console.log(tokens, 'tool');

    if (stream && detail) {
      responseWriteNodeStatus({
        res,
        name: node.name
      });
    }

    // tool assistant
    const toolAssistants = toolsRunResponse
      .map((item) => {
        const assistantResponses = item.toolRunResponse.assistantResponses || [];
        return assistantResponses;
      })
      .flat();
    // tool assistant
    const toolAssistants = toolsRunResponse
      .map((item) => {
        const assistantResponses = item.toolRunResponse.assistantResponses || [];
        return assistantResponses;
      })
      .flat();

    // tool node assistant
    const adaptChatMessages = GPTMessages2Chats(completeMessages);
    const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
    // tool node assistant
    const adaptChatMessages = GPTMessages2Chats(completeMessages);
    const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;

    const toolNodeAssistants = [
      ...assistantResponses,
      ...toolAssistants,
      ...toolNodeAssistant.value
    ];
    const toolNodeAssistants = [
      ...assistantResponses,
      ...toolAssistants,
      ...toolNodeAssistant.value
    ];

    // concat tool responses
    const dispatchFlowResponse = response
      ? response.dispatchFlowResponse.concat(flatToolsResponseData)
      : flatToolsResponseData;
    // concat tool responses
    const dispatchFlowResponse = response
      ? response.dispatchFlowResponse.concat(flatToolsResponseData)
      : flatToolsResponseData;

    /* check stop signal */
    const hasStopSignal = flatToolsResponseData.some(
      (item) => !!item.flowResponses?.find((item) => item.toolStop)
    );
    if (hasStopSignal) {
      return {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        completeMessages,
        assistantResponses: toolNodeAssistants
      };
    }

    return runToolWithToolChoice(
      {
        ...props,
        messages: completeMessages
      },
      {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        assistantResponses: toolNodeAssistants
      }
    );
  } else {
    // No tool is invoked, indicating that the process is over
    const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      content: answer
    };
    const completeMessages = filterMessages.concat(gptAssistantResponse);
    const tokens = await countGptMessagesTokens(completeMessages, tools);
    // console.log(tokens, 'response token');

    // concat tool assistant
    const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;

    /* check stop signal */
    const hasStopSignal = flatToolsResponseData.some(
      (item) => !!item.flowResponses?.find((item) => item.toolStop)
    );
    if (hasStopSignal) {
      return {
        dispatchFlowResponse,
        dispatchFlowResponse: response?.dispatchFlowResponse || [],
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        completeMessages,
        assistantResponses: toolNodeAssistants
        assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
      };
    }

    return runToolWithToolChoice(
      {
        ...props,
        messages: completeMessages
      },
      {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        assistantResponses: toolNodeAssistants
      }
    );
  } else {
    // No tool is invoked, indicating that the process is over
    const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      content: answer
    };
    const completeMessages = filterMessages.concat(gptAssistantResponse);
    const tokens = await countGptMessagesTokens(completeMessages, tools);
    // console.log(tokens, 'response token');

    // concat tool assistant
    const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;

    return {
      dispatchFlowResponse: response?.dispatchFlowResponse || [],
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      completeMessages,
      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
    };
  } catch (error) {
    addLog.warn(`LLM response error`, {
      requestBody
    });
    return Promise.reject(error);
  }
};

async function streamResponse({
  res,
-  detail,
  toolNodes,
-  stream
+  stream,
+  workflowStreamResponse
}: {
  res: NextApiResponse;
-  detail: boolean;
  toolNodes: ToolNodeItemType[];
  stream: StreamChatType;
+  workflowStreamResponse?: WorkflowResponseType;
}) {
  const write = responseWriteController({
    res,

@@ -375,9 +379,9 @@ async function streamResponse({
    const content = responseChoice.content || '';
    textAnswer += content;

-    responseWrite({
+    workflowStreamResponse?.({
      write,
-      event: detail ? SseResponseEventEnum.answer : undefined,
+      event: SseResponseEventEnum.answer,
      data: textAdaptGptResponse({
        text: content
      })

@@ -405,22 +409,19 @@ async function streamResponse({
        toolAvatar: toolNode.avatar
      });

-      if (detail) {
-        responseWrite({
-          write,
-          event: SseResponseEventEnum.toolCall,
-          data: JSON.stringify({
-            tool: {
-              id: toolCall.id,
-              toolName: toolNode.name,
-              toolAvatar: toolNode.avatar,
-              functionName: toolCall.function.name,
-              params: toolCall.function.arguments,
-              response: ''
-            }
-          })
-        });
-      }
+      workflowStreamResponse?.({
+        event: SseResponseEventEnum.toolCall,
+        data: {
+          tool: {
+            id: toolCall.id,
+            toolName: toolNode.name,
+            toolAvatar: toolNode.avatar,
+            functionName: toolCall.function.name,
+            params: toolCall.function.arguments,
+            response: ''
+          }
+        }
+      });

      continue;
    }

@@ -437,21 +438,19 @@ async function streamResponse({
      if (currentTool) {
        currentTool.function.arguments += arg;

-        if (detail) {
-          responseWrite({
-            write,
-            event: SseResponseEventEnum.toolParams,
-            data: JSON.stringify({
-              tool: {
-                id: currentTool.id,
-                toolName: '',
-                toolAvatar: '',
-                params: arg,
-                response: ''
-              }
-            })
-          });
-        }
+        workflowStreamResponse?.({
+          write,
+          event: SseResponseEventEnum.toolParams,
+          data: {
+            tool: {
+              id: currentTool.id,
+              toolName: '',
+              toolAvatar: '',
+              params: arg,
+              response: ''
+            }
+          }
+        });
      }
    }
  }

@@ -31,7 +31,7 @@ import {
import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { responseWrite, responseWriteController } from '../../../../common/response';
+import { responseWriteController } from '../../../../common/response';
import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';

@@ -41,6 +41,7 @@ import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { addLog } from '../../../../common/system/log';
import { computedMaxToken, computedTemperature } from '../../../ai/utils';
+import { WorkflowResponseType } from '../type';

export type ChatProps = ModuleDispatchProps<
  AIChatNodeProps & {

@@ -60,11 +61,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
    res,
    requestOrigin,
    stream = false,
-    detail = false,
    user,
    histories,
    node: { name },
    query,
+    workflowStreamResponse,
    params: {
      model,
      temperature = 0,

@@ -179,8 +180,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
    // sse response
    const { answer } = await streamResponse({
      res,
-      detail,
-      stream: response
+      stream: response,
+      workflowStreamResponse
    });

    if (!answer) {

@@ -340,12 +341,12 @@ async function getChatMessages({

async function streamResponse({
  res,
-  detail,
-  stream
+  stream,
+  workflowStreamResponse
}: {
  res: NextApiResponse;
-  detail: boolean;
  stream: StreamChatType;
+  workflowStreamResponse?: WorkflowResponseType;
}) {
  const write = responseWriteController({
    res,

@@ -360,9 +361,9 @@ async function streamResponse({
    const content = part.choices?.[0]?.delta?.content || '';
    answer += content;

-    responseWrite({
+    workflowStreamResponse?.({
      write,
-      event: detail ? SseResponseEventEnum.answer : undefined,
+      event: SseResponseEventEnum.answer,
      data: textAdaptGptResponse({
        text: content
      })

@@ -1,4 +1,3 @@
-import { NextApiResponse } from 'next';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import {
  DispatchNodeResponseKeyEnum,

@@ -21,7 +20,6 @@ import {
  FlowNodeTypeEnum
} from '@fastgpt/global/core/workflow/node/constant';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
-import { responseWrite, responseWriteNodeStatus } from '../../../common/response';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';
import { replaceVariableLabel } from '@fastgpt/global/core/workflow/utils';

@@ -41,8 +39,7 @@ import { dispatchPluginOutput } from './plugin/runOutput';
import { removeSystemVariable, valueTypeFormat } from './utils';
import {
  filterWorkflowEdges,
-  checkNodeRunStatus,
-  getLastInteractiveValue
+  checkNodeRunStatus
} from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';

@@ -62,12 +59,11 @@ import { dispatchTextEditor } from './tools/textEditor';
import { dispatchCustomFeedback } from './tools/customFeedback';
import { dispatchReadFiles } from './tools/readFiles';
import { dispatchUserSelect } from './interactive/userSelect';
import { FlowNodeOutputItemType } from '@fastgpt/global/core/workflow/type/io';
import {
  InteractiveNodeResponseItemType,
-  UserInteractiveType,
  UserSelectInteractive
} from '@fastgpt/global/core/workflow/template/system/userSelect/type';
+import { dispatchRunAppNode } from './agent/runAppModule';

const callbackMap: Record<FlowNodeTypeEnum, Function> = {
  [FlowNodeTypeEnum.workflowStart]: dispatchWorkflowStart,

@@ -79,6 +75,7 @@ const callbackMap: Record<FlowNodeTypeEnum, Function> = {
  [FlowNodeTypeEnum.contentExtract]: dispatchContentExtract,
  [FlowNodeTypeEnum.httpRequest468]: dispatchHttp468Request,
  [FlowNodeTypeEnum.runApp]: dispatchAppRequest,
+  [FlowNodeTypeEnum.appModule]: dispatchRunAppNode,
  [FlowNodeTypeEnum.pluginModule]: dispatchRunPlugin,
  [FlowNodeTypeEnum.pluginInput]: dispatchPluginInput,
  [FlowNodeTypeEnum.pluginOutput]: dispatchPluginOutput,

@@ -115,7 +112,6 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
    variables = {},
    user,
    stream = false,
-    detail = false,
    ...props
  } = data;

@@ -261,13 +257,10 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
        nodeOutputs
      };

-      if (stream && res) {
-        responseWrite({
-          res,
-          event: SseResponseEventEnum.interactive,
-          data: JSON.stringify({ interactive: interactiveResult })
-        });
-      }
+      props.workflowStreamResponse?.({
+        event: SseResponseEventEnum.interactive,
+        data: { interactive: interactiveResult }
+      });

      return {
        type: ChatItemValueTypeEnum.interactive,

@@ -401,11 +394,13 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
  }
  async function nodeRunWithActive(node: RuntimeNodeItemType) {
    // push run status messages
-    if (res && stream && detail && node.showStatus) {
-      responseStatus({
-        res,
-        name: node.name,
-        status: 'running'
-      });
-    }
+    if (node.showStatus) {
+      props.workflowStreamResponse?.({
+        event: SseResponseEventEnum.flowNodeStatus,
+        data: {
+          status: 'running',
+          name: node.name
+        }
+      });
+    }
    const startTime = Date.now();

@@ -420,7 +415,6 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
      histories,
      user,
      stream,
-      detail,
      node,
      runtimeNodes,
      runtimeEdges,

@@ -510,23 +504,6 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
  };
}

-/* sse response modules staus */
-export function responseStatus({
-  res,
-  status,
-  name
-}: {
-  res: NextApiResponse;
-  status?: 'running' | 'finish';
-  name?: string;
-}) {
-  if (!name) return;
-  responseWriteNodeStatus({
-    res,
-    name
-  });
-}

/* get system variable */
export function getSystemVariable({
  user,

@@ -14,7 +14,6 @@ import type {
} from '@fastgpt/global/core/workflow/template/system/userSelect/type';
import { updateUserSelectedResult } from '../../../chat/controller';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
-import { responseWrite } from '../../../../common/response';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';

type Props = ModuleDispatchProps<{

@@ -29,10 +28,7 @@ type UserSelectResponse = DispatchNodeResultType<{

export const dispatchUserSelect = async (props: Props): Promise<UserSelectResponse> => {
  const {
-    res,
-    detail,
    histories,
-    stream,
+    workflowStreamResponse,
    app: { _id: appId },
    chatId,
    node: { nodeId, isEntry },

@@ -43,10 +39,9 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
  // Interactive node is not the entry node, return interactive result
  if (!isEntry) {
    const answerText = description ? `\n${description}` : undefined;
-    if (res && stream && answerText) {
-      responseWrite({
-        res,
-        event: detail ? SseResponseEventEnum.fastAnswer : undefined,
+    if (answerText) {
+      workflowStreamResponse?.({
+        event: SseResponseEventEnum.fastAnswer,
        data: textAdaptGptResponse({
          text: answerText
        })

@@ -2,7 +2,6 @@ import {
  DispatchNodeResponseKeyEnum,
  SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
-import { responseWrite } from '../../../../common/response';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';

@@ -16,24 +15,19 @@ export type AnswerResponse = DispatchNodeResultType<{

export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
  const {
-    res,
-    detail,
-    stream,
+    workflowStreamResponse,
    params: { text = '' }
  } = props as AnswerProps;

  const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);
  const responseText = `\n${formatText}`;

-  if (res && stream) {
-    responseWrite({
-      res,
-      event: detail ? SseResponseEventEnum.fastAnswer : undefined,
-      data: textAdaptGptResponse({
-        text: responseText
-      })
-    });
-  }
+  workflowStreamResponse?.({
+    event: SseResponseEventEnum.fastAnswer,
+    data: textAdaptGptResponse({
+      text: responseText
+    })
+  });

  return {
    [NodeOutputKeyEnum.answerText]: responseText,

@@ -6,7 +6,6 @@ import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { addCustomFeedbacks } from '../../../chat/controller';
-import { responseWrite } from '../../../../common/response';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';

type Props = ModuleDispatchProps<{

@@ -16,12 +15,11 @@ type Response = DispatchNodeResultType<{}>;

export const dispatchCustomFeedback = (props: Record<string, any>): Response => {
  const {
-    res,
    app: { _id: appId },
    chatId,
    responseChatItemId: chatItemId,
-    stream,
-    detail,
+    workflowStreamResponse,
    params: { system_textareaInput: feedbackText = '' }
  } = props as Props;

@@ -36,9 +34,8 @@ export const dispatchCustomFeedback = (props: Record<string, any>): Response =>

  if (stream) {
    if (!chatId || !chatItemId) {
-      responseWrite({
-        res,
-        event: detail ? SseResponseEventEnum.fastAnswer : undefined,
+      workflowStreamResponse?.({
+        event: SseResponseEventEnum.fastAnswer,
        data: textAdaptGptResponse({
          text: `\n\n**自定义反馈成功: (仅调试模式下展示该内容)**: "${feedbackText}"\n\n`
        })

@@ -14,7 +14,6 @@ import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
import { addLog } from '../../../../common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
-import { responseWrite } from '../../../../common/response';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { getSystemPluginCb } from '../../../../../plugins/register';

@ -43,15 +42,13 @@ const UNDEFINED_SIGN = 'UNDEFINED_SIGN';

export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<HttpResponse> => {
  let {
    res,
    detail,
    app: { _id: appId },
    chatId,
    stream,
    responseChatItemId,
    variables,
    node: { outputs },
    histories,
    workflowStreamResponse,
    params: {
      system_httpMethod: httpMethod = 'POST',
      system_httpReqUrl: httpReqUrl,

@ -158,10 +155,9 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
      results[key] = valueTypeFormat(formatResponse[key], output.valueType);
    }

    if (stream && typeof formatResponse[NodeOutputKeyEnum.answerText] === 'string') {
      responseWrite({
        res,
        event: detail ? SseResponseEventEnum.fastAnswer : undefined,
    if (typeof formatResponse[NodeOutputKeyEnum.answerText] === 'string') {
      workflowStreamResponse?.({
        event: SseResponseEventEnum.fastAnswer,
        data: textAdaptGptResponse({
          text: formatResponse[NodeOutputKeyEnum.answerText]
        })

@ -2,7 +2,6 @@ import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { SelectAppItemType } from '@fastgpt/global/core/workflow/template/system/runApp/type';
import { dispatchWorkFlow } from '../index';
import { responseWrite } from '../../../../common/response';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {

@ -31,10 +30,8 @@ type Response = DispatchNodeResultType<{

export const dispatchAppRequest = async (props: Props): Promise<Response> => {
  const {
    res,
    app: workflowApp,
    stream,
    detail,
    workflowStreamResponse,
    histories,
    query,
    params: { userChatInput, history, app }

@ -51,15 +48,12 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
    per: ReadPermissionVal
  });

  if (res && stream) {
    responseWrite({
      res,
      event: detail ? SseResponseEventEnum.answer : undefined,
      data: textAdaptGptResponse({
        text: '\n'
      })
    });
  }
  workflowStreamResponse?.({
    event: SseResponseEventEnum.fastAnswer,
    data: textAdaptGptResponse({
      text: '\n'
    })
  });

  const chatHistories = getHistories(history, histories);
  const { files } = chatValue2RuntimePrompt(query);

@ -8,7 +8,6 @@ import { getReferenceVariableValue } from '@fastgpt/global/core/workflow/runtime
import { TUpdateListItem } from '@fastgpt/global/core/workflow/template/system/variableUpdate/type';
import { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { removeSystemVariable, valueTypeFormat } from '../utils';
import { responseWrite } from '../../../../common/response';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.updateList]: TUpdateListItem[];

@ -16,7 +15,7 @@ type Props = ModuleDispatchProps<{
type Response = DispatchNodeResultType<{}>;

export const dispatchUpdateVariable = async (props: Props): Promise<Response> => {
  const { res, detail, stream, params, variables, runtimeNodes } = props;
  const { params, variables, runtimeNodes, workflowStreamResponse } = props;

  const { updateList } = params;
  updateList.forEach((item) => {

@ -54,13 +53,10 @@ export const dispatchUpdateVariable = async (props: Props): Promise<Response> =>
    }
  });

  if (detail && stream) {
    responseWrite({
      res,
      event: SseResponseEventEnum.updateVariables,
      data: JSON.stringify(removeSystemVariable(variables))
    });
  }
  workflowStreamResponse?.({
    event: SseResponseEventEnum.updateVariables,
    data: removeSystemVariable(variables)
  });

  return {
    [DispatchNodeResponseKeyEnum.nodeResponse]: {

@ -4,7 +4,10 @@ import {
  ChatItemValueItemType,
  ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
  DispatchNodeResponseKeyEnum,
  SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';

@ -21,3 +24,15 @@ export type DispatchFlowResponse = {
  [DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
  newVariables: Record<string, string>;
};

export type WorkflowResponseType = ({
  write,
  event,
  data,
  stream
}: {
  write?: ((text: string) => void) | undefined;
  event: SseResponseEventEnum;
  data: Record<string, any>;
  stream?: boolean | undefined;
}) => void;

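A minimal usage sketch of the WorkflowResponseType callback defined above; the node body and its props are illustrative only, not part of this change:

import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import type { WorkflowResponseType } from './type';

// Hypothetical node: pushes one fastAnswer chunk through the injected callback.
// The optional chaining mirrors the dispatch modules above, where the callback
// may be absent (e.g. in sub-flows that suppress streaming).
const demoDispatchNode = ({
  workflowStreamResponse
}: {
  workflowStreamResponse?: WorkflowResponseType;
}) => {
  workflowStreamResponse?.({
    event: SseResponseEventEnum.fastAnswer,
    data: textAdaptGptResponse({ text: '\ndemo answer' })
  });
};
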
@ -6,6 +6,56 @@ import {
  NodeOutputKeyEnum
} from '@fastgpt/global/core/workflow/constants';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { responseWrite } from '../../../common/response';
import { NextApiResponse } from 'next';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';

export const getWorkflowResponseWrite = ({
  res,
  detail,
  streamResponse,
  id
}: {
  res?: NextApiResponse;
  detail: boolean;
  streamResponse: boolean;
  id: string;
}) => {
  return ({
    write,
    event,
    data,
    stream
  }: {
    write?: (text: string) => void;
    event: SseResponseEventEnum;
    data: Record<string, any>;
    stream?: boolean; // Force set stream response
  }) => {
    const useStreamResponse = stream ?? streamResponse;

    if (!res || res.closed || !useStreamResponse) return;

    const detailEvent = [
      SseResponseEventEnum.error,
      SseResponseEventEnum.flowNodeStatus,
      SseResponseEventEnum.flowResponses,
      SseResponseEventEnum.interactive,
      SseResponseEventEnum.toolCall,
      SseResponseEventEnum.toolParams,
      SseResponseEventEnum.toolResponse,
      SseResponseEventEnum.updateVariables
    ];
    if (!detail && detailEvent.includes(event)) return;

    responseWrite({
      res,
      write,
      event: detail ? event : undefined,
      data: JSON.stringify(data)
    });
  };
};

export const filterToolNodeIdByEdges = ({
  nodeId,

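A sketch of how a caller might construct this writer once per request and hand it down to the dispatcher; the streamResponse and id values here are assumptions for illustration:

import { NextApiResponse } from 'next';
import { getWorkflowResponseWrite } from './utils';

// Hypothetical wiring: one writer per request, passed down as workflowStreamResponse.
// Events listed in detailEvent are dropped automatically when detail is false,
// so individual nodes no longer need their own res/detail/stream checks.
const buildWorkflowStreamResponse = (res: NextApiResponse, detail: boolean) =>
  getWorkflowResponseWrite({
    res,
    detail,
    streamResponse: true, // assumption: this request streams SSE
    id: 'response-chat-item-id' // assumption: id of the chat item being generated
  });
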
@ -1,321 +0,0 @@
// @ts-nocheck
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';

import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/workflow/template/system/classifyQuestion/type';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_CQJson } from '@fastgpt/global/core/ai/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import {
  ChatCompletionCreateParams,
  ChatCompletionMessageParam,
  ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import {
  countMessagesTokens,
  countGptMessagesTokens
} from '../../../../common/string/tiktoken/index';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.aiModel]: string;
  [NodeInputKeyEnum.aiSystemPrompt]?: string;
  [NodeInputKeyEnum.history]?: ChatItemType[] | number;
  [NodeInputKeyEnum.userChatInput]: string;
  [NodeInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
}>;
type CQResponse = DispatchNodeResultType<{
  [key: string]: any;
}>;
type ActionProps = Props & { cqModel: LLMModelItemType };

const agentFunName = 'classify_question';

/* request openai chat */
export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
  const {
    user,
    module: { name },
    histories,
    params: { model, history = 6, agents, userChatInput }
  } = props as Props;

  if (!userChatInput) {
    return Promise.reject('Input is empty');
  }

  const cqModel = getLLMModel(model);

  const chatHistories = getHistories(history, histories);

  const { arg, tokens } = await (async () => {
    if (cqModel.toolChoice) {
      return toolChoice({
        ...props,
        histories: chatHistories,
        cqModel
      });
    }
    if (cqModel.functionCall) {
      return functionCall({
        ...props,
        histories: chatHistories,
        cqModel
      });
    }
    return completions({
      ...props,
      histories: chatHistories,
      cqModel
    });
  })();

  const result = agents.find((item) => item.key === arg?.type) || agents[agents.length - 1];

  const { totalPoints, modelName } = formatModelChars2Points({
    model: cqModel.model,
    tokens,
    modelType: ModelTypeEnum.llm
  });

  return {
    [result.key]: true,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
      model: modelName,
      query: userChatInput,
      tokens,
      cqList: agents,
      cqResult: result.value,
      contextTotalLen: chatHistories.length + 2
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
        model: modelName,
        tokens
      }
    ]
  };
};

const getFunctionCallSchema = async ({
  cqModel,
  histories,
  params: { agents, systemPrompt, userChatInput }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    ...histories,
    {
      obj: ChatRoleEnum.Human,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: {
            content: systemPrompt
              ? `<背景知识>
${systemPrompt}
</背景知识>

问题: "${userChatInput}"
`
              : userChatInput
          }
        }
      ]
    }
  ];

  const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
  const filterMessages = await filterGPTMessageByMaxTokens({
    messages: adaptMessages,
    maxTokens: cqModel.maxContext
  });

  // function body
  const agentFunction = {
    name: agentFunName,
    description: '结合对话记录及背景知识,对问题进行分类,并返回对应的类型字段',
    parameters: {
      type: 'object',
      properties: {
        type: {
          type: 'string',
          description: `问题类型。下面是几种可选的问题类型: ${agents
            .map((item) => `${item.value},返回:'${item.key}'`)
            .join(';')}`,
          enum: agents.map((item) => item.key)
        }
      },
      required: ['type']
    }
  };

  return {
    agentFunction,
    filterMessages
  };
};

const toolChoice = async (props: ActionProps) => {
  const { user, cqModel } = props;

  const { agentFunction, filterMessages } = await getFunctionCallSchema(props);
  // function body
  const tools: ChatCompletionTool[] = [
    {
      type: 'function',
      function: agentFunction
    }
  ];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const response = await ai.chat.completions.create({
    model: cqModel.model,
    temperature: 0.01,
    messages: filterMessages,
    tools,
    tool_choice: { type: 'function', function: { name: agentFunName } }
  });

  try {
    const arg = JSON.parse(
      response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || ''
    );
    const completeMessages: ChatCompletionMessageParam[] = [
      ...filterMessages,
      {
        role: ChatCompletionRequestMessageRoleEnum.Assistant,
        tool_calls: response.choices?.[0]?.message?.tool_calls
      }
    ];

    return {
      arg,
      tokens: await countGptMessagesTokens(completeMessages, tools)
    };
  } catch (error) {
    console.log(response.choices?.[0]?.message);

    console.log('Your model may not support tool_call', error);

    return {
      arg: {},
      tokens: 0
    };
  }
};

const functionCall = async (props: ActionProps) => {
  const { user, cqModel } = props;

  const { agentFunction, filterMessages } = await getFunctionCallSchema(props);
  const functions: ChatCompletionCreateParams.Function[] = [agentFunction];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const response = await ai.chat.completions.create({
    model: cqModel.model,
    temperature: 0.01,
    messages: filterMessages,
    function_call: {
      name: agentFunName
    },
    functions
  });

  try {
    const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
    const completeMessages: ChatCompletionMessageParam[] = [
      ...filterMessages,
      {
        role: ChatCompletionRequestMessageRoleEnum.Assistant,
        function_call: response.choices?.[0]?.message?.function_call
      }
    ];

    return {
      arg,
      tokens: await countGptMessagesTokens(completeMessages, undefined, functions)
    };
  } catch (error) {
    console.log(response.choices?.[0]?.message);

    console.log('Your model may not support tool_call', error);

    return {
      arg: {},
      tokens: 0
    };
  }
};

const completions = async ({
  cqModel,
  user,
  histories,
  params: { agents, systemPrompt = '', userChatInput }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    {
      obj: ChatRoleEnum.Human,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: {
            content: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
              systemPrompt: systemPrompt || 'null',
              typeList: agents
                .map((item) => `{"questionType": "${item.value}", "typeId": "${item.key}"}`)
                .join('\n'),
              history: histories
                .map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
                .join('\n'),
              question: userChatInput
            })
          }
        }
      ]
    }
  ];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const data = await ai.chat.completions.create({
    model: cqModel.model,
    temperature: 0.01,
    messages: chats2GPTMessages({ messages, reserveId: false }),
    stream: false
  });
  const answer = data.choices?.[0].message?.content || '';

  const id =
    agents.find((item) => answer.includes(item.key) || answer.includes(item.value))?.key || '';

  return {
    tokens: await countMessagesTokens(messages),
    arg: { type: id }
  };
};

@ -1,384 +0,0 @@
// @ts-nocheck
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
  countMessagesTokens,
  countGptMessagesTokens
} from '../../../../common/string/tiktoken/index';

import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import type { ContextExtractAgentItemType } from '@fastgpt/global/core/workflow/template/system/contextExtract/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import json5 from 'json5';
import {
  ChatCompletionCreateParams,
  ChatCompletionMessageParam,
  ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.history]?: ChatItemType[];
  [NodeInputKeyEnum.contextExtractInput]: string;
  [NodeInputKeyEnum.extractKeys]: ContextExtractAgentItemType[];
  [NodeInputKeyEnum.description]: string;
  [NodeInputKeyEnum.aiModel]: string;
}>;
type Response = DispatchNodeResultType<{
  [NodeOutputKeyEnum.success]?: boolean;
  [NodeOutputKeyEnum.failed]?: boolean;
  [NodeOutputKeyEnum.contextExtractFields]: string;
}>;

type ActionProps = Props & { extractModel: LLMModelItemType };

const agentFunName = 'request_function';

export async function dispatchContentExtract(props: Props): Promise<Response> {
  const {
    user,
    module: { name },
    histories,
    params: { content, history = 6, model, description, extractKeys }
  } = props;

  if (!content) {
    return Promise.reject('Input is empty');
  }

  const extractModel = getLLMModel(model);
  const chatHistories = getHistories(history, histories);

  const { arg, tokens } = await (async () => {
    if (extractModel.toolChoice) {
      return toolChoice({
        ...props,
        histories: chatHistories,
        extractModel
      });
    }
    if (extractModel.functionCall) {
      return functionCall({
        ...props,
        histories: chatHistories,
        extractModel
      });
    }
    return completions({
      ...props,
      histories: chatHistories,
      extractModel
    });
  })();

  // remove invalid key
  for (let key in arg) {
    const item = extractKeys.find((item) => item.key === key);
    if (!item) {
      delete arg[key];
    }
    if (arg[key] === '') {
      delete arg[key];
    }
  }

  // auto fill required fields
  extractKeys.forEach((item) => {
    if (item.required && !arg[item.key]) {
      arg[item.key] = item.defaultValue || '';
    }
  });

  // auth fields
  let success = !extractKeys.find((item) => !(item.key in arg));
  // auth empty value
  if (success) {
    for (const key in arg) {
      const item = extractKeys.find((item) => item.key === key);
      if (!item) {
        success = false;
        break;
      }
    }
  }

  const { totalPoints, modelName } = formatModelChars2Points({
    model: extractModel.model,
    tokens,
    modelType: ModelTypeEnum.llm
  });

  return {
    [NodeOutputKeyEnum.success]: success ? true : undefined,
    [NodeOutputKeyEnum.failed]: success ? undefined : true,
    [NodeOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
    ...arg,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
      model: modelName,
      query: content,
      tokens,
      extractDescription: description,
      extractResult: arg,
      contextTotalLen: chatHistories.length + 2
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
        model: modelName,
        tokens
      }
    ]
  };
}

const getFunctionCallSchema = async ({
  extractModel,
  histories,
  params: { content, extractKeys, description }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    ...histories,
    {
      obj: ChatRoleEnum.Human,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: {
            content: `我正在执行一个函数,需要你提供一些参数,请以 JSON 字符串格式返回这些参数,要求:
"""
${description ? `- ${description}` : ''}
- 不是每个参数都是必须生成的,如果没有合适的参数值,不要生成该参数,或返回空字符串。
- 需要结合前面的对话内容,一起生成合适的参数。
"""

本次输入内容: ${content}
`
          }
        }
      ]
    }
  ];
  const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
  const filterMessages = await filterGPTMessageByMaxTokens({
    messages: adaptMessages,
    maxTokens: extractModel.maxContext
  });

  const properties: Record<
    string,
    {
      type: string;
      description: string;
    }
  > = {};
  extractKeys.forEach((item) => {
    properties[item.key] = {
      type: 'string',
      description: item.desc,
      ...(item.enum ? { enum: item.enum.split('\n') } : {})
    };
  });
  // function body
  const agentFunction = {
    name: agentFunName,
    description: '需要执行的函数',
    parameters: {
      type: 'object',
      properties
    }
  };

  return {
    filterMessages,
    agentFunction
  };
};

const toolChoice = async (props: ActionProps) => {
  const { user, extractModel } = props;

  const { filterMessages, agentFunction } = await getFunctionCallSchema(props);

  const tools: ChatCompletionTool[] = [
    {
      type: 'function',
      function: agentFunction
    }
  ];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const response = await ai.chat.completions.create({
    model: extractModel.model,
    temperature: 0,
    messages: filterMessages,
    tools,
    tool_choice: { type: 'function', function: { name: agentFunName } }
  });

  const arg: Record<string, any> = (() => {
    try {
      return json5.parse(
        response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || '{}'
      );
    } catch (error) {
      console.log(agentFunction.parameters);
      console.log(response.choices?.[0]?.message?.tool_calls?.[0]?.function);
      console.log('Your model may not support tool_call', error);
      return {};
    }
  })();

  const completeMessages: ChatCompletionMessageParam[] = [
    ...filterMessages,
    {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      tool_calls: response.choices?.[0]?.message?.tool_calls
    }
  ];
  return {
    tokens: await countGptMessagesTokens(completeMessages, tools),
    arg
  };
};

const functionCall = async (props: ActionProps) => {
  const { user, extractModel } = props;

  const { agentFunction, filterMessages } = await getFunctionCallSchema(props);
  const functions: ChatCompletionCreateParams.Function[] = [agentFunction];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const response = await ai.chat.completions.create({
    model: extractModel.model,
    temperature: 0,
    messages: filterMessages,
    function_call: {
      name: agentFunName
    },
    functions
  });

  try {
    const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
    const completeMessages: ChatCompletionMessageParam[] = [
      ...filterMessages,
      {
        role: ChatCompletionRequestMessageRoleEnum.Assistant,
        function_call: response.choices?.[0]?.message?.function_call
      }
    ];

    return {
      arg,
      tokens: await countGptMessagesTokens(completeMessages, undefined, functions)
    };
  } catch (error) {
    console.log(response.choices?.[0]?.message);

    console.log('Your model may not support tool_call', error);

    return {
      arg: {},
      tokens: 0
    };
  }
};

const completions = async ({
  extractModel,
  user,
  histories,
  params: { content, extractKeys, description }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    {
      obj: ChatRoleEnum.Human,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: {
            content: replaceVariable(extractModel.customExtractPrompt || Prompt_ExtractJson, {
              description,
              json: extractKeys
                .map(
                  (item) =>
                    `{"key":"${item.key}", "description":"${item.desc}"${
                      item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
                    }}`
                )
                .join('\n'),
              text: `${histories.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`).join('\n')}
Human: ${content}`
            })
          }
        }
      ]
    }
  ];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });
  const data = await ai.chat.completions.create({
    model: extractModel.model,
    temperature: 0.01,
    messages: chats2GPTMessages({ messages, reserveId: false }),
    stream: false
  });
  const answer = data.choices?.[0].message?.content || '';

  // parse response
  const start = answer.indexOf('{');
  const end = answer.lastIndexOf('}');

  if (start === -1 || end === -1) {
    return {
      rawResponse: answer,
      tokens: await countMessagesTokens(messages),
      arg: {}
    };
  }

  const jsonStr = answer
    .substring(start, end + 1)
    .replace(/(\\n|\\)/g, '')
    .replace(/ /g, '');

  try {
    return {
      rawResponse: answer,
      tokens: await countMessagesTokens(messages),
      arg: json5.parse(jsonStr) as Record<string, any>
    };
  } catch (error) {
    console.log(error);
    return {
      rawResponse: answer,
      tokens: await countMessagesTokens(messages),
      arg: {}
    };
  }
};

@ -1,39 +0,0 @@
export const Prompt_Tool_Call = `<Instruction>
你是一个智能机器人,除了可以回答用户问题外,你还掌握工具的使用能力。有时候,你可以依赖工具的运行结果,来更准确的回答用户。

工具使用了 JSON Schema 的格式声明,其中 toolId 是工具的 description 是工具的描述,parameters 是工具的参数,包括参数的类型和描述,required 是必填参数的列表。

请你根据工具描述,决定回答问题或是使用工具。在完成任务过程中,USER代表用户的输入,TOOL_RESPONSE代表工具运行结果。ASSISTANT 代表你的输出。
你的每次输出都必须以0,1开头,代表是否需要调用工具:
0: 不使用工具,直接回答内容。
1: 使用工具,返回工具调用的参数。

例如:

USER: 你好呀
ANSWER: 0: 你好,有什么可以帮助你的么?
USER: 今天杭州的天气如何
ANSWER: 1: {"toolId":"testToolId",arguments:{"city": "杭州"}}
TOOL_RESPONSE: """
晴天......
"""
ANSWER: 0: 今天杭州是晴天。
USER: 今天杭州的天气适合去哪里玩?
ANSWER: 1: {"toolId":"testToolId2",arguments:{"query": "杭州 天气 去哪里玩"}}
TOOL_RESPONSE: """
晴天. 西湖、灵隐寺、千岛湖……
"""
ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地玩。
</Instruction>

现在,我们开始吧!下面是你本次可以使用的工具:

"""
{{toolsPrompt}}
"""

下面是正式的对话内容:

USER: {{question}}
ANSWER: 
`;

@ -1,410 +0,0 @@
// @ts-nocheck
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
import {
  ChatCompletion,
  StreamChatType,
  ChatCompletionMessageParam,
  ChatCompletionCreateParams,
  ChatCompletionMessageFunctionCall,
  ChatCompletionFunctionMessageParam,
  ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
  responseWrite,
  responseWriteController,
  responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlowV1 } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { AIChatItemType, AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';

type FunctionRunResponseType = {
  moduleRunResponse: DispatchFlowResponse;
  functionCallMsg: ChatCompletionFunctionMessageParam;
}[];

export const runToolWithFunctionCall = async (
  props: DispatchToolModuleProps & {
    messages: ChatCompletionMessageParam[];
    toolModules: ToolModuleItemType[];
    toolModel: LLMModelItemType;
  },
  response?: RunToolResponse
): Promise<RunToolResponse> => {
  const {
    toolModel,
    toolModules,
    messages,
    res,
    runtimeModules,
    detail = false,
    module,
    stream
  } = props;
  const assistantResponses = response?.assistantResponses || [];

  const functions: ChatCompletionCreateParams.Function[] = toolModules.map((module) => {
    const properties: Record<
      string,
      {
        type: string;
        description: string;
        required?: boolean;
      }
    > = {};
    module.toolParams.forEach((item) => {
      properties[item.key] = {
        type: 'string',
        description: item.toolDescription || ''
      };
    });

    return {
      name: module.moduleId,
      description: module.intro,
      parameters: {
        type: 'object',
        properties,
        required: module.toolParams.filter((item) => item.required).map((item) => item.key)
      }
    };
  });

  const filterMessages = await filterGPTMessageByMaxTokens({
    messages,
    maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
  });

  /* Run llm */
  const ai = getAIApi({
    timeout: 480000
  });
  const aiResponse = await ai.chat.completions.create(
    {
      ...toolModel?.defaultConfig,
      model: toolModel.model,
      temperature: 0,
      stream,
      messages: filterMessages,
      functions,
      function_call: 'auto'
    },
    {
      headers: {
        Accept: 'application/json, text/plain, */*'
      }
    }
  );

  const { answer, functionCalls } = await (async () => {
    if (stream) {
      return streamResponse({
        res,
        detail,
        toolModules,
        stream: aiResponse
      });
    } else {
      const result = aiResponse as ChatCompletion;
      const function_call = result.choices?.[0]?.message?.function_call;
      const toolModule = toolModules.find((module) => module.moduleId === function_call?.name);

      const toolCalls = function_call
        ? [
            {
              ...function_call,
              id: getNanoid(),
              toolName: toolModule?.name,
              toolAvatar: toolModule?.avatar
            }
          ]
        : [];

      return {
        answer: result.choices?.[0]?.message?.content || '',
        functionCalls: toolCalls
      };
    }
  })();

  // Run the selected tool.
  const toolsRunResponse = (
    await Promise.all(
      functionCalls.map(async (tool) => {
        if (!tool) return;

        const toolModule = toolModules.find((module) => module.moduleId === tool.name);

        if (!toolModule) return;

        const startParams = (() => {
          try {
            return json5.parse(tool.arguments);
          } catch (error) {
            return {};
          }
        })();

        const moduleRunResponse = await dispatchWorkFlowV1({
          ...props,
          runtimeModules: runtimeModules.map((module) => ({
            ...module,
            isEntry: module.moduleId === toolModule.moduleId
          })),
          startParams
        });

        const stringToolResponse = (() => {
          if (typeof moduleRunResponse.toolResponses === 'object') {
            return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
          }

          return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
        })();

        const functionCallMsg: ChatCompletionFunctionMessageParam = {
          role: ChatCompletionRequestMessageRoleEnum.Function,
          name: tool.name,
          content: stringToolResponse
        };

        if (stream && detail) {
          responseWrite({
            res,
            event: SseResponseEventEnum.toolResponse,
            data: JSON.stringify({
              tool: {
                id: tool.id,
                toolName: '',
                toolAvatar: '',
                params: '',
                response: stringToolResponse
              }
            })
          });
        }

        return {
          moduleRunResponse,
          functionCallMsg
        };
      })
    )
  ).filter(Boolean) as FunctionRunResponseType;

  const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();

  const functionCall = functionCalls[0];
  if (functionCall && !res.closed) {
    // Run the tool, combine its results, and perform another round of AI calls
    const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      function_call: functionCall
    };
    const concatToolMessages = [
      ...filterMessages,
      assistantToolMsgParams
    ] as ChatCompletionMessageParam[];
    const tokens = await countGptMessagesTokens(concatToolMessages, undefined, functions);
    const completeMessages = [
      ...concatToolMessages,
      ...toolsRunResponse.map((item) => item?.functionCallMsg)
    ];
    // console.log(tokens, 'tool');

    if (stream && detail) {
      responseWriteNodeStatus({
        res,
        name: module.name
      });
    }

    // tool assistant
    const toolAssistants = toolsRunResponse
      .map((item) => {
        const assistantResponses = item.moduleRunResponse.assistantResponses || [];
        return assistantResponses;
      })
      .flat();
    // tool node assistant
    const adaptChatMessages = GPTMessages2Chats(completeMessages);
    const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;

    const toolNodeAssistants = [
      ...assistantResponses,
      ...toolAssistants,
      ...toolNodeAssistant.value
    ];

    // concat tool responses
    const dispatchFlowResponse = response
      ? response.dispatchFlowResponse.concat(flatToolsResponseData)
      : flatToolsResponseData;

    /* check stop signal */
    const hasStopSignal = flatToolsResponseData.some(
      (item) => !!item.flowResponses?.find((item) => item.toolStop)
    );
    if (hasStopSignal) {
      return {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        completeMessages: filterMessages,
        assistantResponses: toolNodeAssistants
      };
    }

    return runToolWithFunctionCall(
      {
        ...props,
        messages: completeMessages
      },
      {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        assistantResponses: toolNodeAssistants
      }
    );
  } else {
    // No tool is invoked, indicating that the process is over
    const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      content: answer
    };
    const completeMessages = filterMessages.concat(gptAssistantResponse);
    const tokens = await countGptMessagesTokens(completeMessages, undefined, functions);
    // console.log(tokens, 'response token');

    // concat tool assistant
    const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;

    return {
      dispatchFlowResponse: response?.dispatchFlowResponse || [],
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      completeMessages,
      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
    };
  }
};

async function streamResponse({
  res,
  detail,
  toolModules,
  stream
}: {
  res: NextApiResponse;
  detail: boolean;
  toolModules: ToolModuleItemType[];
  stream: StreamChatType;
}) {
  const write = responseWriteController({
    res,
    readStream: stream
  });

  let textAnswer = '';
  let functionCalls: ChatCompletionMessageFunctionCall[] = [];
  let functionId = getNanoid();

  for await (const part of stream) {
    if (res.closed) {
      stream.controller?.abort();
      break;
    }

    const responseChoice = part.choices?.[0]?.delta;
    if (responseChoice.content) {
      const content = responseChoice?.content || '';
      textAnswer += content;

      responseWrite({
        write,
        event: detail ? SseResponseEventEnum.answer : undefined,
        data: textAdaptGptResponse({
          text: content
        })
      });
    } else if (responseChoice.function_call) {
      const functionCall: {
        arguments: string;
        name?: string;
      } = responseChoice.function_call;

      // In a stream response, only one function is returned at a time; a name field marks a newly triggered function
      if (functionCall?.name) {
        functionId = getNanoid();
        const toolModule = toolModules.find((module) => module.moduleId === functionCall?.name);

        if (toolModule) {
          if (functionCall?.arguments === undefined) {
            functionCall.arguments = '';
          }
          functionCalls.push({
            ...functionCall,
            id: functionId,
            name: functionCall.name,
            toolName: toolModule.name,
            toolAvatar: toolModule.avatar
          });

          if (detail) {
            responseWrite({
              write,
              event: SseResponseEventEnum.toolCall,
              data: JSON.stringify({
                tool: {
                  id: functionId,
                  toolName: toolModule.name,
                  toolAvatar: toolModule.avatar,
                  functionName: functionCall.name,
                  params: functionCall.arguments,
                  response: ''
                }
              })
            });
          }
        }
      }
      /* Append the streamed arg chunk to the last tool's arguments */
      const arg: string = functionCall?.arguments || '';
      const currentTool = functionCalls[functionCalls.length - 1];
      if (currentTool) {
        currentTool.arguments += arg;

        if (detail) {
          responseWrite({
            write,
            event: SseResponseEventEnum.toolParams,
            data: JSON.stringify({
              tool: {
                id: functionId,
                toolName: '',
                toolAvatar: '',
                params: arg,
                response: ''
              }
            })
          });
        }
      }
    }
  }

  if (!textAnswer && functionCalls.length === 0) {
    return Promise.reject('LLM api response empty');
  }

  return { answer: textAnswer, functionCalls };
}

@ -1,158 +0,0 @@
// @ts-nocheck
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type {
  DispatchNodeResultType,
  RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
import { getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolModuleItemType } from './type.d';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
  GPTMessages2Chats,
  chats2GPTMessages,
  getSystemPrompt,
  runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { runToolWithFunctionCall } from './functionCall';
import { runToolWithPromptCall } from './promptCall';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_Tool_Call } from './constants';

type Response = DispatchNodeResultType<{}>;

export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
  const {
    module: { name, outputs },
    runtimeModules,
    histories,
    params: { model, systemPrompt, userChatInput, history = 6 }
  } = props;

  const toolModel = getLLMModel(model);
  const chatHistories = getHistories(history, histories);

  /* get tool params */

  // get tool output targets
  const toolOutput = outputs.find((output) => output.key === NodeOutputKeyEnum.selectedTools);

  if (!toolOutput) {
    return Promise.reject('No tool output found');
  }

  const targets = toolOutput.targets;

  // Gets the module to which the tool is connected
  const toolModules = targets
    .map((item) => {
      const tool = runtimeModules.find((module) => module.moduleId === item.moduleId);
      return tool;
    })
    .filter(Boolean)
    .map<ToolModuleItemType>((tool) => {
      const toolParams = tool?.inputs.filter((input) => !!input.toolDescription) || [];
      return {
        ...(tool as RuntimeNodeItemType),
        toolParams
      };
    });

  const messages: ChatItemType[] = [
    ...getSystemPrompt(systemPrompt),
    ...chatHistories,
    {
      obj: ChatRoleEnum.Human,
      value: runtimePrompt2ChatsValue({
        text: userChatInput,
        files: []
      })
    }
  ];

  const {
    dispatchFlowResponse, // tool flow response
    totalTokens,
    completeMessages = [], // The actual message sent to AI(just save text)
    assistantResponses = [] // FastGPT system store assistant.value response
  } = await (async () => {
    const adaptMessages = chats2GPTMessages({ messages, reserveId: false });

    if (toolModel.toolChoice) {
      return runToolWithToolChoice({
        ...props,
        toolModules,
        toolModel,
        messages: adaptMessages
      });
    }
    if (toolModel.functionCall) {
      return runToolWithFunctionCall({
        ...props,
        toolModules,
        toolModel,
        messages: adaptMessages
      });
    }

    const lastMessage = adaptMessages[adaptMessages.length - 1];
    if (typeof lastMessage.content !== 'string') {
      return Promise.reject('暂时只支持纯文本');
    }

    lastMessage.content = replaceVariable(Prompt_Tool_Call, {
      question: userChatInput
    });

    return runToolWithPromptCall({
      ...props,
      toolModules,
      toolModel,
      messages: adaptMessages
    });
  })();

  const { totalPoints, modelName } = formatModelChars2Points({
    model,
    tokens: totalTokens,
    modelType: ModelTypeEnum.llm
  });

  // flat child tool response
  const childToolResponse = dispatchFlowResponse.map((item) => item.flowResponses).flat();

  // concat tool usage
  const totalPointsUsage =
    totalPoints +
    dispatchFlowResponse.reduce((sum, item) => {
      const childrenTotal = item.flowUsages.reduce((sum, item) => sum + item.totalPoints, 0);
      return sum + childrenTotal;
    }, 0);
  const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();

  return {
    [DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: totalPointsUsage,
      toolCallTokens: totalTokens,
      model: modelName,
      query: userChatInput,
      historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false)),
      toolDetail: childToolResponse
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints,
        model: modelName,
        tokens: totalTokens
      },
      ...flatUsages
    ]
  };
};

@ -1,388 +0,0 @@
|
|||
// @ts-nocheck
|
||||
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
|
||||
import { getAIApi } from '../../../../ai/config';
|
||||
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
|
||||
import {
|
||||
ChatCompletion,
|
||||
StreamChatType,
|
||||
ChatCompletionMessageParam,
|
||||
ChatCompletionAssistantMessageParam
|
||||
} from '@fastgpt/global/core/ai/type';
|
||||
import { NextApiResponse } from 'next';
|
||||
import {
|
||||
responseWrite,
|
||||
responseWriteController,
|
||||
responseWriteNodeStatus
|
||||
} from '../../../../../common/response';
|
||||
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
|
||||
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
|
||||
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
|
||||
import { dispatchWorkFlowV1 } from '../../index';
|
||||
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
|
||||
import json5 from 'json5';
|
||||
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken';
|
||||
import { getNanoid, replaceVariable } from '@fastgpt/global/common/string/tools';
|
||||
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
|
||||
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
|
||||
|
||||
type FunctionCallCompletion = {
|
||||
id: string;
|
||||
name: string;
|
||||
arguments: string;
|
||||
toolName?: string;
|
||||
toolAvatar?: string;
|
||||
};
|
||||
|
||||
export const runToolWithPromptCall = async (
|
||||
props: DispatchToolModuleProps & {
|
||||
messages: ChatCompletionMessageParam[];
|
||||
toolModules: ToolModuleItemType[];
|
||||
toolModel: LLMModelItemType;
|
||||
},
|
||||
response?: RunToolResponse
|
||||
): Promise<RunToolResponse> => {
|
||||
const {
|
||||
toolModel,
|
||||
toolModules,
|
||||
messages,
|
||||
res,
|
||||
runtimeModules,
|
||||
detail = false,
|
||||
module,
|
||||
stream
|
||||
} = props;
|
||||
const assistantResponses = response?.assistantResponses || [];
|
||||
|
||||
const toolsPrompt = JSON.stringify(
|
||||
toolModules.map((module) => {
|
||||
const properties: Record<
|
||||
string,
|
||||
{
|
||||
type: string;
|
||||
description: string;
|
||||
required?: boolean;
|
||||
}
|
||||
> = {};
|
||||
module.toolParams.forEach((item) => {
|
||||
properties[item.key] = {
|
||||
type: 'string',
|
||||
description: item.toolDescription || ''
|
||||
};
|
||||
});
|
||||
|
||||
return {
|
||||
toolId: module.moduleId,
|
||||
description: module.intro,
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties,
|
||||
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
|
||||
}
|
||||
};
|
||||
})
|
||||
);
|
||||
|
||||
const lastMessage = messages[messages.length - 1];
|
||||
if (typeof lastMessage.content !== 'string') {
|
||||
return Promise.reject('暂时只支持纯文本');
|
||||
}
|
||||
lastMessage.content = replaceVariable(lastMessage.content, {
|
||||
toolsPrompt
|
||||
});
|
||||
|
||||
const filterMessages = await filterGPTMessageByMaxTokens({
|
||||
messages,
|
||||
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
|
||||
});
|
||||
// console.log(JSON.stringify(filterMessages, null, 2));
|
||||
/* Run llm */
|
||||
const ai = getAIApi({
|
||||
timeout: 480000
|
||||
});
|
||||
const aiResponse = await ai.chat.completions.create(
|
||||
{
|
||||
...toolModel?.defaultConfig,
|
||||
model: toolModel.model,
|
||||
temperature: 0,
|
||||
stream,
|
||||
messages: filterMessages
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
Accept: 'application/json, text/plain, */*'
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
const answer = await (async () => {
|
||||
if (stream) {
|
||||
const { answer } = await streamResponse({
|
||||
res,
|
||||
detail,
|
||||
toolModules,
|
||||
stream: aiResponse
|
||||
});
|
||||
|
||||
return answer;
|
||||
} else {
|
||||
const result = aiResponse as ChatCompletion;
|
||||
|
||||
return result.choices?.[0]?.message?.content || '';
|
||||
}
|
||||
})();
|
||||
|
||||
const parseAnswerResult = parseAnswer(answer);
|
||||
// console.log(parseAnswer, '==11==');
|
||||
// No tools
|
||||
if (typeof parseAnswerResult === 'string') {
|
||||
// No tool is invoked, indicating that the process is over
|
||||
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
|
||||
role: ChatCompletionRequestMessageRoleEnum.Assistant,
|
||||
content: parseAnswerResult
|
||||
};
|
||||
const completeMessages = filterMessages.concat(gptAssistantResponse);
|
||||
const tokens = await countGptMessagesTokens(completeMessages, undefined);
|
||||
// console.log(tokens, 'response token');
|
||||
|
||||
// concat tool assistant
|
||||
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
|
||||
|
||||
return {
|
||||
dispatchFlowResponse: response?.dispatchFlowResponse || [],
|
||||
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
|
||||
completeMessages,
|
||||
      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
    };
  }

  // Run the selected tool.
  const toolsRunResponse = await (async () => {
    if (!parseAnswerResult) return Promise.reject('tool run error');

    const toolModule = toolModules.find((module) => module.moduleId === parseAnswerResult.name);
    if (!toolModule) return Promise.reject('tool not found');

    parseAnswerResult.toolName = toolModule.name;
    parseAnswerResult.toolAvatar = toolModule.avatar;

    // run tool flow
    const startParams = (() => {
      try {
        return json5.parse(parseAnswerResult.arguments);
      } catch (error) {
        return {};
      }
    })();

    // SSE response to client
    if (stream && detail) {
      responseWrite({
        res,
        event: SseResponseEventEnum.toolCall,
        data: JSON.stringify({
          tool: {
            id: parseAnswerResult.id,
            toolName: toolModule.name,
            toolAvatar: toolModule.avatar,
            functionName: parseAnswerResult.name,
            params: parseAnswerResult.arguments,
            response: ''
          }
        })
      });
    }

    const moduleRunResponse = await dispatchWorkFlowV1({
      ...props,
      runtimeModules: runtimeModules.map((module) => ({
        ...module,
        isEntry: module.moduleId === toolModule.moduleId
      })),
      startParams
    });

    const stringToolResponse = (() => {
      if (typeof moduleRunResponse.toolResponses === 'object') {
        return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
      }

      return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
    })();

    if (stream && detail) {
      responseWrite({
        res,
        event: SseResponseEventEnum.toolResponse,
        data: JSON.stringify({
          tool: {
            id: parseAnswerResult.id,
            toolName: '',
            toolAvatar: '',
            params: '',
            response: stringToolResponse
          }
        })
      });
    }

    return {
      moduleRunResponse,
      toolResponsePrompt: stringToolResponse
    };
  })();

  if (stream && detail) {
    responseWriteNodeStatus({
      res,
      name: module.name
    });
  }

  // Merge the tool-call result, stored in functionCall format.
  const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
    role: ChatCompletionRequestMessageRoleEnum.Assistant,
    function_call: parseAnswerResult
  };
  const concatToolMessages = [
    ...filterMessages,
    assistantToolMsgParams
  ] as ChatCompletionMessageParam[];
  const tokens = await countGptMessagesTokens(concatToolMessages, undefined);
  const completeMessages: ChatCompletionMessageParam[] = [
    ...concatToolMessages,
    {
      role: ChatCompletionRequestMessageRoleEnum.Function,
      name: parseAnswerResult.name,
      content: toolsRunResponse.toolResponsePrompt
    }
  ];

  // tool assistant
  const toolAssistants = toolsRunResponse.moduleRunResponse.assistantResponses || [];
  // tool node assistant
  const adaptChatMessages = GPTMessages2Chats(completeMessages);
  const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;

  const toolNodeAssistants = [...assistantResponses, ...toolAssistants, ...toolNodeAssistant.value];

  const dispatchFlowResponse = response
    ? response.dispatchFlowResponse.concat(toolsRunResponse.moduleRunResponse)
    : [toolsRunResponse.moduleRunResponse];

  // get the next user prompt
  lastMessage.content += `${answer}
TOOL_RESPONSE: """
${toolsRunResponse.toolResponsePrompt}
"""
ANSWER: `;

  /* check stop signal */
  const hasStopSignal = toolsRunResponse.moduleRunResponse.flowResponses.some(
    (item) => !!item.toolStop
  );
  if (hasStopSignal) {
    return {
      dispatchFlowResponse,
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      completeMessages: filterMessages,
      assistantResponses: toolNodeAssistants
    };
  }

  return runToolWithPromptCall(
    {
      ...props,
      messages
    },
    {
      dispatchFlowResponse,
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      assistantResponses: toolNodeAssistants
    }
  );
};

async function streamResponse({
  res,
  detail,
  stream
}: {
  res: NextApiResponse;
  detail: boolean;
  toolModules: ToolModuleItemType[];
  stream: StreamChatType;
}) {
  const write = responseWriteController({
    res,
    readStream: stream
  });

  let startResponseWrite = false;
  let textAnswer = '';

  for await (const part of stream) {
    if (res.closed) {
      stream.controller?.abort();
      break;
    }

    const responseChoice = part.choices?.[0]?.delta;
    if (responseChoice?.content) {
      const content = responseChoice?.content || '';
      textAnswer += content;

      if (startResponseWrite) {
        responseWrite({
          write,
          event: detail ? SseResponseEventEnum.answer : undefined,
          data: textAdaptGptResponse({
            text: content
          })
        });
      } else if (textAnswer.length >= 3) {
        textAnswer = textAnswer.trim();
        if (textAnswer.startsWith('0')) {
          startResponseWrite = true;
          // find first : index
          const firstIndex = textAnswer.indexOf(':');
          textAnswer = textAnswer.substring(firstIndex + 1).trim();
          responseWrite({
            write,
            event: detail ? SseResponseEventEnum.answer : undefined,
            data: textAdaptGptResponse({
              text: textAnswer
            })
          });
        }
      }
    }
  }

  if (!textAnswer) {
    return Promise.reject('LLM api response empty');
  }
  // console.log(textAnswer, '---===');
  return { answer: textAnswer.trim() };
}

const parseAnswer = (str: string): FunctionCallCompletion | string => {
  // Extract the tool id and arguments from the model output (a '1:'-prefixed JSON5 string).
  const prefix = '1:';
  str = str.trim();
  if (str.startsWith(prefix)) {
    const toolString = str.substring(prefix.length).trim();

    try {
      const toolCall = json5.parse(toolString);
      return {
        id: getNanoid(),
        name: toolCall.toolId,
        arguments: JSON.stringify(toolCall.arguments || toolCall.parameters)
      };
    } catch (error) {
      return str;
    }
  } else {
    return str;
  }
};
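
For reference, a minimal sketch of the `0:`/`1:` answer protocol that `parseAnswer` and this file's `streamResponse` assume; the sample strings are hypothetical model outputs, not taken from this commit:

// '0:' prefixes a plain text answer; '1:' prefixes a JSON5 tool call.
const plain = parseAnswer('0: The weather in Paris is sunny.');
// => '0: The weather in Paris is sunny.' (returned as-is, trimmed; streamResponse
//    strips everything up to the first ':' before writing to the client)
const toolCall = parseAnswer('1: { toolId: "searchNode", arguments: { city: "Paris" } }');
// => { id: '<random nanoid>', name: 'searchNode', arguments: '{"city":"Paris"}' }
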
@@ -1,14 +0,0 @@
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';

export type AnswerProps = ModuleDispatchProps<{}>;
export type AnswerResponse = DispatchNodeResultType<{}>;

export const dispatchStopToolCall = (props: Record<string, any>): AnswerResponse => {
  return {
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      toolStop: true
    }
  };
};
@@ -1,413 +0,0 @@
// @ts-nocheck
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
import {
  ChatCompletion,
  ChatCompletionMessageToolCall,
  StreamChatType,
  ChatCompletionToolMessageParam,
  ChatCompletionAssistantToolParam,
  ChatCompletionMessageParam,
  ChatCompletionTool,
  ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
  responseWrite,
  responseWriteController,
  responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlowV1 } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';

type ToolRunResponseType = {
  moduleRunResponse: DispatchFlowResponse;
  toolMsgParams: ChatCompletionToolMessageParam;
}[];

/*
  Call flow:
  1. `messages` holds the messages sent to the AI.
  2. `response` accumulates the results of recursive runs (dispatchFlowResponse, totalTokens and assistantResponses).
  3. When a tool runs, its results are appended to dispatchFlowResponse, the tokens consumed this round are added to totalTokens, and assistantResponses records the current tool run's output.
*/

export const runToolWithToolChoice = async (
  props: DispatchToolModuleProps & {
    messages: ChatCompletionMessageParam[];
    toolModules: ToolModuleItemType[];
    toolModel: LLMModelItemType;
  },
  response?: RunToolResponse
): Promise<RunToolResponse> => {
  const {
    toolModel,
    toolModules,
    messages,
    res,
    runtimeModules,
    detail = false,
    module,
    stream
  } = props;
  const assistantResponses = response?.assistantResponses || [];

  const tools: ChatCompletionTool[] = toolModules.map((module) => {
    const properties: Record<
      string,
      {
        type: string;
        description: string;
        required?: boolean;
      }
    > = {};
    module.toolParams.forEach((item) => {
      properties[item.key] = {
        type: 'string',
        description: item.toolDescription || ''
      };
    });

    return {
      type: 'function',
      function: {
        name: module.moduleId,
        description: module.intro,
        parameters: {
          type: 'object',
          properties,
          required: module.toolParams.filter((item) => item.required).map((item) => item.key)
        }
      }
    };
  });

  const filterMessages = await filterGPTMessageByMaxTokens({
    messages,
    maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
  });

  /* Run llm */
  const ai = getAIApi({
    timeout: 480000
  });
  const aiResponse = await ai.chat.completions.create(
    {
      ...toolModel?.defaultConfig,
      model: toolModel.model,
      temperature: 0,
      stream,
      messages: filterMessages,
      tools,
      tool_choice: 'auto'
    },
    {
      headers: {
        Accept: 'application/json, text/plain, */*'
      }
    }
  );

  const { answer, toolCalls } = await (async () => {
    if (stream) {
      return streamResponse({
        res,
        detail,
        toolModules,
        stream: aiResponse
      });
    } else {
      const result = aiResponse as ChatCompletion;
      const calls = result.choices?.[0]?.message?.tool_calls || [];

      // Attach the tool name and avatar.
      const toolCalls = calls.map((tool) => {
        const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
        return {
          ...tool,
          toolName: toolModule?.name || '',
          toolAvatar: toolModule?.avatar || ''
        };
      });

      return {
        answer: result.choices?.[0]?.message?.content || '',
        toolCalls: toolCalls
      };
    }
  })();

  // Run the selected tool.
  const toolsRunResponse = (
    await Promise.all(
      toolCalls.map(async (tool) => {
        const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);

        if (!toolModule) return;

        const startParams = (() => {
          try {
            return json5.parse(tool.function.arguments);
          } catch (error) {
            return {};
          }
        })();

        const moduleRunResponse = await dispatchWorkFlowV1({
          ...props,
          runtimeModules: runtimeModules.map((module) => ({
            ...module,
            isEntry: module.moduleId === toolModule.moduleId
          })),
          startParams
        });

        const stringToolResponse = (() => {
          if (typeof moduleRunResponse.toolResponses === 'object') {
            return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
          }

          return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
        })();

        const toolMsgParams: ChatCompletionToolMessageParam = {
          tool_call_id: tool.id,
          role: ChatCompletionRequestMessageRoleEnum.Tool,
          name: tool.function.name,
          content: stringToolResponse
        };

        if (stream && detail) {
          responseWrite({
            res,
            event: SseResponseEventEnum.toolResponse,
            data: JSON.stringify({
              tool: {
                id: tool.id,
                toolName: '',
                toolAvatar: '',
                params: '',
                response: stringToolResponse
              }
            })
          });
        }

        return {
          moduleRunResponse,
          toolMsgParams
        };
      })
    )
  ).filter(Boolean) as ToolRunResponseType;

  const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
  if (toolCalls.length > 0 && !res.closed) {
    // Run the tool, combine its results, and perform another round of AI calls
    const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      tool_calls: toolCalls
    };
    const concatToolMessages = [
      ...filterMessages,
      assistantToolMsgParams
    ] as ChatCompletionMessageParam[];
    const tokens = await countGptMessagesTokens(concatToolMessages, tools);
    const completeMessages = [
      ...concatToolMessages,
      ...toolsRunResponse.map((item) => item?.toolMsgParams)
    ];

    // console.log(tokens, 'tool');

    if (stream && detail) {
      responseWriteNodeStatus({
        res,
        name: module.name
      });
    }

    // tool assistant
    const toolAssistants = toolsRunResponse
      .map((item) => {
        const assistantResponses = item.moduleRunResponse.assistantResponses || [];
        return assistantResponses;
      })
      .flat();

    // tool node assistant
    const adaptChatMessages = GPTMessages2Chats(completeMessages);
    const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;

    const toolNodeAssistants = [
      ...assistantResponses,
      ...toolAssistants,
      ...toolNodeAssistant.value
    ];

    // concat tool responses
    const dispatchFlowResponse = response
      ? response.dispatchFlowResponse.concat(flatToolsResponseData)
      : flatToolsResponseData;

    /* check stop signal */
    const hasStopSignal = flatToolsResponseData.some(
      (item) => !!item.flowResponses?.find((item) => item.toolStop)
    );
    if (hasStopSignal) {
      return {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        completeMessages,
        assistantResponses: toolNodeAssistants
      };
    }

    return runToolWithToolChoice(
      {
        ...props,
        messages: completeMessages
      },
      {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        assistantResponses: toolNodeAssistants
      }
    );
  } else {
    // No tool is invoked, indicating that the process is over
    const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      content: answer
    };
    const completeMessages = filterMessages.concat(gptAssistantResponse);
    const tokens = await countGptMessagesTokens(completeMessages, tools);
    // console.log(tokens, 'response token');

    // concat tool assistant
    const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;

    return {
      dispatchFlowResponse: response?.dispatchFlowResponse || [],
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      completeMessages,
      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
    };
  }
};

async function streamResponse({
  res,
  detail,
  toolModules,
  stream
}: {
  res: NextApiResponse;
  detail: boolean;
  toolModules: ToolModuleItemType[];
  stream: StreamChatType;
}) {
  const write = responseWriteController({
    res,
    readStream: stream
  });

  let textAnswer = '';
  let toolCalls: ChatCompletionMessageToolCall[] = [];

  for await (const part of stream) {
    if (res.closed) {
      stream.controller?.abort();
      break;
    }

    const responseChoice = part.choices?.[0]?.delta;
    // console.log(JSON.stringify(responseChoice, null, 2));
    if (responseChoice?.content) {
      const content = responseChoice.content || '';
      textAnswer += content;

      responseWrite({
        write,
        event: detail ? SseResponseEventEnum.answer : undefined,
        data: textAdaptGptResponse({
          text: content
        })
      });
    } else if (responseChoice?.tool_calls?.[0]) {
      const toolCall: ChatCompletionMessageToolCall = responseChoice.tool_calls[0];

      // In a stream response, only one tool is returned per chunk. A chunk that carries an id starts a new tool call.
      if (toolCall.id) {
        const toolModule = toolModules.find(
          (module) => module.moduleId === toolCall.function?.name
        );

        if (toolModule) {
          if (toolCall.function?.arguments === undefined) {
            toolCall.function.arguments = '';
          }
          toolCalls.push({
            ...toolCall,
            toolName: toolModule.name,
            toolAvatar: toolModule.avatar
          });

          if (detail) {
            responseWrite({
              write,
              event: SseResponseEventEnum.toolCall,
              data: JSON.stringify({
                tool: {
                  id: toolCall.id,
                  toolName: toolModule.name,
                  toolAvatar: toolModule.avatar,
                  functionName: toolCall.function.name,
                  params: toolCall.function.arguments,
                  response: ''
                }
              })
            });
          }
        }
      }
      /* Append the incoming argument fragment to the last tool call's arguments. */
      const arg: string = responseChoice.tool_calls?.[0]?.function?.arguments;
      const currentTool = toolCalls[toolCalls.length - 1];
      if (currentTool) {
        currentTool.function.arguments += arg;

        if (detail) {
          responseWrite({
            write,
            event: SseResponseEventEnum.toolParams,
            data: JSON.stringify({
              tool: {
                id: currentTool.id,
                toolName: '',
                toolAvatar: '',
                params: arg,
                response: ''
              }
            })
          });
        }
      }
    }
  }

  if (!textAnswer && toolCalls.length === 0) {
    return Promise.reject('LLM api response empty');
  }

  return { answer: textAnswer, toolCalls };
}
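
For clarity, this is the shape of the ChatCompletionTool entry that the `tools` mapping in runToolWithToolChoice produces; the module id, intro, and param keys below are made up for illustration. Note that every parameter is declared as a string, regardless of the input's actual value type:

// Given a hypothetical tool module:
//   moduleId: 'searchNode', intro: 'Search the knowledge base',
//   toolParams: [{ key: 'query', toolDescription: 'Search keywords', required: true },
//                { key: 'topK', toolDescription: 'Result count', required: false }]
// the mapping yields:
const exampleTool = {
  type: 'function',
  function: {
    name: 'searchNode', // the moduleId doubles as the function name the LLM calls
    description: 'Search the knowledge base',
    parameters: {
      type: 'object',
      properties: {
        query: { type: 'string', description: 'Search keywords' },
        topK: { type: 'string', description: 'Result count' }
      },
      required: ['query']
    }
  }
};
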
@@ -1,28 +0,0 @@
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/node/type';
import type {
  ModuleDispatchProps,
  DispatchNodeResponseType
} from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type.d';
import { AIChatItemValueItemType, ChatItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type';

export type DispatchToolModuleProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.history]?: ChatItemType[];
  [NodeInputKeyEnum.aiModel]: string;
  [NodeInputKeyEnum.aiSystemPrompt]: string;
  [NodeInputKeyEnum.userChatInput]: string;
}>;

export type RunToolResponse = {
  dispatchFlowResponse: DispatchFlowResponse[];
  totalTokens: number;
  completeMessages?: ChatCompletionMessageParam[];
  assistantResponses?: AIChatItemValueItemType[];
};
export type ToolModuleItemType = RuntimeNodeItemType & {
  toolParams: RuntimeNodeItemType['inputs'];
};
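
Both runners accumulate a RunToolResponse across their recursive rounds; a minimal sketch of that accumulation, with illustrative token counts (the empty arrays stand in for the real accumulated values):

import type { RunToolResponse } from './type.d';

// Round 1: the model calls a tool; that round's messages count 120 tokens.
// Round 2: the model answers in plain text; that round counts 80 tokens.
const round1Tokens = 120;
const round2Tokens = 80;

const finalResponse: RunToolResponse = {
  dispatchFlowResponse: [], // would hold each tool sub-workflow's DispatchFlowResponse, in order
  totalTokens: round1Tokens + round2Tokens, // summed via `response.totalTokens + tokens`
  completeMessages: [], // the full message list, ending with the assistant answer
  assistantResponses: [] // the concatenated AIChatItemValueItemType[] shown to the user
};
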
@@ -1,396 +0,0 @@
// @ts-nocheck
import type { NextApiResponse } from 'next';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { getAIApi } from '../../../ai/config';
import type {
  ChatCompletion,
  ChatCompletionMessageParam,
  StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { FlowNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import type { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import {
  countMessagesTokens,
  countGptMessagesTokens
} from '../../../../common/string/tiktoken/index';
import {
  chats2GPTMessages,
  getSystemPrompt,
  GPTMessages2Chats,
  runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import {
  Prompt_QuotePromptList,
  Prompt_QuoteTemplateList
} from '@fastgpt/global/core/ai/prompt/AIChat';
import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { responseWrite, responseWriteController } from '../../../../common/response';
import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';

export type ChatProps = ModuleDispatchProps<
  AIChatNodeProps & {
    [NodeInputKeyEnum.userChatInput]: string;
    [NodeInputKeyEnum.history]?: ChatItemType[] | number;
    [NodeInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
  }
>;
export type ChatResponse = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]: string;
  [NodeOutputKeyEnum.history]: ChatItemType[];
}>;

/* request openai chat */
export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResponse> => {
  let {
    res,
    stream = false,
    detail = false,
    user,
    histories,
    module: { name, outputs },
    inputFiles = [],
    params: {
      model,
      temperature = 0,
      maxToken = 4000,
      history = 6,
      quoteQA,
      userChatInput,
      isResponseAnswerText = true,
      systemPrompt = '',
      quoteTemplate,
      quotePrompt
    }
  } = props;
  if (!userChatInput && inputFiles.length === 0) {
    return Promise.reject('Question is empty');
  }
  stream = stream && isResponseAnswerText;

  const chatHistories = getHistories(history, histories);

  // temperature adapt
  const modelConstantsData = getLLMModel(model);

  if (!modelConstantsData) {
    return Promise.reject('The chat model is undefined, you need to select a chat model.');
  }

  const { quoteText } = await filterQuote({
    quoteQA,
    model: modelConstantsData,
    quoteTemplate
  });

  // censor model and system key
  if (modelConstantsData.censor && !user.openaiAccount?.key) {
    await postTextCensor({
      text: `${systemPrompt}
${quoteText}
${userChatInput}
`
    });
  }

  const { filterMessages } = await getChatMessages({
    model: modelConstantsData,
    histories: chatHistories,
    quoteQA,
    quoteText,
    quotePrompt,
    userChatInput,
    inputFiles,
    systemPrompt
  });

  const { max_tokens } = await getMaxTokens({
    model: modelConstantsData,
    maxToken,
    filterMessages
  });

  // FastGPT temperature range: 1~10
  temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
  temperature = Math.max(temperature, 0.01);
  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const concatMessages = [
    ...(modelConstantsData.defaultSystemChatPrompt
      ? [
          {
            role: ChatCompletionRequestMessageRoleEnum.System,
            content: modelConstantsData.defaultSystemChatPrompt
          }
        ]
      : []),
    ...filterMessages
  ] as ChatCompletionMessageParam[];

  if (concatMessages.length === 0) {
    return Promise.reject('core.chat.error.Messages empty');
  }

  const loadMessages = await loadRequestMessages({
    messages: concatMessages,
    useVision: false
  });

  const response = await ai.chat.completions.create(
    {
      ...modelConstantsData?.defaultConfig,
      model: modelConstantsData.model,
      temperature,
      max_tokens,
      stream,
      messages: loadMessages
    },
    {
      headers: {
        Accept: 'application/json, text/plain, */*'
      }
    }
  );

  const { answerText } = await (async () => {
    if (res && stream) {
      // sse response
      const { answer } = await streamResponse({
        res,
        detail,
        stream: response
      });

      targetResponse({ res, detail, outputs });

      return {
        answerText: answer
      };
    } else {
      const unStreamResponse = response as ChatCompletion;
      const answer = unStreamResponse.choices?.[0]?.message?.content || '';

      return {
        answerText: answer
      };
    }
  })();

  const completeMessages = filterMessages.concat({
    role: ChatCompletionRequestMessageRoleEnum.Assistant,
    content: answerText
  });
  const chatCompleteMessages = GPTMessages2Chats(completeMessages);

  const tokens = await countMessagesTokens(chatCompleteMessages);
  const { totalPoints, modelName } = formatModelChars2Points({
    model,
    tokens,
    modelType: ModelTypeEnum.llm
  });

  return {
    answerText,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
      model: modelName,
      tokens,
      query: `${userChatInput}`,
      maxToken: max_tokens,
      historyPreview: getHistoryPreview(chatCompleteMessages),
      contextTotalLen: completeMessages.length
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
        model: modelName,
        tokens
      }
    ],
    [DispatchNodeResponseKeyEnum.toolResponses]: answerText,
    history: chatCompleteMessages
  };
};

async function filterQuote({
  quoteQA = [],
  model,
  quoteTemplate
}: {
  quoteQA: ChatProps['params']['quoteQA'];
  model: LLMModelItemType;
  quoteTemplate?: string;
}) {
  function getValue(item: SearchDataResponseItemType, index: number) {
    return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
      q: item.q,
      a: item.a,
      source: item.sourceName,
      sourceId: String(item.sourceId || 'UnKnow'),
      index: index + 1
    });
  }

  // slice filterSearch
  const filterQuoteQA = await filterSearchResultsByMaxChars(quoteQA, model.quoteMaxToken);

  const quoteText =
    filterQuoteQA.length > 0
      ? `${filterQuoteQA.map((item, index) => getValue(item, index).trim()).join('\n------\n')}`
      : '';

  return {
    quoteText
  };
}
async function getChatMessages({
  quotePrompt,
  quoteText,
  quoteQA,
  histories = [],
  systemPrompt,
  userChatInput,
  inputFiles,
  model
}: {
  quotePrompt?: string;
  quoteText: string;
  quoteQA: ChatProps['params']['quoteQA'];
  histories: ChatItemType[];
  systemPrompt: string;
  userChatInput: string;
  inputFiles: UserChatItemValueItemType['file'][];
  model: LLMModelItemType;
}) {
  const replaceInputValue =
    quoteQA !== undefined
      ? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
          quote: quoteText,
          question: userChatInput
        })
      : userChatInput;

  const messages: ChatItemType[] = [
    ...getSystemPrompt(systemPrompt),
    ...histories,
    {
      obj: ChatRoleEnum.Human,
      value: runtimePrompt2ChatsValue({
        files: inputFiles,
        text: replaceInputValue
      })
    }
  ];
  const adaptMessages = chats2GPTMessages({ messages, reserveId: false });

  const filterMessages = await filterGPTMessageByMaxTokens({
    messages: adaptMessages,
    maxTokens: model.maxContext - 300 // filter token. not response maxToken
  });

  return {
    filterMessages
  };
}
async function getMaxTokens({
  maxToken,
  model,
  filterMessages = []
}: {
  maxToken: number;
  model: LLMModelItemType;
  filterMessages: ChatCompletionMessageParam[];
}) {
  maxToken = Math.min(maxToken, model.maxResponse);
  const tokensLimit = model.maxContext;

  /* count response max token */
  const promptsToken = await countGptMessagesTokens(filterMessages);
  maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;

  if (maxToken <= 0) {
    maxToken = 200;
  }
  return {
    max_tokens: maxToken
  };
}

function targetResponse({
  res,
  outputs,
  detail
}: {
  res: NextApiResponse;
  outputs: FlowNodeItemType['outputs'];
  detail: boolean;
}) {
  const targets =
    outputs.find((output) => output.key === NodeOutputKeyEnum.answerText)?.targets || [];

  if (targets.length === 0) return;
  responseWrite({
    res,
    event: detail ? SseResponseEventEnum.answer : undefined,
    data: textAdaptGptResponse({
      text: '\n'
    })
  });
}

async function streamResponse({
  res,
  detail,
  stream
}: {
  res: NextApiResponse;
  detail: boolean;
  stream: StreamChatType;
}) {
  const write = responseWriteController({
    res,
    readStream: stream
  });
  let answer = '';
  for await (const part of stream) {
    if (res.closed) {
      stream.controller?.abort();
      break;
    }
    const content = part.choices?.[0]?.delta?.content || '';
    answer += content;

    responseWrite({
      write,
      event: detail ? SseResponseEventEnum.answer : undefined,
      data: textAdaptGptResponse({
        text: content
      })
    });
  }

  if (!answer) {
    return Promise.reject('core.chat.Chat API is error or undefined');
  }

  return { answer };
}
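
As a worked example of the two adaptations above (the model limits are made-up values): a node temperature of 7 on a model whose maxTemperature is 1.2 becomes 1.2 * (7 / 10) = 0.84, and max_tokens is clipped so prompt plus response still fit the context window:

// Hypothetical model limits, for illustration only:
const maxTemperature = 1.2;
const maxContext = 16000;
const maxResponse = 4000;

// FastGPT's 1~10 temperature scale mapped onto the model's own range:
const nodeTemperature = 7;
const temperature = Math.max(+(maxTemperature * (nodeTemperature / 10)).toFixed(2), 0.01); // 0.84

// max_tokens clipped to what the context window still allows:
const promptsToken = 13000; // assumed token count of the filtered messages
let maxToken = Math.min(4000, maxResponse); // 4000
maxToken = promptsToken + maxToken > maxContext ? maxContext - promptsToken : maxToken; // 3000
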
@@ -1,35 +0,0 @@
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchResultConcat } from '@fastgpt/global/core/dataset/search/utils';
import { filterSearchResultsByMaxChars } from '../../utils';

type DatasetConcatProps = ModuleDispatchProps<
  {
    [NodeInputKeyEnum.datasetMaxTokens]: number;
  } & { [key: string]: SearchDataResponseItemType[] }
>;
type DatasetConcatResponse = {
  [NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
};

export async function dispatchDatasetConcat(
  props: DatasetConcatProps
): Promise<DatasetConcatResponse> {
  const {
    params: { limit = 1500, ...quoteMap }
  } = props as DatasetConcatProps;

  const quoteList = Object.values(quoteMap).filter((list) => Array.isArray(list));

  const rrfConcatResults = datasetSearchResultConcat(
    quoteList.map((list) => ({
      k: 60,
      list
    }))
  );

  return {
    [NodeOutputKeyEnum.datasetQuoteQA]: await filterSearchResultsByMaxChars(rrfConcatResults, limit)
  };
}
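
The `k: 60` passed above suggests datasetSearchResultConcat applies standard Reciprocal Rank Fusion; the helper's implementation is not part of this diff, so the following is a sketch under that assumption, with hypothetical result ids:

// RRF: each list contributes 1 / (k + rank) per item it ranked (rank starting at 1).
const k = 60;
const listA = ['doc1', 'doc2', 'doc3']; // hypothetical ids, best first
const listB = ['doc2', 'doc3'];

const scores = new Map<string, number>();
[listA, listB].forEach((list) =>
  list.forEach((id, rank) => {
    scores.set(id, (scores.get(id) || 0) + 1 / (k + rank + 1));
  })
);
const fused = [...scores.entries()].sort((a, b) => b[1] - a[1]).map(([id]) => id);
// => ['doc2', 'doc3', 'doc1']: items ranked by both lists outweigh a single top hit.
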
@@ -1,165 +0,0 @@
// @ts-nocheck
import {
  DispatchNodeResponseType,
  DispatchNodeResultType
} from '@fastgpt/global/core/workflow/runtime/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum, getLLMModel, getVectorModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '../../../dataset/search/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';

type DatasetSearchProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.datasetSelectList]: SelectedDatasetType;
  [NodeInputKeyEnum.datasetSimilarity]: number;
  [NodeInputKeyEnum.datasetMaxTokens]: number;
  [NodeInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
  [NodeInputKeyEnum.userChatInput]: string;
  [NodeInputKeyEnum.datasetSearchUsingReRank]: boolean;
  [NodeInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
  [NodeInputKeyEnum.datasetSearchExtensionModel]: string;
  [NodeInputKeyEnum.datasetSearchExtensionBg]: string;
}>;
export type DatasetSearchResponse = DispatchNodeResultType<{
  isEmpty?: boolean;
  unEmpty?: boolean;
  [NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
}>;

export async function dispatchDatasetSearch(
  props: DatasetSearchProps
): Promise<DatasetSearchResponse> {
  const {
    teamId,
    histories,
    module,
    params: {
      datasets = [],
      similarity,
      limit = 1500,
      usingReRank,
      searchMode,
      userChatInput,

      datasetSearchUsingExtensionQuery,
      datasetSearchExtensionModel,
      datasetSearchExtensionBg
    }
  } = props as DatasetSearchProps;

  if (!Array.isArray(datasets)) {
    return Promise.reject('Quote type error');
  }

  if (datasets.length === 0) {
    return Promise.reject('core.chat.error.Select dataset empty');
  }

  if (!userChatInput) {
    return Promise.reject('core.chat.error.User input empty');
  }

  // query extension
  const extensionModel =
    datasetSearchUsingExtensionQuery && datasetSearchExtensionModel
      ? getLLMModel(datasetSearchExtensionModel)
      : undefined;
  const { concatQueries, rewriteQuery, aiExtensionResult } = await datasetSearchQueryExtension({
    query: userChatInput,
    extensionModel,
    extensionBg: datasetSearchExtensionBg,
    histories: getHistories(6, histories)
  });

  // console.log(concatQueries, rewriteQuery, aiExtensionResult);

  // get vector
  const vectorModel = getVectorModel(datasets[0]?.vectorModel?.model);

  // start search
  const {
    searchRes,
    tokens,
    usingSimilarityFilter,
    usingReRank: searchUsingReRank
  } = await searchDatasetData({
    teamId,
    reRankQuery: `${rewriteQuery}`,
    queries: concatQueries,
    model: vectorModel.model,
    similarity,
    limit,
    datasetIds: datasets.map((item) => item.datasetId),
    searchMode,
    usingReRank: usingReRank && (await checkTeamReRankPermission(teamId))
  });

  // count bill results
  // vector
  const { totalPoints, modelName } = formatModelChars2Points({
    model: vectorModel.model,
    tokens,
    modelType: ModelTypeEnum.vector
  });
  const responseData: DispatchNodeResponseType & { totalPoints: number } = {
    totalPoints,
    query: concatQueries.join('\n'),
    model: modelName,
    tokens,
    similarity: usingSimilarityFilter ? similarity : undefined,
    limit,
    searchMode,
    searchUsingReRank: searchUsingReRank,
    quoteList: searchRes
  };
  const nodeDispatchUsages: ChatNodeUsageType[] = [
    {
      totalPoints,
      moduleName: module.name,
      model: modelName,
      tokens
    }
  ];

  if (aiExtensionResult) {
    const { totalPoints, modelName } = formatModelChars2Points({
      model: aiExtensionResult.model,
      tokens: aiExtensionResult.tokens,
      modelType: ModelTypeEnum.llm
    });

    responseData.totalPoints += totalPoints;
    responseData.tokens = aiExtensionResult.tokens;
    responseData.extensionModel = modelName;
    responseData.extensionResult =
      aiExtensionResult.extensionQueries?.join('\n') ||
      JSON.stringify(aiExtensionResult.extensionQueries);

    nodeDispatchUsages.push({
      totalPoints,
      moduleName: 'core.module.template.Query extension',
      model: modelName,
      tokens: aiExtensionResult.tokens
    });
  }

  return {
    isEmpty: searchRes.length === 0 ? true : undefined,
    unEmpty: searchRes.length > 0 ? true : undefined,
    quoteQA: searchRes,
    [DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
    nodeDispatchUsages,
    [DispatchNodeResponseKeyEnum.toolResponses]: searchRes.map((item) => ({
      id: item.id,
      text: `${item.q}\n${item.a}`.trim()
    }))
  };
}
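
A note on the `isEmpty`/`unEmpty` outputs above: they use `true : undefined` rather than `true : false` because the V1 dispatcher only runs a downstream module once none of its inputs are `undefined` (see checkModulesCanRun in the dispatcher below), so exactly one branch fires:

// Assumed: the search returned nothing.
const searchResCount = 0;
const branchOutputs = {
  isEmpty: searchResCount === 0 ? true : undefined, // defined: the "empty" branch runs
  unEmpty: searchResCount > 0 ? true : undefined // undefined: that branch never triggers
};
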
@@ -1,430 +0,0 @@
// @ts-nocheck
import { NextApiResponse } from 'next';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ChatDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import type {
  AIChatItemValueItemType,
  ChatHistoryItemResType,
  ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type.d';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { FlowNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { responseWriteNodeStatus } from '../../../common/response';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';

import { dispatchChatInput } from './init/userChatInput';
import { dispatchChatCompletion } from './chat/oneapi';
import { dispatchDatasetSearch } from './dataset/search';
import { dispatchDatasetConcat } from './dataset/concat';
import { dispatchAnswer } from './tools/answer';
import { dispatchClassifyQuestion } from './agent/classifyQuestion';
import { dispatchContentExtract } from './agent/extract';
import { dispatchHttpRequest } from './tools/http';
import { dispatchHttp468Request } from './tools/http468';
import { dispatchAppRequest } from './tools/runApp';
import { dispatchQueryExtension } from './tools/queryExternsion';
import { dispatchRunPlugin } from './plugin/run';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { checkTheModuleConnectedByTool, valueTypeFormat } from './utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { DispatchFlowResponse } from './type';
import { dispatchStopToolCall } from './agent/runTool/stopTool';
import { dispatchLafRequest } from './tools/runLaf';

const callbackMap: Record<string, Function> = {
  questionInput: dispatchChatInput,
  [FlowNodeTypeEnum.answerNode]: dispatchAnswer,
  [FlowNodeTypeEnum.chatNode]: dispatchChatCompletion,
  [FlowNodeTypeEnum.datasetSearchNode]: dispatchDatasetSearch,
  [FlowNodeTypeEnum.datasetConcatNode]: dispatchDatasetConcat,
  [FlowNodeTypeEnum.classifyQuestion]: dispatchClassifyQuestion,
  [FlowNodeTypeEnum.contentExtract]: dispatchContentExtract,
  [FlowNodeTypeEnum.httpRequest468]: dispatchHttp468Request,
  [FlowNodeTypeEnum.runApp]: dispatchAppRequest,
  [FlowNodeTypeEnum.pluginModule]: dispatchRunPlugin,
  [FlowNodeTypeEnum.pluginInput]: dispatchPluginInput,
  [FlowNodeTypeEnum.pluginOutput]: dispatchPluginOutput,
  [FlowNodeTypeEnum.queryExtension]: dispatchQueryExtension,
  [FlowNodeTypeEnum.tools]: dispatchRunTools,
  [FlowNodeTypeEnum.stopTool]: dispatchStopToolCall,
  [FlowNodeTypeEnum.lafModule]: dispatchLafRequest
};

/* running */
export async function dispatchWorkFlowV1({
  res,
  modules = [],
  runtimeModules,
  startParams = {},
  histories = [],
  variables = {},
  user,
  stream = false,
  detail = false,
  ...props
}: ChatDispatchProps & {
  modules?: FlowNodeItemType[]; // app modules
  runtimeModules?: RuntimeNodeItemType[];
  startParams?: Record<string, any>; // entry module params
}): Promise<DispatchFlowResponse> {
  // set sse response headers
  if (res && stream) {
    res.setHeader('Content-Type', 'text/event-stream;charset=utf-8');
    res.setHeader('Access-Control-Allow-Origin', '*');
    res.setHeader('X-Accel-Buffering', 'no');
    res.setHeader('Cache-Control', 'no-cache, no-transform');
  }

  variables = {
    ...getSystemVariable({ timezone: user.timezone }),
    ...variables
  };
  const runningModules = runtimeModules ? runtimeModules : loadModules(modules, variables);

  let chatResponses: ChatHistoryItemResType[] = []; // response request and save to database
  let chatAssistantResponse: AIChatItemValueItemType[] = []; // The value will be returned to the user
  let chatNodeUsages: ChatNodeUsageType[] = [];
  let toolRunResponse: ToolRunResponseItemType;
  let runningTime = Date.now();

  /* Store special response field */
  function pushStore(
    { inputs = [] }: RuntimeNodeItemType,
    {
      answerText = '',
      responseData,
      nodeDispatchUsages,
      toolResponses,
      assistantResponses
    }: {
      [NodeOutputKeyEnum.answerText]?: string;
      [DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[];
      [DispatchNodeResponseKeyEnum.toolResponses]?: ToolRunResponseItemType;
      [DispatchNodeResponseKeyEnum.assistantResponses]?: AIChatItemValueItemType[]; // tool module, save the response value
    }
  ) {
    const time = Date.now();

    if (responseData) {
      chatResponses.push({
        ...responseData,
        runningTime: +((time - runningTime) / 1000).toFixed(2)
      });
    }
    if (nodeDispatchUsages) {
      chatNodeUsages = chatNodeUsages.concat(nodeDispatchUsages);
      props.maxRunTimes -= nodeDispatchUsages.length;
    }
    if (toolResponses !== undefined) {
      if (Array.isArray(toolResponses) && toolResponses.length === 0) return;
      if (typeof toolResponses === 'object' && Object.keys(toolResponses).length === 0) {
        return;
      }
      toolRunResponse = toolResponses;
    }
    if (assistantResponses) {
      chatAssistantResponse = chatAssistantResponse.concat(assistantResponses);
    }

    // save assistant text response
    if (answerText) {
      const isResponseAnswerText =
        inputs.find((item) => item.key === NodeInputKeyEnum.aiChatIsResponseText)?.value ?? true;
      if (isResponseAnswerText) {
        chatAssistantResponse.push({
          type: ChatItemValueTypeEnum.text,
          text: {
            content: answerText
          }
        });
      }
    }

    runningTime = time;
  }
  /* Inject data into module input */
  function moduleInput(module: RuntimeNodeItemType, data: Record<string, any> = {}) {
    const updateInputValue = (key: string, value: any) => {
      const index = module.inputs.findIndex((item: any) => item.key === key);
      if (index === -1) return;
      module.inputs[index].value = value;
    };
    Object.entries(data).map(([key, val]: any) => {
      updateInputValue(key, val);
    });

    return;
  }
  /* Pass the output of the module to the next stage */
  function moduleOutput(
    module: RuntimeNodeItemType,
    result: Record<string, any> = {}
  ): Promise<any> {
    pushStore(module, result);

    const nextRunModules: RuntimeNodeItemType[] = [];

    // Assign the output value to the next module
    module.outputs.map((outputItem) => {
      if (result[outputItem.key] === undefined) return;
      /* update output value */
      outputItem.value = result[outputItem.key];

      /* update target */
      outputItem.targets.map((target: any) => {
        // find module
        const targetModule = runningModules.find((item) => item.moduleId === target.moduleId);
        if (!targetModule) return;

        // push to running queue
        nextRunModules.push(targetModule);

        // update input
        moduleInput(targetModule, { [target.key]: outputItem.value });
      });
    });

    // Ensure the uniqueness of running modules
    const set = new Set<string>();
    const filterModules = nextRunModules.filter((module) => {
      if (set.has(module.moduleId)) return false;
      set.add(module.moduleId);
      return true;
    });

    return checkModulesCanRun(filterModules);
  }
  function checkModulesCanRun(modules: RuntimeNodeItemType[] = []) {
    return Promise.all(
      modules.map((module) => {
        if (!module.inputs.find((item: any) => item.value === undefined)) {
          // remove switch
          moduleInput(module, { [NodeInputKeyEnum.switch]: undefined });
          return moduleRun(module);
        }
      })
    );
  }
  async function moduleRun(module: RuntimeNodeItemType): Promise<any> {
    if (res?.closed || props.maxRunTimes <= 0) return Promise.resolve();

    if (res && stream && detail && module.showStatus) {
      responseStatus({
        res,
        name: module.name,
        status: 'running'
      });
    }

    // get module running params
    const params: Record<string, any> = {};
    module.inputs.forEach((item) => {
      params[item.key] = valueTypeFormat(item.value, item.valueType);
    });

    const dispatchData: ModuleDispatchProps<Record<string, any>> = {
      ...props,
      res,
      variables,
      histories,
      user,
      stream,
      detail,
      module,
      runtimeModules: runningModules,
      params
    };

    // run module
    const dispatchRes: Record<string, any> = await (async () => {
      if (callbackMap[module.flowType]) {
        return callbackMap[module.flowType](dispatchData);
      }
      return {};
    })();

    // format response data. Add module name and module type
    const formatResponseData: ChatHistoryItemResType = (() => {
      if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;
      return {
        moduleName: module.name,
        moduleType: module.flowType,
        ...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]
      };
    })();

    // Add output default value
    module.outputs.forEach((item) => {
      if (!item.required) return;
      if (dispatchRes[item.key] !== undefined) return;
      dispatchRes[item.key] = valueTypeFormat(item.defaultValue, item.valueType);
    });

    // Pass userChatInput
    const hasUserChatInputTarget = !!module.outputs.find(
      (item) => item.key === NodeOutputKeyEnum.userChatInput
    )?.targets?.length;

    return moduleOutput(module, {
      finish: true,
      [NodeOutputKeyEnum.userChatInput]: hasUserChatInputTarget
        ? params[NodeOutputKeyEnum.userChatInput]
        : undefined,
      ...dispatchRes,
      [DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData,
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]:
        dispatchRes[DispatchNodeResponseKeyEnum.nodeDispatchUsages]
    });
  }
  // start process with initInput
  const initModules = runningModules.filter((item) => item.isEntry);
  // reset entry
  modules.forEach((item) => {
    item.isEntry = false;
  });
  // console.log(JSON.stringify(runningModules, null, 2));
  initModules.map((module) =>
    moduleInput(module, {
      ...startParams,
      history: [] // abandon history field. History module will get histories from other fields.
    })
  );

  await checkModulesCanRun(initModules);

  // finally, force a run of the pluginOutput module if present
  const pluginOutputModule = runningModules.find(
    (item) => item.flowType === FlowNodeTypeEnum.pluginOutput
  );
  if (pluginOutputModule) {
    await moduleRun(pluginOutputModule);
  }

  return {
    flowResponses: chatResponses,
    flowUsages: chatNodeUsages,
    [DispatchNodeResponseKeyEnum.assistantResponses]:
      concatAssistantResponseAnswerText(chatAssistantResponse),
    [DispatchNodeResponseKeyEnum.toolResponses]: toolRunResponse
  };
}

/* init store modules to running modules */
function loadModules(
  modules: FlowNodeItemType[],
  variables: Record<string, any>
): RuntimeNodeItemType[] {
  return modules
    .filter((item) => {
      return ![FlowNodeTypeEnum.userGuide].includes(item.moduleId as any);
    })
    .map<RuntimeNodeItemType>((module) => {
      return {
        moduleId: module.moduleId,
        name: module.name,
        avatar: module.avatar,
        intro: module.intro,
        flowType: module.flowType,
        showStatus: module.showStatus,
        isEntry: module.isEntry,
        inputs: module.inputs
          .filter(
            /*
              1. system input must be saved
              2. connected by source handle
              3. manual input value or have default value
              4. For the module connected by the tool, leave the toolDescription input
            */
            (item) => {
              const isTool = checkTheModuleConnectedByTool(modules, module);

              if (isTool && item.toolDescription) {
                return true;
              }

              return item.type === 'systemInput' || item.connected || item.value !== undefined;
            }
          ) // filter unconnected target input
          .map((item) => {
            const replace = ['string'].includes(typeof item.value);

            return {
              key: item.key,
              // variables replace
              value: replace ? replaceVariable(item.value, variables) : item.value,
              valueType: item.valueType,
              required: item.required,
              toolDescription: item.toolDescription
            };
          }),
        outputs: module.outputs
          .map((item) => ({
            key: item.key,
            required: item.required,
            defaultValue: item.defaultValue,
            answer: item.key === NodeOutputKeyEnum.answerText,
            value: undefined,
            valueType: item.valueType,
            targets: item.targets
          }))
          .sort((a, b) => {
            // finish output always at last
            if (a.key === NodeOutputKeyEnum.finish) return 1;
            if (b.key === NodeOutputKeyEnum.finish) return -1;
            return 0;
          })
      };
    });
}

/* SSE response of module status */
export function responseStatus({
  res,
  status,
  name
}: {
  res: NextApiResponse;
  status?: 'running' | 'finish';
  name?: string;
}) {
  if (!name) return;
  responseWriteNodeStatus({
    res,
    name
  });
}

/* get system variable */
export function getSystemVariable({ timezone }: { timezone: string }) {
  return {
    cTime: getSystemTime(timezone)
  };
}

export const concatAssistantResponseAnswerText = (response: AIChatItemValueItemType[]) => {
  const result: AIChatItemValueItemType[] = [];
  // Merge consecutive text items.
  for (let i = 0; i < response.length; i++) {
    const item = response[i];
    if (item.type === ChatItemValueTypeEnum.text) {
      let text = item.text?.content || '';
      const lastItem = result[result.length - 1];
      if (lastItem && lastItem.type === ChatItemValueTypeEnum.text && lastItem.text?.content) {
        lastItem.text.content += text;
        continue;
      }
    }
    result.push(item);
  }

  return result;
};
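
A minimal sketch of how moduleOutput above feeds moduleInput to drive the queue (module ids and keys are made up): module A finishes with an answerText whose output targets module B's `text` input, so B receives the value and becomes runnable:

const moduleA = {
  outputs: [{ key: 'answerText', value: undefined, targets: [{ moduleId: 'B', key: 'text' }] }]
};
const moduleB = { moduleId: 'B', inputs: [{ key: 'text', value: undefined }] };

// What happens inside moduleOutput(moduleA, { answerText: 'hi' }):
moduleA.outputs[0].value = 'hi'; // update output value
moduleB.inputs[0].value = 'hi'; // moduleInput(targetModule, { text: 'hi' })
// B now has no undefined inputs, so checkModulesCanRun() proceeds to moduleRun(B).
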
@@ -1,19 +0,0 @@
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { getHistories } from '../utils';
export type HistoryProps = ModuleDispatchProps<{
  maxContext?: number;
  [NodeInputKeyEnum.history]: ChatItemType[];
}>;

export const dispatchHistory = (props: Record<string, any>) => {
  const {
    histories,
    params: { maxContext }
  } = props as HistoryProps;

  return {
    history: getHistories(maxContext, histories)
  };
};
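
getHistories accepts either a concrete history array or a number; judging by its call sites (e.g. `getHistories(6, histories)` in the search and chat nodes), a number is expected to select the most recent rounds of the conversation. The helper lives in '../utils' and is not part of this diff, so treat this as an assumption about its contract:

// Assumed contract: a number selects the most recent N rounds; an array passes through.
const recent = getHistories(6, histories); // the last 6 rounds for this node
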
@@ -1,14 +0,0 @@
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
export type UserChatInputProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.userChatInput]: string;
}>;

export const dispatchChatInput = (props: Record<string, any>) => {
  const {
    params: { userChatInput }
  } = props as UserChatInputProps;
  return {
    userChatInput
  };
};
@ -1,146 +0,0 @@
-// @ts-nocheck
-
-import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { dispatchWorkFlowV1 } from '../index';
-import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
-import {
-  FlowNodeTemplateTypeEnum,
-  NodeInputKeyEnum
-} from '@fastgpt/global/core/workflow/constants';
-import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { splitCombinePluginId } from '../../../app/plugin/controller';
-import { setEntryEntries, DYNAMIC_INPUT_KEY } from '../utils';
-import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
-import { PluginRuntimeType, PluginTemplateType } from '@fastgpt/global/core/plugin/type';
-import { PluginSourceEnum } from '@fastgpt/global/core/plugin/constants';
-import { MongoPlugin } from '../../../plugin/schema';
-
-type RunPluginProps = ModuleDispatchProps<{
-  [NodeInputKeyEnum.pluginId]: string;
-  [key: string]: any;
-}>;
-type RunPluginResponse = DispatchNodeResultType<{}>;
-
-const getPluginTemplateById = async (id: string): Promise<PluginTemplateType> => {
-  const { source, pluginId } = await splitCombinePluginId(id);
-  if (source === PluginSourceEnum.community) {
-    const item = global.communityPluginsV1?.find((plugin) => plugin.id === pluginId);
-
-    if (!item) return Promise.reject('plugin not found');
-
-    return item;
-  }
-  if (source === PluginSourceEnum.personal) {
-    const item = await MongoPlugin.findById(id).lean();
-    if (!item) return Promise.reject('plugin not found');
-    return {
-      id: String(item._id),
-      teamId: String(item.teamId),
-      name: item.name,
-      avatar: item.avatar,
-      intro: item.intro,
-      showStatus: true,
-      source: PluginSourceEnum.personal,
-      modules: item.modules,
-      templateType: FlowNodeTemplateTypeEnum.teamApp
-    };
-  }
-  return Promise.reject('plugin not found');
-};
-
-const getPluginRuntimeById = async (id: string): Promise<PluginRuntimeType> => {
-  const plugin = await getPluginTemplateById(id);
-
-  return {
-    teamId: plugin.teamId,
-    name: plugin.name,
-    avatar: plugin.avatar,
-    showStatus: plugin.showStatus,
-    modules: plugin.modules
-  };
-};
-
-export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPluginResponse> => {
-  const {
-    mode,
-    teamId,
-    tmbId,
-    params: { pluginId, ...data }
-  } = props;
-
-  if (!pluginId) {
-    return Promise.reject('pluginId can not find');
-  }
-
-  const plugin = await getPluginRuntimeById(pluginId);
-  if (plugin.teamId && plugin.teamId !== teamId) {
-    return Promise.reject('plugin not found');
-  }
-
-  // concat dynamic inputs
-  const inputModule = plugin.modules.find((item) => item.flowType === FlowNodeTypeEnum.pluginInput);
-  if (!inputModule) return Promise.reject('Plugin error, It has no set input.');
-  const hasDynamicInput = inputModule.inputs.find((input) => input.key === DYNAMIC_INPUT_KEY);
-
-  const startParams: Record<string, any> = (() => {
-    if (!hasDynamicInput) return data;
-
-    const params: Record<string, any> = {
-      [DYNAMIC_INPUT_KEY]: {}
-    };
-
-    for (const key in data) {
-      const input = inputModule.inputs.find((input) => input.key === key);
-      if (input) {
-        params[key] = data[key];
-      } else {
-        params[DYNAMIC_INPUT_KEY][key] = data[key];
-      }
-    }
-
-    return params;
-  })();
-
-  const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlowV1({
-    ...props,
-    modules: setEntryEntries(plugin.modules).map((module) => ({
-      ...module,
-      showStatus: false
-    })),
-    runtimeModules: undefined, // must reset
-    startParams
-  });
-
-  const output = flowResponses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);
-
-  if (output) {
-    output.moduleLogo = plugin.avatar;
-  }
-
-  return {
-    assistantResponses,
-    // responseData, // debug
-    [DispatchNodeResponseKeyEnum.nodeResponse]: {
-      moduleLogo: plugin.avatar,
-      totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
-      pluginOutput: output?.pluginOutput,
-      pluginDetail:
-        mode === 'test' && plugin.teamId === teamId
-          ? flowResponses.filter((item) => {
-              const filterArr = [FlowNodeTypeEnum.pluginOutput];
-              return !filterArr.includes(item.moduleType as any);
-            })
-          : undefined
-    },
-    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
-      {
-        moduleName: plugin.name,
-        totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
-        model: plugin.name,
-        tokens: 0
-      }
-    ],
-    [DispatchNodeResponseKeyEnum.toolResponses]: output?.pluginOutput ? output.pluginOutput : {},
-    ...(output ? output.pluginOutput : {})
-  };
-};

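The dynamic-input handling in the deleted dispatcher above is the part most easily misread: any call parameter that does not match a declared plugin input is folded under DYNAMIC_INPUT_KEY rather than dropped. A minimal sketch of that partition, with made-up parameter names for illustration:

// Sketch: partition call params into declared inputs vs. the dynamic bucket.
// `declaredKeys` stands in for inputModule.inputs; the param names are invented.
const DYNAMIC_INPUT_KEY = 'DYNAMIC_INPUT_KEY';
const declaredKeys = new Set(['query', 'limit']);
const data: Record<string, any> = { query: 'hi', limit: 5, customFlag: true };

const startParams: Record<string, any> = { [DYNAMIC_INPUT_KEY]: {} as Record<string, any> };
for (const key in data) {
  if (declaredKeys.has(key)) startParams[key] = data[key];
  else startParams[DYNAMIC_INPUT_KEY][key] = data[key];
}
// startParams => { DYNAMIC_INPUT_KEY: { customFlag: true }, query: 'hi', limit: 5 }
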
@ -1,11 +0,0 @@
-import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-
-export type PluginInputProps = ModuleDispatchProps<{
-  [key: string]: any;
-}>;
-
-export const dispatchPluginInput = (props: PluginInputProps) => {
-  const { params } = props;
-
-  return params;
-};

@ -1,19 +0,0 @@
-import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type.d';
-import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-
-export type PluginOutputProps = ModuleDispatchProps<{
-  [key: string]: any;
-}>;
-export type PluginOutputResponse = DispatchNodeResultType<{}>;
-
-export const dispatchPluginOutput = (props: PluginOutputProps): PluginOutputResponse => {
-  const { params } = props;
-
-  return {
-    [DispatchNodeResponseKeyEnum.nodeResponse]: {
-      totalPoints: 0,
-      pluginOutput: params
-    }
-  };
-};

@ -1,37 +0,0 @@
-import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { responseWrite } from '../../../../common/response';
-import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
-import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
-import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
-export type AnswerProps = ModuleDispatchProps<{
-  text: string;
-}>;
-export type AnswerResponse = DispatchNodeResultType<{
-  [NodeOutputKeyEnum.answerText]: string;
-}>;
-
-export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
-  const {
-    res,
-    detail,
-    stream,
-    params: { text = '' }
-  } = props as AnswerProps;
-
-  const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);
-
-  if (stream) {
-    responseWrite({
-      res,
-      event: detail ? SseResponseEventEnum.fastAnswer : undefined,
-      data: textAdaptGptResponse({
-        text: `\n${formatText}`
-      })
-    });
-  }
-
-  return {
-    [NodeOutputKeyEnum.answerText]: `\n${formatText}`
-  };
-};

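For orientation, the stream branch above emits one GPT-style SSE chunk per answer node. A rough sketch of the client-visible part of a frame for the text 'Hello' (the envelope fields such as id/object/model are omitted here, and the exact shape is an assumption, not taken from this diff):

// Sketch (assumed shape): only the delta fields matter to the consumer.
const chunk = {
  choices: [{ delta: { role: 'assistant', content: '\nHello' }, index: 0, finish_reason: null }]
};
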
@ -1,249 +0,0 @@
-// @ts-nocheck
-import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
-import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import axios from 'axios';
-import { valueTypeFormat } from '../utils';
-import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
-import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
-import { DYNAMIC_INPUT_KEY } from '../utils';
-
-type HttpRequestProps = ModuleDispatchProps<{
-  [NodeInputKeyEnum.abandon_httpUrl]: string;
-  [NodeInputKeyEnum.httpMethod]: string;
-  [NodeInputKeyEnum.httpReqUrl]: string;
-  [NodeInputKeyEnum.httpHeaders]: string;
-  [key: string]: any;
-}>;
-type HttpResponse = DispatchNodeResultType<{
-  [NodeOutputKeyEnum.failed]?: boolean;
-  [key: string]: any;
-}>;
-
-const flatDynamicParams = (params: Record<string, any>) => {
-  const dynamicParams = params[DYNAMIC_INPUT_KEY];
-  if (!dynamicParams) return params;
-  return {
-    ...params,
-    ...dynamicParams,
-    [DYNAMIC_INPUT_KEY]: undefined
-  };
-};
-
-export const dispatchHttpRequest = async (props: HttpRequestProps): Promise<HttpResponse> => {
-  let {
-    appId,
-    chatId,
-    responseChatItemId,
-    variables,
-    module: { outputs },
-    params: {
-      system_httpMethod: httpMethod = 'POST',
-      system_httpReqUrl: httpReqUrl,
-      system_httpHeader: httpHeader,
-      ...body
-    }
-  } = props;
-
-  if (!httpReqUrl) {
-    return Promise.reject('Http url is empty');
-  }
-
-  body = flatDynamicParams(body);
-
-  const requestBody = {
-    appId,
-    chatId,
-    responseChatItemId,
-    variables,
-    data: body
-  };
-  const requestQuery = {
-    appId,
-    chatId,
-    ...variables,
-    ...body
-  };
-
-  const formatBody = transformFlatJson({ ...requestBody });
-
-  // parse header
-  const headers = await (() => {
-    try {
-      if (!httpHeader) return {};
-      return JSON.parse(httpHeader);
-    } catch (error) {
-      return Promise.reject('Header 为非法 JSON 格式');
-    }
-  })();
-
-  try {
-    const response = await fetchData({
-      method: httpMethod,
-      url: httpReqUrl,
-      headers,
-      body: formatBody,
-      query: requestQuery
-    });
-
-    // format output value type
-    const results: Record<string, any> = {};
-    for (const key in response) {
-      const output = outputs.find((item) => item.key === key);
-      if (!output) continue;
-      results[key] = valueTypeFormat(response[key], output.valueType);
-    }
-
-    return {
-      [DispatchNodeResponseKeyEnum.nodeResponse]: {
-        totalPoints: 0,
-        body: formatBody,
-        httpResult: response
-      },
-      ...results
-    };
-  } catch (error) {
-    console.log(error);
-
-    return {
-      [NodeOutputKeyEnum.failed]: true,
-      [DispatchNodeResponseKeyEnum.nodeResponse]: {
-        totalPoints: 0,
-        body: formatBody,
-        httpResult: { error }
-      }
-    };
-  }
-};
-
-async function fetchData({
-  method,
-  url,
-  headers,
-  body,
-  query
-}: {
-  method: string;
-  url: string;
-  headers: Record<string, any>;
-  body: Record<string, any>;
-  query: Record<string, any>;
-}): Promise<Record<string, any>> {
-  const { data: response } = await axios<Record<string, any>>({
-    method,
-    baseURL: `http://${SERVICE_LOCAL_HOST}`,
-    url,
-    headers: {
-      'Content-Type': 'application/json',
-      ...headers
-    },
-    timeout: 360000,
-    params: method === 'GET' ? query : {},
-    data: method === 'POST' ? body : {}
-  });
-
-  /*
-    parse the json:
-    {
-      user: {
-        name: 'xxx',
-        age: 12
-      },
-      list: [
-        {
-          name: 'xxx',
-          age: 50
-        },
-        [{ test: 22 }]
-      ],
-      psw: 'xxx'
-    }
-
-    result: {
-      'user': { name: 'xxx', age: 12 },
-      'user.name': 'xxx',
-      'user.age': 12,
-      'list': [ { name: 'xxx', age: 50 }, [ [Object] ] ],
-      'list[0]': { name: 'xxx', age: 50 },
-      'list[0].name': 'xxx',
-      'list[0].age': 50,
-      'list[1]': [ { test: 22 } ],
-      'list[1][0]': { test: 22 },
-      'list[1][0].test': 22,
-      'psw': 'xxx'
-    }
-  */
-  const parseJson = (obj: Record<string, any>, prefix = '') => {
-    let result: Record<string, any> = {};
-
-    if (Array.isArray(obj)) {
-      for (let i = 0; i < obj.length; i++) {
-        result[`${prefix}[${i}]`] = obj[i];
-
-        if (Array.isArray(obj[i])) {
-          result = {
-            ...result,
-            ...parseJson(obj[i], `${prefix}[${i}]`)
-          };
-        } else if (typeof obj[i] === 'object') {
-          result = {
-            ...result,
-            ...parseJson(obj[i], `${prefix}[${i}].`)
-          };
-        }
-      }
-    } else if (typeof obj == 'object') {
-      for (const key in obj) {
-        result[`${prefix}${key}`] = obj[key];
-
-        if (Array.isArray(obj[key])) {
-          result = {
-            ...result,
-            ...parseJson(obj[key], `${prefix}${key}`)
-          };
-        } else if (typeof obj[key] === 'object') {
-          result = {
-            ...result,
-            ...parseJson(obj[key], `${prefix}${key}.`)
-          };
-        }
-      }
-    }
-
-    return result;
-  };
-
-  return parseJson(response);
-}
-
-function transformFlatJson(obj: Record<string, any>) {
-  for (let key in obj) {
-    if (typeof obj[key] === 'object') {
-      transformFlatJson(obj[key]);
-    }
-    if (key.includes('.')) {
-      let parts = key.split('.');
-      if (parts.length <= 1) continue;
-
-      const firstKey = parts.shift();
-
-      if (!firstKey) continue;
-
-      const lastKey = parts.join('.');
-
-      if (obj[firstKey]) {
-        obj[firstKey] = {
-          ...obj[firstKey],
-          [lastKey]: obj[key]
-        };
-      } else {
-        obj[firstKey] = { [lastKey]: obj[key] };
-      }
-
-      transformFlatJson(obj[firstKey]);
-
-      delete obj[key];
-    }
-  }
-  return obj;
-}

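Two helpers in the file above do the heavy lifting and are easy to confuse: parseJson flattens a response into dotted/indexed keys (the worked example sits in the comment block), while transformFlatJson nests dotted request keys back into objects. A small demonstration, assuming parseJson is lifted out of fetchData to module scope for the demo:

// parseJson flattening (keys match the worked example above):
// parseJson({ user: { name: 'xxx', age: 12 } })
// => { user: { name: 'xxx', age: 12 }, 'user.name': 'xxx', 'user.age': 12 }

// transformFlatJson is roughly the inverse, applied to request bodies:
const body = transformFlatJson({ 'data.appId': '1', chatId: '2' });
// => { chatId: '2', data: { appId: '1' } }
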
@ -1,294 +0,0 @@
-// @ts-nocheck
-import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
-import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import axios from 'axios';
-import { DYNAMIC_INPUT_KEY, valueTypeFormat } from '../utils';
-import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
-import { addLog } from '../../../../common/system/log';
-import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
-import { getErrText } from '@fastgpt/global/common/error/utils';
-
-type PropsArrType = {
-  key: string;
-  type: string;
-  value: string;
-};
-type HttpRequestProps = ModuleDispatchProps<{
-  [NodeInputKeyEnum.abandon_httpUrl]: string;
-  [NodeInputKeyEnum.httpMethod]: string;
-  [NodeInputKeyEnum.httpReqUrl]: string;
-  [NodeInputKeyEnum.httpHeaders]: PropsArrType[];
-  [NodeInputKeyEnum.httpParams]: PropsArrType[];
-  [NodeInputKeyEnum.httpJsonBody]: string;
-  [DYNAMIC_INPUT_KEY]: Record<string, any>;
-  [key: string]: any;
-}>;
-type HttpResponse = DispatchNodeResultType<{
-  [NodeOutputKeyEnum.failed]?: boolean;
-  [key: string]: any;
-}>;
-
-const UNDEFINED_SIGN = 'UNDEFINED_SIGN';
-
-export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<HttpResponse> => {
-  let {
-    appId,
-    chatId,
-    responseChatItemId,
-    variables,
-    module: { outputs },
-    histories,
-    params: {
-      system_httpMethod: httpMethod = 'POST',
-      system_httpReqUrl: httpReqUrl,
-      system_httpHeader: httpHeader,
-      system_httpParams: httpParams = [],
-      system_httpJsonBody: httpJsonBody,
-      [DYNAMIC_INPUT_KEY]: dynamicInput,
-      ...body
-    }
-  } = props;
-
-  if (!httpReqUrl) {
-    return Promise.reject('Http url is empty');
-  }
-
-  const concatVariables = {
-    appId,
-    chatId,
-    responseChatItemId,
-    ...variables,
-    histories: histories.slice(-10),
-    ...body
-  };
-
-  httpReqUrl = replaceVariable(httpReqUrl, concatVariables);
-  // parse header
-  const headers = await (() => {
-    try {
-      if (!httpHeader || httpHeader.length === 0) return {};
-      // array
-      return httpHeader.reduce((acc: Record<string, string>, item) => {
-        const key = replaceVariable(item.key, concatVariables);
-        const value = replaceVariable(item.value, concatVariables);
-        acc[key] = valueTypeFormat(value, 'string');
-        return acc;
-      }, {});
-    } catch (error) {
-      return Promise.reject('Header 为非法 JSON 格式');
-    }
-  })();
-  const params = httpParams.reduce((acc: Record<string, string>, item) => {
-    const key = replaceVariable(item.key, concatVariables);
-    const value = replaceVariable(item.value, concatVariables);
-    acc[key] = valueTypeFormat(value, 'string');
-    return acc;
-  }, {});
-  const requestBody = await (() => {
-    if (!httpJsonBody) return { [DYNAMIC_INPUT_KEY]: dynamicInput };
-    httpJsonBody = replaceVariable(httpJsonBody, concatVariables);
-    try {
-      const jsonParse = JSON.parse(httpJsonBody);
-      const removeSignJson = removeUndefinedSign(jsonParse);
-      return { [DYNAMIC_INPUT_KEY]: dynamicInput, ...removeSignJson };
-    } catch (error) {
-      console.log(error);
-      return Promise.reject(`Invalid JSON body: ${httpJsonBody}`);
-    }
-  })();
-
-  try {
-    const { formatResponse, rawResponse } = await fetchData({
-      method: httpMethod,
-      url: httpReqUrl,
-      headers,
-      body: requestBody,
-      params
-    });
-
-    // format output value type
-    const results: Record<string, any> = {};
-    for (const key in formatResponse) {
-      const output = outputs.find((item) => item.key === key);
-      if (!output) continue;
-      results[key] = valueTypeFormat(formatResponse[key], output.valueType);
-    }
-
-    return {
-      [DispatchNodeResponseKeyEnum.nodeResponse]: {
-        totalPoints: 0,
-        params: Object.keys(params).length > 0 ? params : undefined,
-        body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
-        headers: Object.keys(headers).length > 0 ? headers : undefined,
-        httpResult: rawResponse
-      },
-      [DispatchNodeResponseKeyEnum.toolResponses]: results,
-      [NodeOutputKeyEnum.httpRawResponse]: rawResponse,
-      ...results
-    };
-  } catch (error) {
-    addLog.error('Http request error', error);
-    return {
-      [NodeOutputKeyEnum.failed]: true,
-      [DispatchNodeResponseKeyEnum.nodeResponse]: {
-        totalPoints: 0,
-        params: Object.keys(params).length > 0 ? params : undefined,
-        body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
-        headers: Object.keys(headers).length > 0 ? headers : undefined,
-        httpResult: { error: formatHttpError(error) }
-      },
-      [NodeOutputKeyEnum.httpRawResponse]: getErrText(error)
-    };
-  }
-};
-
-async function fetchData({
-  method,
-  url,
-  headers,
-  body,
-  params
-}: {
-  method: string;
-  url: string;
-  headers: Record<string, any>;
-  body: Record<string, any>;
-  params: Record<string, any>;
-}): Promise<Record<string, any>> {
-  const { data: response } = await axios({
-    method,
-    baseURL: `http://${SERVICE_LOCAL_HOST}`,
-    url,
-    headers: {
-      'Content-Type': 'application/json',
-      ...headers
-    },
-    timeout: 120000,
-    params: params,
-    data: ['POST', 'PUT', 'PATCH'].includes(method) ? body : undefined
-  });
-
-  /*
-    parse the json:
-    {
-      user: {
-        name: 'xxx',
-        age: 12
-      },
-      list: [
-        {
-          name: 'xxx',
-          age: 50
-        },
-        [{ test: 22 }]
-      ],
-      psw: 'xxx'
-    }
-
-    result: {
-      'user': { name: 'xxx', age: 12 },
-      'user.name': 'xxx',
-      'user.age': 12,
-      'list': [ { name: 'xxx', age: 50 }, [ [Object] ] ],
-      'list[0]': { name: 'xxx', age: 50 },
-      'list[0].name': 'xxx',
-      'list[0].age': 50,
-      'list[1]': [ { test: 22 } ],
-      'list[1][0]': { test: 22 },
-      'list[1][0].test': 22,
-      'psw': 'xxx'
-    }
-  */
-  const parseJson = (obj: Record<string, any>, prefix = '') => {
-    let result: Record<string, any> = {};
-
-    if (Array.isArray(obj)) {
-      for (let i = 0; i < obj.length; i++) {
-        result[`${prefix}[${i}]`] = obj[i];
-
-        if (Array.isArray(obj[i])) {
-          result = {
-            ...result,
-            ...parseJson(obj[i], `${prefix}[${i}]`)
-          };
-        } else if (typeof obj[i] === 'object') {
-          result = {
-            ...result,
-            ...parseJson(obj[i], `${prefix}[${i}].`)
-          };
-        }
-      }
-    } else if (typeof obj == 'object') {
-      for (const key in obj) {
-        result[`${prefix}${key}`] = obj[key];
-
-        if (Array.isArray(obj[key])) {
-          result = {
-            ...result,
-            ...parseJson(obj[key], `${prefix}${key}`)
-          };
-        } else if (typeof obj[key] === 'object') {
-          result = {
-            ...result,
-            ...parseJson(obj[key], `${prefix}${key}.`)
-          };
-        }
-      }
-    }
-
-    return result;
-  };
-
-  return {
-    formatResponse:
-      typeof response === 'object' && !Array.isArray(response) ? parseJson(response) : {},
-    rawResponse: response
-  };
-}
-
-function replaceVariable(text: string, obj: Record<string, any>) {
-  for (const [key, value] of Object.entries(obj)) {
-    if (value === undefined) {
-      text = text.replace(new RegExp(`{{${key}}}`, 'g'), UNDEFINED_SIGN);
-    } else {
-      const replacement = JSON.stringify(value);
-      const unquotedReplacement =
-        replacement.startsWith('"') && replacement.endsWith('"')
-          ? replacement.slice(1, -1)
-          : replacement;
-      text = text.replace(new RegExp(`{{${key}}}`, 'g'), unquotedReplacement);
-    }
-  }
-  return text || '';
-}
-function removeUndefinedSign(obj: Record<string, any>) {
-  for (const key in obj) {
-    if (obj[key] === UNDEFINED_SIGN) {
-      obj[key] = undefined;
-    } else if (Array.isArray(obj[key])) {
-      obj[key] = obj[key].map((item: any) => {
-        if (item === UNDEFINED_SIGN) {
-          return undefined;
-        } else if (typeof item === 'object') {
-          removeUndefinedSign(item);
-        }
-        return item;
-      });
-    } else if (typeof obj[key] === 'object') {
-      removeUndefinedSign(obj[key]);
-    }
-  }
-  return obj;
-}
-function formatHttpError(error: any) {
-  return {
-    message: error?.message,
-    name: error?.name,
-    method: error?.config?.method,
-    baseURL: error?.config?.baseURL,
-    url: error?.config?.url,
-    code: error?.code,
-    status: error?.status
-  };
-}

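The UNDEFINED_SIGN sentinel above exists because {{variable}} substitution happens on a JSON string, where a literal undefined would break parsing. A short round-trip sketch, assuming the two helpers above are in scope:

// Sketch: the {{variable}} round trip.
const vars = { name: 'Alice', city: undefined };
const tpl = '{"user":"{{name}}","city":"{{city}}"}';
const filled = replaceVariable(tpl, vars);
// => '{"user":"Alice","city":"UNDEFINED_SIGN"}'  (still valid JSON)
const parsed = removeUndefinedSign(JSON.parse(filled));
// => { user: 'Alice', city: undefined }  — the sentinel is swapped back to undefined
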
@ -1,77 +0,0 @@
-// @ts-nocheck
-import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
-import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
-import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { ModelTypeEnum, getLLMModel } from '../../../../core/ai/model';
-import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
-import { queryExtension } from '../../../../core/ai/functions/queryExtension';
-import { getHistories } from '../utils';
-import { hashStr } from '@fastgpt/global/common/string/tools';
-import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
-
-type Props = ModuleDispatchProps<{
-  [NodeInputKeyEnum.aiModel]: string;
-  [NodeInputKeyEnum.aiSystemPrompt]?: string;
-  [NodeInputKeyEnum.history]?: ChatItemType[] | number;
-  [NodeInputKeyEnum.userChatInput]: string;
-}>;
-type Response = DispatchNodeResultType<{
-  [NodeOutputKeyEnum.text]: string;
-}>;
-
-export const dispatchQueryExtension = async ({
-  histories,
-  module,
-  params: { model, systemPrompt, history, userChatInput }
-}: Props): Promise<Response> => {
-  if (!userChatInput) {
-    return Promise.reject('Question is empty');
-  }
-
-  const queryExtensionModel = getLLMModel(model);
-  const chatHistories = getHistories(history, histories);
-
-  const { extensionQueries, tokens } = await queryExtension({
-    chatBg: systemPrompt,
-    query: userChatInput,
-    histories: chatHistories,
-    model: queryExtensionModel.model
-  });
-
-  extensionQueries.unshift(userChatInput);
-
-  const { totalPoints, modelName } = formatModelChars2Points({
-    model: queryExtensionModel.model,
-    tokens,
-    modelType: ModelTypeEnum.llm
-  });
-
-  const set = new Set<string>();
-  const filterSameQueries = extensionQueries.filter((item) => {
-    // Strip all punctuation and whitespace; compare only the text itself
-    const str = hashStr(item.replace(/[^\p{L}\p{N}]/gu, ''));
-    if (set.has(str)) return false;
-    set.add(str);
-    return true;
-  });
-
-  return {
-    [DispatchNodeResponseKeyEnum.nodeResponse]: {
-      totalPoints,
-      model: modelName,
-      tokens,
-      query: userChatInput,
-      textOutput: JSON.stringify(filterSameQueries)
-    },
-    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
-      {
-        moduleName: module.name,
-        totalPoints,
-        model: modelName,
-        tokens
-      }
-    ],
-    [NodeOutputKeyEnum.text]: JSON.stringify(filterSameQueries)
-  };
-};

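The dedupe filter above collapses extension queries that differ only in punctuation or spacing (case still matters, since the regex strips nothing alphanumeric). A sketch of the same comparison without the hashStr step, which does not change the outcome:

// Sketch: queries that normalize to the same letters/digits are dropped.
const queries = ['What is FastGPT?', 'What is FastGPT', 'Pricing?'];
const seen = new Set<string>();
const unique = queries.filter((q) => {
  const key = q.replace(/[^\p{L}\p{N}]/gu, ''); // 'WhatisFastGPT', 'WhatisFastGPT', 'Pricing'
  if (seen.has(key)) return false;
  seen.add(key);
  return true;
});
// unique => ['What is FastGPT?', 'Pricing?']
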
@ -1,209 +0,0 @@
-// @ts-nocheck
-import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
-import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import axios from 'axios';
-import { DYNAMIC_INPUT_KEY, valueTypeFormat } from '../utils';
-import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
-import { addLog } from '../../../../common/system/log';
-import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
-
-type LafRequestProps = ModuleDispatchProps<{
-  [NodeInputKeyEnum.httpReqUrl]: string;
-  [DYNAMIC_INPUT_KEY]: Record<string, any>;
-  [key: string]: any;
-}>;
-type LafResponse = DispatchNodeResultType<{
-  [NodeOutputKeyEnum.failed]?: boolean;
-  [key: string]: any;
-}>;
-
-const UNDEFINED_SIGN = 'UNDEFINED_SIGN';
-
-export const dispatchLafRequest = async (props: LafRequestProps): Promise<LafResponse> => {
-  let {
-    appId,
-    chatId,
-    responseChatItemId,
-    variables,
-    module: { outputs },
-    histories,
-    params: { system_httpReqUrl: httpReqUrl, [DYNAMIC_INPUT_KEY]: dynamicInput, ...body }
-  } = props;
-
-  if (!httpReqUrl) {
-    return Promise.reject('Http url is empty');
-  }
-
-  const concatVariables = {
-    appId,
-    chatId,
-    responseChatItemId,
-    ...variables,
-    ...body
-  };
-
-  httpReqUrl = replaceVariable(httpReqUrl, concatVariables);
-
-  const requestBody = {
-    systemParams: {
-      appId,
-      chatId,
-      responseChatItemId,
-      histories: histories.slice(0, 10)
-    },
-    variables,
-    ...dynamicInput,
-    ...body
-  };
-
-  try {
-    const { formatResponse, rawResponse } = await fetchData({
-      method: 'POST',
-      url: httpReqUrl,
-      body: requestBody
-    });
-
-    // format output value type
-    const results: Record<string, any> = {};
-    for (const key in formatResponse) {
-      const output = outputs.find((item) => item.key === key);
-      if (!output) continue;
-      results[key] = valueTypeFormat(formatResponse[key], output.valueType);
-    }
-
-    return {
-      assistantResponses: [],
-      [DispatchNodeResponseKeyEnum.nodeResponse]: {
-        totalPoints: 0,
-        body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
-        httpResult: rawResponse
-      },
-      [DispatchNodeResponseKeyEnum.toolResponses]: rawResponse,
-      [NodeOutputKeyEnum.httpRawResponse]: rawResponse,
-      ...results
-    };
-  } catch (error) {
-    addLog.error('Http request error', error);
-    return {
-      [NodeOutputKeyEnum.failed]: true,
-      [DispatchNodeResponseKeyEnum.nodeResponse]: {
-        totalPoints: 0,
-        body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
-        httpResult: { error: formatHttpError(error) }
-      }
-    };
-  }
-};
-
-async function fetchData({
-  method,
-  url,
-  body
-}: {
-  method: string;
-  url: string;
-  body: Record<string, any>;
-}): Promise<Record<string, any>> {
-  const { data: response } = await axios({
-    method,
-    baseURL: `http://${SERVICE_LOCAL_HOST}`,
-    url,
-    headers: {
-      'Content-Type': 'application/json'
-    },
-    data: body
-  });
-
-  const parseJson = (obj: Record<string, any>, prefix = '') => {
-    let result: Record<string, any> = {};
-
-    if (Array.isArray(obj)) {
-      for (let i = 0; i < obj.length; i++) {
-        result[`${prefix}[${i}]`] = obj[i];
-
-        if (Array.isArray(obj[i])) {
-          result = {
-            ...result,
-            ...parseJson(obj[i], `${prefix}[${i}]`)
-          };
-        } else if (typeof obj[i] === 'object') {
-          result = {
-            ...result,
-            ...parseJson(obj[i], `${prefix}[${i}].`)
-          };
-        }
-      }
-    } else if (typeof obj == 'object') {
-      for (const key in obj) {
-        result[`${prefix}${key}`] = obj[key];
-
-        if (Array.isArray(obj[key])) {
-          result = {
-            ...result,
-            ...parseJson(obj[key], `${prefix}${key}`)
-          };
-        } else if (typeof obj[key] === 'object') {
-          result = {
-            ...result,
-            ...parseJson(obj[key], `${prefix}${key}.`)
-          };
-        }
-      }
-    }
-
-    return result;
-  };
-
-  return {
-    formatResponse:
-      typeof response === 'object' && !Array.isArray(response) ? parseJson(response) : {},
-    rawResponse: response
-  };
-}
-
-function replaceVariable(text: string, obj: Record<string, any>) {
-  for (const [key, value] of Object.entries(obj)) {
-    if (value === undefined) {
-      text = text.replace(new RegExp(`{{${key}}}`, 'g'), UNDEFINED_SIGN);
-    } else {
-      const replacement = JSON.stringify(value);
-      const unquotedReplacement =
-        replacement.startsWith('"') && replacement.endsWith('"')
-          ? replacement.slice(1, -1)
-          : replacement;
-      text = text.replace(new RegExp(`{{${key}}}`, 'g'), unquotedReplacement);
-    }
-  }
-  return text || '';
-}
-function removeUndefinedSign(obj: Record<string, any>) {
-  for (const key in obj) {
-    if (obj[key] === UNDEFINED_SIGN) {
-      obj[key] = undefined;
-    } else if (Array.isArray(obj[key])) {
-      obj[key] = obj[key].map((item: any) => {
-        if (item === UNDEFINED_SIGN) {
-          return undefined;
-        } else if (typeof item === 'object') {
-          removeUndefinedSign(item);
-        }
-        return item;
-      });
-    } else if (typeof obj[key] === 'object') {
-      removeUndefinedSign(obj[key]);
-    }
-  }
-  return obj;
-}
-function formatHttpError(error: any) {
-  return {
-    message: error?.message,
-    name: error?.name,
-    method: error?.config?.method,
-    baseURL: error?.config?.baseURL,
-    url: error?.config?.url,
-    code: error?.code,
-    status: error?.status
-  };
-}

@ -1,16 +0,0 @@
-import {
-  AIChatItemValueItemType,
-  ChatHistoryItemResType,
-  ChatItemValueItemType,
-  ToolRunResponseItemType
-} from '@fastgpt/global/core/chat/type';
-import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
-
-export type DispatchFlowResponse = {
-  flowResponses: ChatHistoryItemResType[];
-  flowUsages: ChatNodeUsageType[];
-  [DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType;
-  [DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
-  newVariables: Record<string, string>;
-};

@ -1,66 +0,0 @@
-// @ts-nocheck
-import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
-import {
-  WorkflowIOValueTypeEnum,
-  NodeOutputKeyEnum
-} from '@fastgpt/global/core/workflow/constants';
-import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
-import { FlowNodeItemType, StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
-export const DYNAMIC_INPUT_KEY = 'DYNAMIC_INPUT_KEY';
-
-export const setEntryEntries = (modules: StoreNodeItemType[]) => {
-  const initRunningModuleType: Record<string, boolean> = {
-    questionInput: true,
-    [FlowNodeTypeEnum.pluginInput]: true
-  };
-
-  modules.forEach((item) => {
-    if (initRunningModuleType[item.flowType]) {
-      item.isEntry = true;
-    }
-  });
-  return modules;
-};
-
-export const checkTheModuleConnectedByTool = (
-  modules: FlowNodeItemType[],
-  module: FlowNodeItemType
-) => {
-  let sign = false;
-  const toolModules = modules.filter((item) => item.flowType === FlowNodeTypeEnum.tools);
-
-  toolModules.forEach((item) => {
-    const toolOutput = item.outputs.find(
-      (output) => output.key === NodeOutputKeyEnum.selectedTools
-    );
-    toolOutput?.targets.forEach((target) => {
-      if (target.moduleId === module.moduleId) {
-        sign = true;
-      }
-    });
-  });
-
-  return sign;
-};
-
-export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
-  if (!history) return [];
-  if (typeof history === 'number') return histories.slice(-history);
-  if (Array.isArray(history)) return history;
-
-  return [];
-};
-
-/* value type format */
-export const valueTypeFormat = (value: any, type?: `${WorkflowIOValueTypeEnum}`) => {
-  if (value === undefined) return;
-
-  if (type === 'string') {
-    if (typeof value !== 'object') return String(value);
-    return JSON.stringify(value);
-  }
-  if (type === 'number') return Number(value);
-  if (type === 'boolean') return Boolean(value);
-
-  return value;
-};

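The two small helpers at the end of the deleted utils file are referenced throughout the V1 dispatchers above; their behavior in brief, assuming both are in scope:

import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';

const allHistories: ChatItemType[] = []; // stand-in history list
valueTypeFormat({ a: 1 }, 'string');     // => '{"a":1}'  (objects are stringified)
valueTypeFormat('3', 'number');          // => 3
valueTypeFormat(undefined, 'string');    // => undefined  (passed through untouched)
getHistories(2, allHistories);           // number  -> the last 2 items
getHistories(undefined, allHistories);   // no value -> []
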
@ -50,6 +50,9 @@ const MySelect = <T = any,>(
   }>
 ) => {
   const ButtonRef = useRef<HTMLButtonElement>(null);
+  const MenuListRef = useRef<HTMLDivElement>(null);
+  const SelectedItemRef = useRef<HTMLDivElement>(null);
+
   const menuItemStyles: MenuItemProps = {
     borderRadius: 'sm',
     py: 2,

@ -71,6 +74,14 @@ const MySelect = <T = any,>(
     }
   }));
 
+  useEffect(() => {
+    if (isOpen && MenuListRef.current && SelectedItemRef.current) {
+      const menu = MenuListRef.current;
+      const selectedItem = SelectedItemRef.current;
+      menu.scrollTop = selectedItem.offsetTop - menu.offsetTop - 100;
+    }
+  }, [isOpen]);
+
   return (
     <Box
       css={css({

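The effect added above keeps the currently selected option visible when the menu opens. The offset arithmetic is worth spelling out; a hedged sketch (the 100px constant simply biases the selected row toward the top of the list rather than the exact edge):

// Sketch: scroll the menu so the selected row sits ~100px below the top edge.
// offsetTop is measured from the offset parent, so subtracting the menu's own
// offsetTop yields the row's position inside the scroll container.
function scrollSelectedIntoView(menu: HTMLDivElement, selectedItem: HTMLDivElement) {
  menu.scrollTop = selectedItem.offsetTop - menu.offsetTop - 100;
}
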
@ -113,6 +124,7 @@ const MySelect = <T = any,>(
       </MenuButton>
 
       <MenuList
+        ref={MenuListRef}
         className={props.className}
         minW={(() => {
           const w = ButtonRef.current?.clientWidth;

@ -140,6 +152,7 @@ const MySelect = <T = any,>(
               {...menuItemStyles}
               {...(value === item.value
                 ? {
+                    ref: SelectedItemRef,
                     color: 'primary.600',
                     bg: 'myGray.100'
                   }

@ -45,7 +45,11 @@ const NodeInputSelect = ({
   {
     type: FlowNodeInputTypeEnum.switch,
     icon: FlowNodeInputMap[FlowNodeInputTypeEnum.switch].icon,
     title: t('common:core.workflow.inputType.Manual select')
   },
+  {
+    type: FlowNodeInputTypeEnum.select,
+    icon: FlowNodeInputMap[FlowNodeInputTypeEnum.select].icon,
+    title: t('common:core.workflow.inputType.Manual select')
+  },
   {

@ -902,6 +902,7 @@
   "System Plugin": "System",
   "System input module": "System input",
   "Team Plugin": "Team",
+  "Team app": "Team",
   "Tool module": "Tool",
   "UnKnow Module": "Unknown module",
   "http body placeholder": "Same syntax as APIFox"

@ -1162,6 +1163,7 @@
   "Please bind laf accout first": "Please bind laf account first",
   "Plugin List": "Plugin list",
   "Search plugin": "Search plugin",
+  "Search_app": "Search app",
   "Set Name": "Name the plugin",
   "contribute": "Contribute plugins",
   "go to laf": "Go to write",

@ -902,6 +902,7 @@
   "System Plugin": "系统插件",
   "System input module": "系统输入",
   "Team Plugin": "团队插件",
+  "Team app": "团队应用",
   "Tool module": "工具",
   "UnKnow Module": "未知模块",
   "http body placeholder": "与 Apifox 相同的语法"

@ -1162,6 +1163,7 @@
   "Please bind laf accout first": "请先绑定 laf 账号",
   "Plugin List": "插件列表",
   "Search plugin": "搜索插件",
+  "Search_app": "搜索应用",
   "Set Name": "给插件取个名字",
   "contribute": "贡献插件",
   "go to laf": "去编写",

@ -114,7 +114,7 @@ export const onCreateApp = async ({
         type,
         version: 'v2',
         pluginData,
-        ...(type === AppTypeEnum.plugin && { 'pluginData.nodeVersion': defaultNodeVersion })
+        'pluginData.nodeVersion': defaultNodeVersion
       }
     ],
     { session }

@ -56,7 +56,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>): Promise<
         scheduledTriggerNextTime: chatConfig?.scheduledTriggerConfig?.cronString
           ? getNextTimeByCronStringAndTimezone(chatConfig.scheduledTriggerConfig)
           : null,
-        ...(app.type === AppTypeEnum.plugin && { 'pluginData.nodeVersion': _id })
+        'pluginData.nodeVersion': _id
       },
       {
         session

@ -73,7 +73,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>): Promise<
       scheduledTriggerNextTime: scheduledTriggerConfig?.cronString
         ? getNextTimeByCronStringAndTimezone(scheduledTriggerConfig)
         : null,
-      ...(app.type === AppTypeEnum.plugin && { 'pluginData.nodeVersion': _id })
+      'pluginData.nodeVersion': _id
     });
   });
 

@ -28,6 +28,8 @@ import {
   storeNodes2RuntimeNodes
 } from '@fastgpt/global/core/workflow/runtime/utils';
 import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
+import { getWorkflowResponseWrite } from '@fastgpt/service/core/workflow/dispatch/utils';
+import { getNanoid } from '@fastgpt/global/common/string/tools';
 
 export type Props = {
   messages: ChatCompletionMessageParam[];

@ -95,6 +97,12 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
   }
 
   runtimeNodes = rewriteNodeOutputByHistories(chatMessages, runtimeNodes);
+  const workflowResponseWrite = getWorkflowResponseWrite({
+    res,
+    detail: true,
+    streamResponse: true,
+    id: getNanoid(24)
+  });
 
   /* start process */
   const { flowResponses, flowUsages } = await dispatchWorkFlow({

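getWorkflowResponseWrite is the new SSE funnel this commit threads through the dispatchers. Its real implementation lives in @fastgpt/service/core/workflow/dispatch/utils; the sketch below is an inference from the call sites in this diff, not the actual code: the factory captures res/detail/streamResponse once, and the returned writer serializes `data` and emits a frame, dropping the event name when detail is off.

// Sketch only (assumed behavior), matching the call shape used above and below.
type WriteArgs = { event?: string; data: unknown };
const makeResponseWrite =
  ({ res, detail, streamResponse }: { res: any; detail: boolean; streamResponse: boolean }) =>
  ({ event, data }: WriteArgs) => {
    if (!streamResponse) return;
    if (event && detail) res.write(`event: ${event}\n`);
    res.write(`data: ${JSON.stringify(data)}\n\n`);
  };
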
@ -112,8 +120,8 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
     chatConfig,
     histories: chatMessages,
     stream: true,
-    detail: true,
-    maxRunTimes: 200
+    maxRunTimes: 200,
+    workflowStreamResponse: workflowResponseWrite
   });
 
   responseWrite({

@ -54,7 +54,6 @@ async function handler(
     chatConfig: defaultApp.chatConfig,
     histories: [],
     stream: false,
-    detail: true,
     maxRunTimes: 200
   });
 

@ -48,8 +48,6 @@ import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
 import { UserChatItemType } from '@fastgpt/global/core/chat/type';
 import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
 
-import { dispatchWorkFlowV1 } from '@fastgpt/service/core/workflow/dispatchV1';
-import { setEntryEntries } from '@fastgpt/service/core/workflow/dispatchV1/utils';
 import { NextAPI } from '@/service/middleware/entry';
 import { getAppLatestVersion } from '@fastgpt/service/core/app/controller';
 import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';

@ -65,6 +63,7 @@ import {
 } from '@fastgpt/global/core/app/plugin/utils';
 import { getSystemTime } from '@fastgpt/global/common/time/timezone';
 import { rewriteNodeOutputByHistories } from '@fastgpt/global/core/workflow/runtime/utils';
+import { getWorkflowResponseWrite } from '@fastgpt/service/core/workflow/dispatch/utils';
 
 type FastGptWebChatProps = {
   chatId?: string; // undefined: get histories from messages, '': new chat, 'xxxxx': get histories from db

@ -243,6 +242,13 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
 
   runtimeNodes = rewriteNodeOutputByHistories(newHistories, runtimeNodes);
 
+  const workflowResponseWrite = getWorkflowResponseWrite({
+    res,
+    detail,
+    streamResponse: stream,
+    id: chatId || getNanoid(24)
+  });
+
   /* start flow controller */
   const { flowResponses, flowUsages, assistantResponses, newVariables } = await (async () => {
     if (app.version === 'v2') {

@ -263,31 +269,11 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
         chatConfig,
         histories: newHistories,
         stream,
-        detail,
-        maxRunTimes: 200
+        maxRunTimes: 200,
+        workflowStreamResponse: workflowResponseWrite
       });
     }
-    return dispatchWorkFlowV1({
-      res,
-      mode: 'chat',
-      user,
-      teamId: String(teamId),
-      tmbId: String(tmbId),
-      appId: String(app._id),
-      chatId,
-      responseChatItemId,
-      //@ts-ignore
-      modules: setEntryEntries(app.modules),
-      variables,
-      inputFiles: files,
-      histories: newHistories,
-      startParams: {
-        userChatInput: text
-      },
-      stream,
-      detail,
-      maxRunTimes: 200
-    });
+    return Promise.reject('请升级工作流');
   })();
 
   // save chat

@ -346,9 +332,8 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
     : filterPublicNodeResponseData({ flowResponses });
 
   if (stream) {
-    responseWrite({
-      res,
-      event: detail ? SseResponseEventEnum.answer : undefined,
+    workflowResponseWrite({
+      event: SseResponseEventEnum.answer,
       data: textAdaptGptResponse({
         text: null,
         finish_reason: 'stop'

@ -362,10 +347,9 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
 
   if (detail) {
     if (responseDetail || isPlugin) {
-      responseWrite({
-        res,
+      workflowResponseWrite({
         event: SseResponseEventEnum.flowResponses,
-        data: JSON.stringify(feResponseData)
+        data: feResponseData
       });
     }
   }

@ -142,16 +142,15 @@ const NodeTemplatesModal = ({ isOpen, onClose }: ModuleTemplateListProps) => {
       searchVal?: string;
     }) => {
       if (type === TemplateTypeEnum.teamPlugin) {
-        const plugins = await getTeamPlugTemplates({
+        const teamApps = await getTeamPlugTemplates({
           parentId,
-          searchKey: searchVal,
-          type: [AppTypeEnum.folder, AppTypeEnum.httpPlugin, AppTypeEnum.plugin]
+          searchKey: searchVal
         }).then((res) => res.filter((app) => app.id !== appId));
 
-        return plugins.map<NodeTemplateListItemType>((plugin) => {
-          const member = members.find((member) => member.tmbId === plugin.tmbId);
+        return teamApps.map<NodeTemplateListItemType>((app) => {
+          const member = members.find((member) => member.tmbId === app.tmbId);
           return {
-            ...plugin,
+            ...app,
             author: member?.memberName,
             authorAvatar: member?.avatar
           };

@ -266,7 +265,7 @@ const NodeTemplatesModal = ({ isOpen, onClose }: ModuleTemplateListProps) => {
             },
             {
               icon: 'core/modules/teamPlugin',
-              label: t('common:core.module.template.Team Plugin'),
+              label: t('common:core.module.template.Team app'),
               value: TemplateTypeEnum.teamPlugin
             }
           ]}

@ -302,7 +301,11 @@ const NodeTemplatesModal = ({ isOpen, onClose }: ModuleTemplateListProps) => {
                 <Input
                   h={'full'}
                   bg={'myGray.50'}
-                  placeholder={t('common:plugin.Search plugin')}
+                  placeholder={
+                    templateType === TemplateTypeEnum.teamPlugin
+                      ? t('common:plugin.Search_app')
+                      : t('common:plugin.Search plugin')
+                  }
                   onChange={(e) => setSearchKey(e.target.value)}
                 />
               </InputGroup>

@ -424,7 +427,10 @@ const RenderList = React.memo(function RenderList({
     const templateNode = await (async () => {
       try {
         // get plugin preview module
-        if (template.flowNodeType === FlowNodeTypeEnum.pluginModule) {
+        if (
+          template.flowNodeType === FlowNodeTypeEnum.pluginModule ||
+          template.flowNodeType === FlowNodeTypeEnum.appModule
+        ) {
           setLoading(true);
           const res = await getPreviewPluginNode({ appId: template.id });
 

@ -38,6 +38,7 @@ const nodeTypes: Record<FlowNodeTypeEnum, any> = {
   [FlowNodeTypeEnum.contentExtract]: dynamic(() => import('./nodes/NodeExtract')),
   [FlowNodeTypeEnum.httpRequest468]: dynamic(() => import('./nodes/NodeHttp')),
   [FlowNodeTypeEnum.runApp]: NodeSimple,
+  [FlowNodeTypeEnum.appModule]: NodeSimple,
   [FlowNodeTypeEnum.pluginInput]: dynamic(() => import('./nodes/NodePluginIO/PluginInput')),
   [FlowNodeTypeEnum.pluginOutput]: dynamic(() => import('./nodes/NodePluginIO/PluginOutput')),
   [FlowNodeTypeEnum.pluginModule]: NodeSimple,

@ -19,7 +19,6 @@ import { storeNode2FlowNode, getLatestNodeTemplate } from '@/web/core/workflow/u
 import { getNanoid } from '@fastgpt/global/common/string/tools';
 import { useContextSelector } from 'use-context-selector';
 import { WorkflowContext } from '../../../context';
-import { useI18n } from '@/web/context/I18n';
 import { moduleTemplatesFlat } from '@fastgpt/global/core/workflow/template/constants';
 import { QuestionOutlineIcon } from '@chakra-ui/icons';
 import MyTooltip from '@fastgpt/web/components/common/MyTooltip';

@ -84,7 +83,10 @@ const NodeCard = (props: Props) => {
 
   const { data: nodeTemplate, runAsync: getNodeLatestTemplate } = useRequest2(
     async () => {
-      if (node?.flowNodeType === FlowNodeTypeEnum.pluginModule) {
+      if (
+        node?.flowNodeType === FlowNodeTypeEnum.pluginModule ||
+        node?.flowNodeType === FlowNodeTypeEnum.appModule
+      ) {
         if (!node?.pluginId) return;
         const template = await getPreviewPluginNode({ appId: node.pluginId });
 

@ -115,7 +117,10 @@ const NodeCard = (props: Props) => {
     const template = moduleTemplatesFlat.find((item) => item.flowNodeType === node?.flowNodeType);
     if (!node || !template) return;
 
-    if (node?.flowNodeType === FlowNodeTypeEnum.pluginModule) {
+    if (
+      node?.flowNodeType === FlowNodeTypeEnum.pluginModule ||
+      node?.flowNodeType === FlowNodeTypeEnum.appModule
+    ) {
       if (!node.pluginId) return;
       onResetNode({
         id: nodeId,

@ -298,11 +303,6 @@ const MenuRender = React.memo(function MenuRender({
   const { t } = useTranslation();
   const { openDebugNode, DebugInputModal } = useDebug();
 
-  const { openConfirm: onOpenConfirmDeleteNode, ConfirmModal: ConfirmDeleteModal } = useConfirm({
-    content: t('common:core.module.Confirm Delete Node'),
-    type: 'delete'
-  });
-
   const setNodes = useContextSelector(WorkflowContext, (v) => v.setNodes);
   const setEdges = useContextSelector(WorkflowContext, (v) => v.setEdges);
   const { computedNewNodeName } = useWorkflowUtils();

@ -420,7 +420,6 @@ const MenuRender = React.memo(function MenuRender({
           </Box>
         ))}
       </Box>
-      <ConfirmDeleteModal />
       <DebugInputModal />
     </>
   );

@ -429,7 +428,6 @@ const MenuRender = React.memo(function MenuRender({
     menuForbid?.copy,
     menuForbid?.delete,
     t,
-    ConfirmDeleteModal,
     DebugInputModal,
     openDebugNode,
     nodeId,

@ -20,6 +20,10 @@ const RenderList: {
     types: [FlowNodeInputTypeEnum.input],
     Component: dynamic(() => import('./templates/TextInput'))
   },
+  {
+    types: [FlowNodeInputTypeEnum.select],
+    Component: dynamic(() => import('./templates/Select'))
+  },
   {
     types: [FlowNodeInputTypeEnum.numberInput],
     Component: dynamic(() => import('./templates/NumberInput'))

@ -52,7 +52,6 @@ export const getScheduleTriggerApp = async () => {
       chatConfig: defaultApp.chatConfig,
       histories: [],
       stream: false,
-      detail: false,
      maxRunTimes: 200
    });
    pushChatUsage({

@ -1,6 +1,7 @@
 import { useTranslation } from 'next-i18next';
 import { useToast } from '@fastgpt/web/hooks/useToast';
 import { useCallback } from 'react';
+import { hasHttps } from '@fastgpt/web/common/system/utils';
 
 /**
  * copy text data

@ -16,7 +17,7 @@ export const useCopyData = () => {
       duration = 1000
     ) => {
       try {
-        if (navigator.clipboard) {
+        if (hasHttps() && navigator.clipboard) {
           await navigator.clipboard.writeText(data);
         } else {
           throw new Error('');
         }

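The thrown error above funnels non-HTTPS pages into the catch branch, which is where the "copy works without HTTPS" behavior from this release comes from. The classic textarea fallback is the usual pattern there; a hedged sketch of that fallback (not FastGPT's exact code, which is outside this hunk):

// Sketch: execCommand fallback for contexts where navigator.clipboard is unavailable.
function fallbackCopy(text: string) {
  const textarea = document.createElement('textarea');
  textarea.value = text;
  textarea.style.position = 'fixed'; // keep the page from jumping
  document.body.appendChild(textarea);
  textarea.select();
  document.execCommand('copy'); // deprecated but still the widest-supported fallback
  document.body.removeChild(textarea);
}
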
@ -7,7 +7,7 @@ import type {
 } from '@fastgpt/global/core/workflow/type/node';
 import { getMyApps } from '../api';
 import type { ListAppBody } from '@/pages/api/core/app/list';
-import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
+import { defaultNodeVersion, FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
 import { FlowNodeTemplateTypeEnum } from '@fastgpt/global/core/workflow/constants';
 import type { GetPreviewNodeQuery } from '@/pages/api/core/app/plugin/getPreviewNode';
 import { AppTypeEnum } from '@fastgpt/global/core/app/constants';

@ -23,12 +23,15 @@ export const getTeamPlugTemplates = (data?: ListAppBody) =>
       pluginId: app._id,
       isFolder: app.type === AppTypeEnum.folder || app.type === AppTypeEnum.httpPlugin,
       templateType: FlowNodeTemplateTypeEnum.teamApp,
-      flowNodeType: FlowNodeTypeEnum.pluginModule,
+      flowNodeType:
+        app.type === AppTypeEnum.workflow
+          ? FlowNodeTypeEnum.appModule
+          : FlowNodeTypeEnum.pluginModule,
       avatar: app.avatar,
       name: app.name,
       intro: app.intro,
       showStatus: false,
-      version: app.pluginData?.nodeVersion || '481',
+      version: app.pluginData?.nodeVersion || defaultNodeVersion,
       isTool: true
     }))
   );
