diff --git a/packages/global/core/ai/prompt/AIChat.ts b/packages/global/core/ai/prompt/AIChat.ts
index 36abbaeca..9fb0c6995 100644
--- a/packages/global/core/ai/prompt/AIChat.ts
+++ b/packages/global/core/ai/prompt/AIChat.ts
@@ -1,54 +1,70 @@
 import { PromptTemplateItem } from '../type.d';
 import { i18nT } from '../../../../web/i18n/utils';
+import { getPromptByVersion } from './agent';

 export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_template'),
     desc: i18nT('app:template.standard_template_des'),
-    value: `{
+    value: {
+      ['4.9.0']: `{
 "sourceName": "{{source}}",
 "updateTime": "{{updateTime}}",
 "content": "{{q}}\n{{a}}"
 }
 `
+    }
   },
   {
     title: i18nT('app:template.qa_template'),
     desc: i18nT('app:template.qa_template_des'),
-    value: `<Question>
+    value: {
+      ['4.9.0']: `<Question>
 {{q}}
 </Question>
 <Answer>
 {{a}}
 </Answer>`
+    }
   },
   {
     title: i18nT('app:template.standard_strict'),
     desc: i18nT('app:template.standard_strict_des'),
-    value: `{
+    value: {
+      ['4.9.0']: `{
 "sourceName": "{{source}}",
 "updateTime": "{{updateTime}}",
 "content": "{{q}}\n{{a}}"
 }
 `
+    }
   },
   {
     title: i18nT('app:template.hard_strict'),
     desc: i18nT('app:template.hard_strict_des'),
-    value: `<Question>
+    value: {
+      ['4.9.0']: `<Question>
 {{q}}
 </Question>
 <Answer>
 {{a}}
 </Answer>`
+    }
   }
 ];

+export const getQuoteTemplate = (version?: string) => {
+  const defaultTemplate = Prompt_QuoteTemplateList[0].value;
+
+  return getPromptByVersion(version, defaultTemplate);
+};
+
 export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_template'),
     desc: '',
-    value: `使用 <Quote></Quote> 标记中的内容作为本次对话的参考:
+    value: {
+      ['4.9.0']: `使用 <Quote></Quote> 标记中的内容作为本次对话的参考:

 <Quote>
 {{quote}}
 </Quote>

@@ -62,11 +78,13 @@ export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
 - 使用与问题相同的语言回答。

 问题:"""{{question}}"""`
+    }
   },
   {
     title: i18nT('app:template.qa_template'),
     desc: '',
-    value: `使用 <QA></QA> 标记中的问答对进行回答。
+    value: {
+      ['4.9.0']: `使用 <QA></QA> 标记中的问答对进行回答。

 <QA>
 {{quote}}
 </QA>

@@ -79,11 +97,13 @@ export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
 - 避免提及你是从 QA 获取的知识,只需要回复答案。

 问题:"""{{question}}"""`
+    }
   },
   {
     title: i18nT('app:template.standard_strict'),
     desc: '',
-    value: `忘记你已有的知识,仅使用 <Quote></Quote> 标记中的内容作为本次对话的参考:
+    value: {
+      ['4.9.0']: `忘记你已有的知识,仅使用 <Quote></Quote> 标记中的内容作为本次对话的参考:

 <Quote>
 {{quote}}
 </Quote>

@@ -101,11 +121,13 @@ export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
 - 使用与问题相同的语言回答。

 问题:"""{{question}}"""`
+    }
   },
   {
     title: i18nT('app:template.hard_strict'),
     desc: '',
-    value: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
+    value: {
+      ['4.9.0']: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。

 <QA>
 {{quote}}
 </QA>

@@ -126,6 +148,7 @@ export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
 - 使用与问题相同的语言回答。

 问题:"""{{question}}"""`
+    }
   }
 ];

@@ -133,7 +156,8 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_template'),
     desc: '',
-    value: `使用 <Quote></Quote> 标记中的内容作为本次对话的参考:
+    value: {
+      ['4.9.0']: `使用 <Quote></Quote> 标记中的内容作为本次对话的参考:

 <Quote>
 {{quote}}
 </Quote>

@@ -145,11 +169,13 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
 - 保持答案与 <Quote> 中描述的一致。
 - 使用 Markdown 语法优化回答格式。
 - 使用与问题相同的语言回答。`
+    }
   },
   {
     title: i18nT('app:template.qa_template'),
     desc: '',
-    value: `使用 <QA></QA> 标记中的问答对进行回答。
+    value: {
+      ['4.9.0']: `使用 <QA></QA> 标记中的问答对进行回答。

 <QA>
 {{quote}}
 </QA>

@@ -160,11 +186,13 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
 - 回答的内容应尽可能与 <答案> 中的内容一致。
 - 如果没有相关的问答对,你需要澄清。
 - 避免提及你是从 QA 获取的知识,只需要回复答案。`
+    }
   },
   {
     title: i18nT('app:template.standard_strict'),
     desc: '',
-    value: `忘记你已有的知识,仅使用 <Quote></Quote> 标记中的内容作为本次对话的参考:
+    value: {
+      ['4.9.0']: `忘记你已有的知识,仅使用 <Quote></Quote> 标记中的内容作为本次对话的参考:

 <Quote>
 {{quote}}
 </Quote>

@@ -180,11 +208,13 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
 - 保持答案与 <Quote> 中描述的一致。
 - 使用 Markdown 语法优化回答格式。
 - 使用与问题相同的语言回答。`
+    }
   },
   {
     title: i18nT('app:template.hard_strict'),
     desc: '',
-    value: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
+    value: {
+      ['4.9.0']: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。

 <QA>
 {{quote}}
 </QA>

@@ -203,12 +233,28 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
 - 避免提及你是从 QA 获取的知识,只需要回复答案。
 - 使用 Markdown 语法优化回答格式。
 - 使用与问题相同的语言回答。`
+    }
   }
 ];

+export const getQuotePrompt = (version?: string, role: 'user' | 'system' = 'user') => {
+  const quotePromptTemplates =
+    role === 'user' ? Prompt_userQuotePromptList : Prompt_systemQuotePromptList;
+
+  const defaultTemplate = quotePromptTemplates[0].value;
+
+  return getPromptByVersion(version, defaultTemplate);
+};
+
 // Document quote prompt
-export const Prompt_DocumentQuote = `将 <Quote></Quote> 中的内容作为本次对话的参考:
-<Quote>
-{{quote}}
-</Quote>
-`;
+export const getDocumentQuotePrompt = (version: string) => {
+  const promptMap = {
+    ['4.9.0']: `将 <Quote></Quote> 中的内容作为本次对话的参考:
+<Quote>
+{{quote}}
+</Quote>
+`
+  };
+
+  return getPromptByVersion(version, promptMap);
+};
diff --git a/packages/global/core/ai/prompt/agent.ts b/packages/global/core/ai/prompt/agent.ts
index ea3362ed8..5340df77c 100644
--- a/packages/global/core/ai/prompt/agent.ts
+++ b/packages/global/core/ai/prompt/agent.ts
@@ -25,7 +25,29 @@
 A2: `
 };

-export const Prompt_ExtractJson = `你可以从 <对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。
+export const getPromptByVersion = (version?: string, promptMap: Record<string, string> = {}) => {
+  const versions = Object.keys(promptMap).sort((a, b) => {
+    const [majorA, minorA, patchA] = a.split('.').map(Number);
+    const [majorB, minorB, patchB] = b.split('.').map(Number);
+
+    if (majorA !== majorB) return majorB - majorA;
+    if (minorA !== minorB) return minorB - minorA;
+    return patchB - patchA;
+  });
+
+  if (!version) {
+    return promptMap[versions[0]];
+  }
+
+  if (version in promptMap) {
+    return promptMap[version];
+  }
+  return promptMap[versions[versions.length - 1]];
+};
+
+export const getExtractJsonPrompt = (version?: string) => {
+  const promptMap: Record<string, string> = {
+    ['4.8.1']: `你可以从 <对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。

 <提取要求>
 {{description}}
 </提取要求>

@@ -44,9 +66,31 @@ export const Prompt_ExtractJson = `你可以从 <对话记录> 中提取指定

 <对话记录>
 {{text}}
 </对话记录>

-提取的 json 字符串:`;
+提取的 json 字符串:`
+  };

+  return getPromptByVersion(version, promptMap);
+};
+
+export const getExtractJsonToolPrompt = (version?: string) => {
+  const promptMap: Record<string, string> = {
+    ['4.8.1']: `我正在执行一个函数,需要你提供一些参数,请以 JSON 字符串格式返回这些参数,要求:
+"""
+- {{description}}
+- 不是每个参数都是必须生成的,如果没有合适的参数值,不要生成该参数,或返回空字符串。
+- 需要结合前面的对话内容,一起生成合适的参数。
+"""
+
+本次输入内容: """{{content}}"""
+    `
+  };
+
+  return getPromptByVersion(version, promptMap);
+};
+
+export const getCQPrompt = (version?: string) => {
+  const promptMap: Record<string, string> = {
+    ['4.8.1']: `请帮我执行一个"问题分类"任务,将问题分类为以下几种类型之一:

 """
 {{typeList}}
@@ -64,9 +108,14 @@ export const Prompt_CQJson = `请帮我执行一个“问题分类”任务,
 问题:"{{question}}"
 类型ID=
-`;
+`
+  };

-export const PROMPT_QUESTION_GUIDE = `You are an AI assistant tasked with predicting the user's next question based on the conversation history. Your goal is to generate 3 potential questions that will guide the user to continue the conversation. When generating these questions, adhere to the following rules:
+  return getPromptByVersion(version, promptMap);
+};
+
+export const getQuestionGuidePrompt = () => {
+  return `You are an AI assistant tasked with predicting the user's next question based on the conversation history. Your goal is to generate 3 potential questions that will guide the user to continue the conversation. When generating these questions, adhere to the following rules:

 1. Use the same language as the user's last question in the conversation history.
 2. Keep each question under 20 characters in length.
@@ -74,4 +123,8 @@ export const PROMPT_QUESTION_GUIDE = `You are an AI assistant tasked with predic
 Analyze the conversation history provided to you and use it as context to generate relevant and engaging follow-up questions. Your predictions should be logical extensions of the current topic or related areas that the user might be interested in exploring further.

 Remember to maintain consistency in tone and style with the existing conversation while providing diverse options for the user to choose from. Your goal is to keep the conversation flowing naturally and help the user delve deeper into the subject matter or explore related topics.`;
-export const PROMPT_QUESTION_GUIDE_FOOTER = `Please strictly follow the format rules: \nReturn questions in JSON format: ['Question 1', 'Question 2', 'Question 3']. Your output: `;
+};
+
+export const getQuestionGuideFooterPrompt = () => {
+  return `Please strictly follow the format rules: \nReturn questions in JSON format: ['Question 1', 'Question 2', 'Question 3']. Your output: `;
+};
diff --git a/packages/global/core/ai/type.d.ts b/packages/global/core/ai/type.d.ts
index bb7f87c38..c87cd931c 100644
--- a/packages/global/core/ai/type.d.ts
+++ b/packages/global/core/ai/type.d.ts
@@ -80,5 +80,5 @@ export * from 'openai';
 export type PromptTemplateItem = {
   title: string;
   desc: string;
-  value: string;
+  value: Record<string, string>;
 };
diff --git a/packages/global/core/app/constants.ts b/packages/global/core/app/constants.ts
index a57a7a797..f24a194ae 100644
--- a/packages/global/core/app/constants.ts
+++ b/packages/global/core/app/constants.ts
@@ -1,4 +1,3 @@
-import { PROMPT_QUESTION_GUIDE } from '../ai/prompt/agent';
 import {
   AppTTSConfigType,
   AppFileSelectConfigType,
diff --git a/packages/global/core/workflow/template/input.ts b/packages/global/core/workflow/template/input.ts
index 72b4067c3..c1e8ffe37 100644
--- a/packages/global/core/workflow/template/input.ts
+++ b/packages/global/core/workflow/template/input.ts
@@ -76,13 +76,6 @@ export const Input_Template_Text_Quote: FlowNodeInputItemType = {
   valueType: WorkflowIOValueTypeEnum.string
 };

-export const Input_Template_File_Link_Prompt: FlowNodeInputItemType = {
-  key: NodeInputKeyEnum.fileUrlList,
-  renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input],
-  label: i18nT('app:file_quote_link'),
-  debugLabel: i18nT('app:file_quote_link'),
-  valueType: WorkflowIOValueTypeEnum.arrayString
-};
 export const Input_Template_File_Link: FlowNodeInputItemType = {
   key: NodeInputKeyEnum.fileUrlList,
   renderTypeList: [FlowNodeInputTypeEnum.reference],
diff --git a/packages/global/core/workflow/template/system/aiChat/index.ts b/packages/global/core/workflow/template/system/aiChat/index.ts
index db5df3d6d..2ba390860 100644
--- a/packages/global/core/workflow/template/system/aiChat/index.ts
+++ b/packages/global/core/workflow/template/system/aiChat/index.ts
@@ -17,7 +17,7 @@ import {
   Input_Template_History,
   Input_Template_System_Prompt,
   Input_Template_UserChatInput,
-  Input_Template_File_Link_Prompt
+  Input_Template_File_Link
 } from '../../input';
 import { chatNodeSystemPromptTip, systemPromptTip } from '../../tip';
 import { getHandleConfig } from '../../utils';
@@ -129,7 +129,7 @@
     },
     Input_Template_History,
     Input_Template_Dataset_Quote,
-    Input_Template_File_Link_Prompt,
+    Input_Template_File_Link,
     { ...Input_Template_UserChatInput, toolDescription: i18nT('workflow:user_question') }
   ],
   outputs: [
diff --git a/packages/global/core/workflow/template/system/tools.ts b/packages/global/core/workflow/template/system/tools.ts
index 672deaffa..1def9fa69 100644
--- a/packages/global/core/workflow/template/system/tools.ts
+++ b/packages/global/core/workflow/template/system/tools.ts
@@ -20,7 +20,7 @@ import { chatNodeSystemPromptTip, systemPromptTip } from '../tip';
 import { LLMModelTypeEnum } from '../../../ai/constants';
 import { getHandleConfig } from '../utils';
 import { i18nT } from '../../../../../web/i18n/utils';
-import { Input_Template_File_Link_Prompt } from '../input';
+import { Input_Template_File_Link } from '../input';

 export const ToolModule: FlowNodeTemplateType = {
   id: FlowNodeTypeEnum.tools,
@@ -97,7 +97,7 @@
       placeholder: chatNodeSystemPromptTip
     },
     Input_Template_History,
-    Input_Template_File_Link_Prompt,
+    Input_Template_File_Link,
     Input_Template_UserChatInput
   ],
   outputs: [
diff --git a/packages/service/core/ai/functions/createQuestionGuide.ts b/packages/service/core/ai/functions/createQuestionGuide.ts
index 7ceb33e51..3e0bcd65f 100644
--- a/packages/service/core/ai/functions/createQuestionGuide.ts
+++ b/packages/service/core/ai/functions/createQuestionGuide.ts
@@ -4,8 +4,8 @@ import { countGptMessagesTokens, countPromptTokens } from '../../../common/strin
 import { loadRequestMessages } from '../../chat/utils';
 import { llmCompletionsBodyFormat } from '../utils';
 import {
-  PROMPT_QUESTION_GUIDE,
-  PROMPT_QUESTION_GUIDE_FOOTER
+  getQuestionGuideFooterPrompt,
+  getQuestionGuidePrompt
 } from '@fastgpt/global/core/ai/prompt/agent';
 import { addLog } from '../../../common/system/log';
 import json5 from 'json5';
@@ -27,7 +27,7 @@
     ...messages,
     {
       role: 'user',
-      content: `${customPrompt || PROMPT_QUESTION_GUIDE}\n${PROMPT_QUESTION_GUIDE_FOOTER}`
+      content: `${customPrompt || getQuestionGuidePrompt()}\n${getQuestionGuideFooterPrompt()}`
     }
   ];
   const requestMessages = await loadRequestMessages({
diff --git a/packages/service/core/workflow/dispatch/agent/classifyQuestion.ts b/packages/service/core/workflow/dispatch/agent/classifyQuestion.ts
index 034c483f6..e0b608e83 100644
--- a/packages/service/core/workflow/dispatch/agent/classifyQuestion.ts
+++ b/packages/service/core/workflow/dispatch/agent/classifyQuestion.ts
@@ -10,8 +10,7 @@ import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/workflo
 import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
 import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
 import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { replaceVariable } from '@fastgpt/global/common/string/tools';
-import { Prompt_CQJson } from '@fastgpt/global/core/ai/prompt/agent';
+import { getCQPrompt } from '@fastgpt/global/core/ai/prompt/agent';
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getLLMModel } from '../../../ai/model';
 import { getHistories } from '../utils';
@@ -23,6 +22,7 @@
 import { loadRequestMessages } from '../../../chat/utils';
 import { llmCompletionsBodyFormat } from '../../../ai/utils';
 import { addLog } from '../../../../common/system/log';
 import { ModelTypeEnum } from '../../../../../global/core/ai/model';
+import { replaceVariable } from '@fastgpt/global/common/string/tools';

 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.aiModel]: string;
@@ -99,7 +99,8 @@ const completions = async ({
   cqModel,
   externalProvider,
   histories,
-  params: { agents, systemPrompt = '', userChatInput }
+  params: { agents, systemPrompt = '', userChatInput },
+  node: { version }
 }: ActionProps) => {
   const messages: ChatItemType[] = [
     {
@@ -108,7 +109,7 @@
       {
         type: ChatItemValueTypeEnum.text,
         text: {
-          content: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
+          content: replaceVariable(cqModel.customCQPrompt || getCQPrompt(version), {
             systemPrompt: systemPrompt || 'null',
             typeList: agents
               .map((item) => `{"类型ID":"${item.key}", "问题类型":"${item.value}"}`)
diff --git a/packages/service/core/workflow/dispatch/agent/extract.ts b/packages/service/core/workflow/dispatch/agent/extract.ts
index 2fb04061d..53008680d 100644
--- a/packages/service/core/workflow/dispatch/agent/extract.ts
+++ b/packages/service/core/workflow/dispatch/agent/extract.ts
@@ -16,7 +16,6 @@ import {
 } from '@fastgpt/global/core/workflow/constants';
 import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
 import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
 import { replaceVariable, sliceJsonStr } from '@fastgpt/global/common/string/tools';
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getHistories } from '../utils';
@@ -33,6 +32,10 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { llmCompletionsBodyFormat } from '../../../ai/utils';
 import { ModelTypeEnum } from '../../../../../global/core/ai/model';
+import {
+  getExtractJsonPrompt,
+  getExtractJsonToolPrompt
+} from '@fastgpt/global/core/ai/prompt/agent';

 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.history]?: ChatItemType[];
@@ -154,7 +157,8 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
 const getFunctionCallSchema = async ({
   extractModel,
   histories,
-  params: { content, extractKeys, description }
+  params: { content, extractKeys, description },
+  node: { version }
 }: ActionProps) => {
   const messages: ChatItemType[] = [
     ...histories,
     {
@@ -164,15 +168,10 @@
       {
         type: ChatItemValueTypeEnum.text,
         text: {
-          content: `我正在执行一个函数,需要你提供一些参数,请以 JSON 字符串格式返回这些参数,要求:
-"""
-${description ? `- ${description}` : ''}
-- 不是每个参数都是必须生成的,如果没有合适的参数值,不要生成该参数,或返回空字符串。
-- 需要结合前面的对话内容,一起生成合适的参数。
-"""
-
-本次输入内容: """${content}"""
-          `
+          content: replaceVariable(getExtractJsonToolPrompt(version), {
+            description,
+            content
+          })
         }
       }
     ]
@@ -334,7 +333,8 @@ const completions = async ({
   extractModel,
   externalProvider,
   histories,
-  params: { content, extractKeys, description = 'No special requirements' }
+  params: { content, extractKeys, description = 'No special requirements' },
+  node: { version }
 }: ActionProps) => {
   const messages: ChatItemType[] = [
     {
@@ -343,23 +343,26 @@
       {
         type: ChatItemValueTypeEnum.text,
         text: {
-          content: replaceVariable(extractModel.customExtractPrompt || Prompt_ExtractJson, {
-            description,
-            json: extractKeys
-              .map((item) => {
-                const valueType = item.valueType || 'string';
-                if (valueType !== 'string' && valueType !== 'number') {
-                  item.enum = undefined;
-                }
+          content: replaceVariable(
+            extractModel.customExtractPrompt || getExtractJsonPrompt(version),
+            {
+              description,
+              json: extractKeys
+                .map((item) => {
+                  const valueType = item.valueType || 'string';
+                  if (valueType !== 'string' && valueType !== 'number') {
+                    item.enum = undefined;
+                  }

-                return `{"type":${item.valueType || 'string'}, "key":"${item.key}", "description":"${item.desc}" ${
-                  item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
-                }}`;
-              })
-              .join('\n'),
-            text: `${histories.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`).join('\n')}
+                  return `{"type":${item.valueType || 'string'}, "key":"${item.key}", "description":"${item.desc}" ${
+                    item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
+                  }}`;
+                })
+                .join('\n'),
+              text: `${histories.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`).join('\n')}
 Human: ${content}`
-          })
+            }
+          )
         }
       }
     ]
diff --git a/packages/service/core/workflow/dispatch/agent/runTool/index.ts b/packages/service/core/workflow/dispatch/agent/runTool/index.ts
index b10b57eac..ccf03ba6b 100644
--- a/packages/service/core/workflow/dispatch/agent/runTool/index.ts
+++ b/packages/service/core/workflow/dispatch/agent/runTool/index.ts
@@ -28,10 +28,10 @@ import { filterToolResponseToPreview } from './utils';
 import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
 import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles';
 import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
-import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat';
 import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
 import { postTextCensor } from '../../../../../common/api/requestPlusApi';
 import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
+import { getDocumentQuotePrompt } from '@fastgpt/global/core/ai/prompt/AIChat';

 type Response = DispatchNodeResultType<{
   [NodeOutputKeyEnum.answerText]: string;
@@ -40,7 +40,7 @@
 export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
   const {
-    node: { nodeId, name, isEntry },
+    node: { nodeId, name, isEntry, version },
     runtimeNodes,
     runtimeEdges,
     histories,
@@ -118,7 +118,7 @@
       toolModel.defaultSystemChatPrompt,
       systemPrompt,
       documentQuoteText
-        ? replaceVariable(Prompt_DocumentQuote, {
+        ? replaceVariable(getDocumentQuotePrompt(version), {
             quote: documentQuoteText
           })
         : ''
diff --git a/packages/service/core/workflow/dispatch/chat/oneapi.ts b/packages/service/core/workflow/dispatch/chat/oneapi.ts
index 2e0abebb3..743c0f30f 100644
--- a/packages/service/core/workflow/dispatch/chat/oneapi.ts
+++ b/packages/service/core/workflow/dispatch/chat/oneapi.ts
@@ -24,10 +24,9 @@ import {
   runtimePrompt2ChatsValue
 } from '@fastgpt/global/core/chat/adapt';
 import {
-  Prompt_DocumentQuote,
-  Prompt_userQuotePromptList,
-  Prompt_QuoteTemplateList,
-  Prompt_systemQuotePromptList
+  getQuoteTemplate,
+  getQuotePrompt,
+  getDocumentQuotePrompt
 } from '@fastgpt/global/core/ai/prompt/AIChat';
 import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -70,7 +69,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise
-        {t('common:core.app.Question Guide')}
+        {t('common:core.app.Question Guide')}
@@ -168,7 +168,7 @@
           }
         }}
       >
-        {customPrompt || PROMPT_QUESTION_GUIDE}
+        {customPrompt || getQuestionGuidePrompt()}
@@ -178,8 +178,8 @@
       {isOpenCustomPrompt && (
         {
             onChange({
               ...value,
diff --git a/projects/app/src/pageComponents/account/model/AddModelBox.tsx b/projects/app/src/pageComponents/account/model/AddModelBox.tsx
index a94d8b881..0caef71b1 100644
--- a/projects/app/src/pageComponents/account/model/AddModelBox.tsx
+++ b/projects/app/src/pageComponents/account/model/AddModelBox.tsx
@@ -36,9 +36,9 @@ import JsonEditor from '@fastgpt/web/components/common/Textarea/JsonEditor';
 import MyMenu from '@fastgpt/web/components/common/MyMenu';
 import { useSystemStore } from '@/web/common/system/useSystemStore';
 import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
-import { Prompt_CQJson, Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
 import MyModal from '@fastgpt/web/components/common/MyModal';
 import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
+import { getCQPrompt, getExtractJsonPrompt } from '@fastgpt/global/core/ai/prompt/agent';

 export const AddModelButton = ({
   onCreate,
@@ -677,7 +677,9 @@
       {t('account:model.custom_cq_prompt')}
@@ -691,7 +693,7 @@
       {t('account:model.custom_extract_prompt')}
diff --git a/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SettingQuotePrompt.tsx b/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SettingQuotePrompt.tsx
index 26cc7a66e..1a95e5105 100644
--- a/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SettingQuotePrompt.tsx
+++ b/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SettingQuotePrompt.tsx
@@ -10,7 +10,9 @@ import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
 import {
   Prompt_userQuotePromptList,
   Prompt_QuoteTemplateList,
-  Prompt_systemQuotePromptList
+  Prompt_systemQuotePromptList,
+  getQuoteTemplate,
+  getQuotePrompt
 } from '@fastgpt/global/core/ai/prompt/AIChat';
 import PromptEditor from '@fastgpt/web/components/common/Textarea/PromptEditor';
 import PromptTemplate from '@/components/PromptTemplate';
@@ -48,6 +50,8 @@ const EditModal = ({ onClose, ...props }: RenderInputProps & { onClose: () => vo
   const { t } = useTranslation();
   const onChangeNode = useContextSelector(WorkflowContext, (v) => v.onChangeNode);
   const nodeList = useContextSelector(WorkflowContext, (v) => v.nodeList);
+  const node = nodeList.find((item) => item.nodeId === nodeId);
+  const nodeVersion = node?.version;

   const { watch, setValue, handleSubmit } = useForm({
     defaultValues: {
@@ -219,7 +223,7 @@ const EditModal = ({ onClose, ...props }: RenderInputProps & { onClose: () => vo
@@ -254,7 +258,7 @@ const EditModal = ({ onClose, ...props }: RenderInputProps & { onClose: () => vo
@@ -263,7 +267,7 @@ const EditModal = ({ onClose, ...props }: RenderInputProps & { onClose: () => vo
         title={t('common:core.app.Quote prompt')}
         minH={300}
         placeholder={t('workflow:quote_prompt_tip', {
-          default: quotePromptTemplates[0].value
+          default: getQuotePrompt(nodeVersion, aiChatQuoteRole)
         })}
         value={aiChatQuotePrompt}
         onChange={(e) => {
@@ -288,10 +292,10 @@ const EditModal = ({ onClose, ...props }: RenderInputProps & { onClose: () => vo
         onSuccess={(e) => {
           const quoteVal = e.value;
-          const promptVal = quotePromptTemplates.find((item) => item.title === e.title)?.value;
+          const promptVal = quotePromptTemplates.find((item) => item.title === e.title)?.value!;

-          setValue('quoteTemplate', quoteVal);
-          setValue('quotePrompt', promptVal);
+          setValue('quoteTemplate', Object.values(quoteVal)[0]);
+          setValue('quotePrompt', Object.values(promptVal)[0]);
         }}
       />
     )}
diff --git a/projects/app/src/web/core/app/utils.ts b/projects/app/src/web/core/app/utils.ts
index 9ca72bba3..326d40c2b 100644
--- a/projects/app/src/web/core/app/utils.ts
+++ b/projects/app/src/web/core/app/utils.ts
@@ -35,7 +35,7 @@ import {
 import { DatasetSearchModule } from '@fastgpt/global/core/workflow/template/system/datasetSearch';
 import { i18nT } from '@fastgpt/web/i18n/utils';
 import {
-  Input_Template_File_Link_Prompt,
+  Input_Template_File_Link,
   Input_Template_UserChatInput
 } from '@fastgpt/global/core/workflow/template/input';
 import { workflowStartNodeId } from './constants';
@@ -175,7 +175,7 @@ export function form2AppWorkflow(
         value: selectedDatasets?.length > 0 ? [datasetNodeId, 'quoteQA'] : undefined
       },
       {
-        ...Input_Template_File_Link_Prompt,
+        ...Input_Template_File_Link,
         value: [[workflowStartNodeId, NodeOutputKeyEnum.userFiles]]
       },
       {
@@ -502,7 +502,7 @@ export function form2AppWorkflow(
         value: formData.aiSettings.maxHistories
       },
      {
-        ...Input_Template_File_Link_Prompt,
+        ...Input_Template_File_Link,
         value: [[workflowStartNodeId, NodeOutputKeyEnum.userFiles]]
      },
      {
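For reviewers, the version-resolution behavior that every new getter in this patch relies on is easiest to see in isolation. The sketch below copies the `getPromptByVersion` body verbatim from the `agent.ts` hunk; the `promptMap` contents and the `console.log` calls are made-up placeholders, not real FastGPT prompts. Note the asymmetry it encodes: a missing version resolves to the newest entry, while an unrecognized version falls back to the oldest one.

```ts
// Standalone sketch of the helper added in packages/global/core/ai/prompt/agent.ts.
// Function body copied from the diff; the promptMap below is illustrative only.
const getPromptByVersion = (version?: string, promptMap: Record<string, string> = {}) => {
  // Sort the map's version keys from newest to oldest (numeric major.minor.patch compare).
  const versions = Object.keys(promptMap).sort((a, b) => {
    const [majorA, minorA, patchA] = a.split('.').map(Number);
    const [majorB, minorB, patchB] = b.split('.').map(Number);

    if (majorA !== majorB) return majorB - majorA;
    if (minorA !== minorB) return minorB - minorA;
    return patchB - patchA;
  });

  // No version supplied: use the newest prompt.
  if (!version) {
    return promptMap[versions[0]];
  }

  // Exact match: use that version's prompt.
  if (version in promptMap) {
    return promptMap[version];
  }
  // Unknown version: fall back to the oldest prompt (versions is sorted descending).
  return promptMap[versions[versions.length - 1]];
};

// Hypothetical two-version map for illustration.
const promptMap = {
  ['4.8.1']: 'legacy prompt',
  ['4.9.0']: 'current prompt'
};

console.log(getPromptByVersion(undefined, promptMap)); // 'current prompt'
console.log(getPromptByVersion('4.9.0', promptMap)); // 'current prompt'
console.log(getPromptByVersion('4.8.5', promptMap)); // 'legacy prompt'
```

Callers such as `getCQPrompt(version)`, `getQuoteTemplate(version)`, and `getDocumentQuotePrompt(version)` inherit these rules, so a workflow node saved without a `version` field always tracks the newest prompt, while a node pinned to a version that has no entry degrades to the oldest available one.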