mirror of https://github.com/1Panel-dev/MaxKB.git
synced 2025-12-25 17:22:55 +00:00
fix: In a workflow that contains a form-collection node, refreshing the demonstration page after a normal Q&A completed would also display the output of the nodes that ran before the form-collection node (#3081)
This commit is contained in:
parent ccf43bbcd9
commit 2728453f6c
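Every hunk below applies the same guard in a node's save_context: answer_text is populated only when the node is flagged as a result node, so a restored chat replays the result node's output rather than that of every node that ran before the form-collection node. A minimal sketch of the pattern, using an illustrative stand-in class (not a real MaxKB node):

from typing import Dict


class ExampleNode:
    def __init__(self, node_params: Dict):
        self.node_params = node_params
        self.context: Dict = {}
        self.answer_text = None

    def save_context(self, details: Dict, workflow_manage=None):
        # The node's output always stays available to downstream variables.
        self.context['answer'] = details.get('answer')
        # But only a result node contributes to the replayed answer text;
        # without this guard, a page refresh re-rendered every node's output.
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('answer')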
@@ -40,6 +40,7 @@ tool_message_template = """
 """
 
 
 def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
                    reasoning_content: str):
     chat_model = node_variable.get('chat_model')
@@ -102,7 +103,6 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode,
     _write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
 
 
 async def _yield_mcp_response(chat_model, message_list, mcp_servers):
     async with MultiServerMCPClient(json.loads(mcp_servers)) as client:
         agent = create_react_agent(chat_model, client.get_tools())
@@ -115,6 +115,7 @@ async def _yield_mcp_response(chat_model, message_list, mcp_servers):
             if isinstance(chunk[0], AIMessageChunk):
                 yield chunk[0]
 
 
 def mcp_response_generator(chat_model, message_list, mcp_servers):
     loop = asyncio.new_event_loop()
     try:
@@ -130,6 +131,7 @@ def mcp_response_generator(chat_model, message_list, mcp_servers):
     finally:
         loop.close()
 
 
 async def anext_async(agen):
     return await agen.__anext__()
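mcp_response_generator and anext_async together drive an async generator from synchronous code on a private event loop. A self-contained sketch of that bridge, with a stand-in generator in place of _yield_mcp_response:

import asyncio


async def anext_async(agen):
    # Pull one item; StopAsyncIteration propagates when the generator is exhausted.
    return await agen.__anext__()


async def fake_chunks():  # stand-in for _yield_mcp_response
    for i in range(3):
        yield i


def response_generator():
    # Dedicated event loop so a synchronous caller can consume async chunks one by one.
    loop = asyncio.new_event_loop()
    try:
        agen = fake_chunks()
        while True:
            try:
                yield loop.run_until_complete(anext_async(agen))
            except StopAsyncIteration:
                break
    finally:
        loop.close()


print(list(response_generator()))  # [0, 1, 2]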
@@ -186,7 +188,8 @@ class BaseChatNode(IChatNode):
         self.context['answer'] = details.get('answer')
         self.context['question'] = details.get('question')
         self.context['reasoning_content'] = details.get('reasoning_content')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
                 model_params_setting=None,
@@ -168,7 +168,8 @@ class BaseApplicationNode(IApplicationNode):
         self.context['question'] = details.get('question')
         self.context['type'] = details.get('type')
         self.context['reasoning_content'] = details.get('reasoning_content')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
                 app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None,
@@ -15,7 +15,9 @@ from application.flow.step_node.direct_reply_node.i_reply_node import IReplyNode
 class BaseReplyNode(IReplyNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
         self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
         if reply_type == 'referencing':
             result = self.get_reference_content(fields)
@@ -38,7 +38,8 @@ class BaseFormNode(IFormNode):
         self.context['start_time'] = details.get('start_time')
         self.context['form_data'] = form_data
         self.context['is_submit'] = details.get('is_submit')
-        self.answer_text = details.get('result')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('result')
         if form_data is not None:
             for key in form_data:
                 self.context[key] = form_data[key]
@@ -70,7 +71,7 @@
                         "chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
                         'form_data': self.context.get('form_data', {}),
                         "is_submit": self.context.get("is_submit", False)}
-        form = f'<form_rander>{json.dumps(form_setting,ensure_ascii=False)}</form_rander>'
+        form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
         context = self.workflow_manage.get_workflow_content()
         form_content_format = self.workflow_manage.reset_prompt(form_content_format)
         prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
@@ -85,7 +86,7 @@
                         "chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
                         'form_data': self.context.get('form_data', {}),
                         "is_submit": self.context.get("is_submit", False)}
-        form = f'<form_rander>{json.dumps(form_setting,ensure_ascii=False)}</form_rander>'
+        form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
         context = self.workflow_manage.get_workflow_content()
         form_content_format = self.workflow_manage.reset_prompt(form_content_format)
         prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
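Both form hunks above make the same whitespace fix in the f-string that embeds the form definition. The <form_rander> tag carries the form as JSON inside the answer text, and ensure_ascii=False keeps non-ASCII labels (e.g. Chinese) human-readable instead of \uXXXX-escaped. A standalone sketch with illustrative field values:

import json

# Illustrative payload; the real node fills these fields from its serializer data.
form_setting = {"chat_record_id": "demo-id",
                "form_data": {"用户名": "张三"},
                "is_submit": False}
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
print(form)  # the front end parses this tag and renders the collection form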
@@ -113,7 +113,8 @@ def valid_function(function_lib, user_id):
 class BaseFunctionLibNodeNode(IFunctionLibNode):
     def save_context(self, details, workflow_manage):
         self.context['result'] = details.get('result')
-        self.answer_text = str(details.get('result'))
+        if self.node_params.get('is_result'):
+            self.answer_text = str(details.get('result'))
 
     def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult:
         function_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
@@ -84,7 +84,8 @@ def convert_value(name: str, value, _type, is_required, source, node):
 class BaseFunctionNodeNode(IFunctionNode):
     def save_context(self, details, workflow_manage):
         self.context['result'] = details.get('result')
-        self.answer_text = str(details.get('result'))
+        if self.node_params.get('is_result', False):
+            self.answer_text = str(details.get('result'))
 
     def execute(self, input_field_list, code, **kwargs) -> NodeResult:
         params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'),
@@ -16,7 +16,8 @@ class BaseImageGenerateNode(IImageGenerateNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
         self.context['question'] = details.get('question')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
                 model_params_setting,
@@ -24,7 +25,8 @@ class BaseImageGenerateNode(IImageGenerateNode):
                 **kwargs) -> NodeResult:
         print(model_params_setting)
         application = self.workflow_manage.work_flow_post_handler.chat_info.application
-        tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), **model_params_setting)
+        tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
+                                                        **model_params_setting)
         history_message = self.get_history_message(history_chat_record, dialogue_number)
         self.context['history_message'] = history_message
         question = self.generate_prompt_question(prompt)
@@ -69,7 +69,8 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
         self.context['question'] = details.get('question')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
                 model_params_setting,
@@ -14,7 +14,8 @@ class BaseMcpNode(IMcpNode):
         self.context['result'] = details.get('result')
         self.context['tool_params'] = details.get('tool_params')
         self.context['mcp_tool'] = details.get('mcp_tool')
-        self.answer_text = details.get('result')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('result')
 
     def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
         servers = json.loads(mcp_servers)
@@ -27,7 +28,8 @@ class BaseMcpNode(IMcpNode):
             return s
 
         res = asyncio.run(call_tool(servers, mcp_server, mcp_tool, params))
-        return NodeResult({'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
+        return NodeResult(
+            {'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
 
     def handle_variables(self, tool_params):
         # Process the variables in the tool parameters
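execute here is synchronous, so asyncio.run serves as a one-shot bridge: it creates a fresh event loop, awaits call_tool (the async helper defined earlier in this file), and tears the loop down again. A generic sketch of that bridge, with a stand-in coroutine in place of the real MCP call:

import asyncio


async def call_tool_stub(params):
    # Stand-in for the real async MCP tool invocation.
    await asyncio.sleep(0)
    return [f"echo: {params}"]


def execute(params):
    # One-shot bridge: new loop, await the coroutine, clean up.
    return asyncio.run(call_tool_stub(params))


print(execute({"city": "Beijing"}))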
@@ -80,7 +80,8 @@ class BaseQuestionNode(IQuestionNode):
         self.context['answer'] = details.get('answer')
         self.context['message_tokens'] = details.get('message_tokens')
         self.context['answer_tokens'] = details.get('answer_tokens')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
                 model_params_setting=None,
@@ -18,7 +18,8 @@ class BaseSpeechToTextNode(ISpeechToTextNode):
 
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, stt_model_id, chat_id, audio, **kwargs) -> NodeResult:
         stt_model = get_model_instance_by_model_user_id(stt_model_id, self.flow_params_serializer.data.get('user_id'))
@@ -37,7 +37,8 @@ def bytes_to_uploaded_file(file_bytes, file_name="generated_audio.mp3"):
 class BaseTextToSpeechNode(ITextToSpeechNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, tts_model_id, chat_id,
                 content, model_params_setting=None,