diff --git a/apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py b/apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py
index c5a0de1a1..8d576d416 100644
--- a/apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py
+++ b/apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py
@@ -40,6 +40,7 @@ tool_message_template = """
"""
+
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
reasoning_content: str):
chat_model = node_variable.get('chat_model')
@@ -102,7 +103,6 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
_write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
-
async def _yield_mcp_response(chat_model, message_list, mcp_servers):
async with MultiServerMCPClient(json.loads(mcp_servers)) as client:
agent = create_react_agent(chat_model, client.get_tools())
@@ -115,6 +115,7 @@ async def _yield_mcp_response(chat_model, message_list, mcp_servers):
if isinstance(chunk[0], AIMessageChunk):
yield chunk[0]
+
def mcp_response_generator(chat_model, message_list, mcp_servers):
loop = asyncio.new_event_loop()
try:
@@ -130,6 +131,7 @@ def mcp_response_generator(chat_model, message_list, mcp_servers):
finally:
loop.close()
+
async def anext_async(agen):
return await agen.__anext__()
@@ -186,7 +188,8 @@ class BaseChatNode(IChatNode):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
self.context['reasoning_content'] = details.get('reasoning_content')
- self.answer_text = details.get('answer')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('answer')
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
model_params_setting=None,
diff --git a/apps/application/flow/step_node/application_node/impl/base_application_node.py b/apps/application/flow/step_node/application_node/impl/base_application_node.py
index d962f7163..34d485d44 100644
--- a/apps/application/flow/step_node/application_node/impl/base_application_node.py
+++ b/apps/application/flow/step_node/application_node/impl/base_application_node.py
@@ -168,7 +168,8 @@ class BaseApplicationNode(IApplicationNode):
self.context['question'] = details.get('question')
self.context['type'] = details.get('type')
self.context['reasoning_content'] = details.get('reasoning_content')
- self.answer_text = details.get('answer')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('answer')
def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None,
diff --git a/apps/application/flow/step_node/direct_reply_node/impl/base_reply_node.py b/apps/application/flow/step_node/direct_reply_node/impl/base_reply_node.py
index 6a51edd6b..1d3115e4c 100644
--- a/apps/application/flow/step_node/direct_reply_node/impl/base_reply_node.py
+++ b/apps/application/flow/step_node/direct_reply_node/impl/base_reply_node.py
@@ -15,7 +15,9 @@ from application.flow.step_node.direct_reply_node.i_reply_node import IReplyNode
class BaseReplyNode(IReplyNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
- self.answer_text = details.get('answer')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('answer')
+
def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
if reply_type == 'referencing':
result = self.get_reference_content(fields)
diff --git a/apps/application/flow/step_node/form_node/impl/base_form_node.py b/apps/application/flow/step_node/form_node/impl/base_form_node.py
index 7cbbe9cc1..dcf35dd3c 100644
--- a/apps/application/flow/step_node/form_node/impl/base_form_node.py
+++ b/apps/application/flow/step_node/form_node/impl/base_form_node.py
@@ -38,7 +38,8 @@ class BaseFormNode(IFormNode):
self.context['start_time'] = details.get('start_time')
self.context['form_data'] = form_data
self.context['is_submit'] = details.get('is_submit')
- self.answer_text = details.get('result')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('result')
if form_data is not None:
for key in form_data:
self.context[key] = form_data[key]
@@ -70,7 +71,7 @@ class BaseFormNode(IFormNode):
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
'form_data': self.context.get('form_data', {}),
"is_submit": self.context.get("is_submit", False)}
- form = f'{json.dumps(form_setting,ensure_ascii=False)}'
+ form = f'{json.dumps(form_setting, ensure_ascii=False)}'
context = self.workflow_manage.get_workflow_content()
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
@@ -85,7 +86,7 @@ class BaseFormNode(IFormNode):
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
'form_data': self.context.get('form_data', {}),
"is_submit": self.context.get("is_submit", False)}
- form = f'{json.dumps(form_setting,ensure_ascii=False)}'
+ form = f'{json.dumps(form_setting, ensure_ascii=False)}'
context = self.workflow_manage.get_workflow_content()
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
diff --git a/apps/application/flow/step_node/function_lib_node/impl/base_function_lib_node.py b/apps/application/flow/step_node/function_lib_node/impl/base_function_lib_node.py
index d21424f75..9ced4a8b4 100644
--- a/apps/application/flow/step_node/function_lib_node/impl/base_function_lib_node.py
+++ b/apps/application/flow/step_node/function_lib_node/impl/base_function_lib_node.py
@@ -113,7 +113,8 @@ def valid_function(function_lib, user_id):
class BaseFunctionLibNodeNode(IFunctionLibNode):
def save_context(self, details, workflow_manage):
self.context['result'] = details.get('result')
- self.answer_text = str(details.get('result'))
+        if self.node_params.get('is_result', False):
+ self.answer_text = str(details.get('result'))
def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult:
function_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
diff --git a/apps/application/flow/step_node/function_node/impl/base_function_node.py b/apps/application/flow/step_node/function_node/impl/base_function_node.py
index 4a5c75c81..1d12a2f9b 100644
--- a/apps/application/flow/step_node/function_node/impl/base_function_node.py
+++ b/apps/application/flow/step_node/function_node/impl/base_function_node.py
@@ -84,7 +84,8 @@ def convert_value(name: str, value, _type, is_required, source, node):
class BaseFunctionNodeNode(IFunctionNode):
def save_context(self, details, workflow_manage):
self.context['result'] = details.get('result')
- self.answer_text = str(details.get('result'))
+ if self.node_params.get('is_result', False):
+ self.answer_text = str(details.get('result'))
def execute(self, input_field_list, code, **kwargs) -> NodeResult:
params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'),
diff --git a/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py b/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py
index d5cc2c5a2..16423eafd 100644
--- a/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py
+++ b/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py
@@ -16,7 +16,8 @@ class BaseImageGenerateNode(IImageGenerateNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
- self.answer_text = details.get('answer')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('answer')
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
model_params_setting,
@@ -24,7 +25,8 @@ class BaseImageGenerateNode(IImageGenerateNode):
**kwargs) -> NodeResult:
print(model_params_setting)
application = self.workflow_manage.work_flow_post_handler.chat_info.application
- tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), **model_params_setting)
+ tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
+ **model_params_setting)
history_message = self.get_history_message(history_chat_record, dialogue_number)
self.context['history_message'] = history_message
question = self.generate_prompt_question(prompt)
diff --git a/apps/application/flow/step_node/image_understand_step_node/impl/base_image_understand_node.py b/apps/application/flow/step_node/image_understand_step_node/impl/base_image_understand_node.py
index 3b96f15cd..44765bc4f 100644
--- a/apps/application/flow/step_node/image_understand_step_node/impl/base_image_understand_node.py
+++ b/apps/application/flow/step_node/image_understand_step_node/impl/base_image_understand_node.py
@@ -69,7 +69,8 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
- self.answer_text = details.get('answer')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('answer')
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
model_params_setting,
diff --git a/apps/application/flow/step_node/mcp_node/impl/base_mcp_node.py b/apps/application/flow/step_node/mcp_node/impl/base_mcp_node.py
index 6c9fe97fc..e49ef7019 100644
--- a/apps/application/flow/step_node/mcp_node/impl/base_mcp_node.py
+++ b/apps/application/flow/step_node/mcp_node/impl/base_mcp_node.py
@@ -14,7 +14,8 @@ class BaseMcpNode(IMcpNode):
self.context['result'] = details.get('result')
self.context['tool_params'] = details.get('tool_params')
self.context['mcp_tool'] = details.get('mcp_tool')
- self.answer_text = details.get('result')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('result')
def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
servers = json.loads(mcp_servers)
@@ -27,7 +28,8 @@ class BaseMcpNode(IMcpNode):
return s
res = asyncio.run(call_tool(servers, mcp_server, mcp_tool, params))
- return NodeResult({'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
+ return NodeResult(
+ {'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
def handle_variables(self, tool_params):
# 处理参数中的变量
diff --git a/apps/application/flow/step_node/question_node/impl/base_question_node.py b/apps/application/flow/step_node/question_node/impl/base_question_node.py
index 48a2639b7..e1fd5b860 100644
--- a/apps/application/flow/step_node/question_node/impl/base_question_node.py
+++ b/apps/application/flow/step_node/question_node/impl/base_question_node.py
@@ -80,7 +80,8 @@ class BaseQuestionNode(IQuestionNode):
self.context['answer'] = details.get('answer')
self.context['message_tokens'] = details.get('message_tokens')
self.context['answer_tokens'] = details.get('answer_tokens')
- self.answer_text = details.get('answer')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('answer')
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
model_params_setting=None,
diff --git a/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py b/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py
index c85588cd4..13b954e46 100644
--- a/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py
+++ b/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py
@@ -18,7 +18,8 @@ class BaseSpeechToTextNode(ISpeechToTextNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
- self.answer_text = details.get('answer')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('answer')
def execute(self, stt_model_id, chat_id, audio, **kwargs) -> NodeResult:
stt_model = get_model_instance_by_model_user_id(stt_model_id, self.flow_params_serializer.data.get('user_id'))
diff --git a/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py b/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py
index 72c4d3be5..970447295 100644
--- a/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py
+++ b/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py
@@ -37,7 +37,8 @@ def bytes_to_uploaded_file(file_bytes, file_name="generated_audio.mp3"):
class BaseTextToSpeechNode(ITextToSpeechNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
- self.answer_text = details.get('answer')
+ if self.node_params.get('is_result', False):
+ self.answer_text = details.get('answer')
def execute(self, tts_model_id, chat_id,
content, model_params_setting=None,