Mirror of https://github.com/1Panel-dev/MaxKB.git (synced 2025-12-26 01:33:05 +00:00)

fix: Loop node embedding sub application error (#4050)

This commit is contained in: parent 52f420df58 · commit ab93ac7c4e
```diff
@@ -55,7 +55,7 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode
         # 先把流转成字符串
         response_content = chunk.decode('utf-8')[6:]
         response_content = json.loads(response_content)
-        content = response_content.get('content', '')
+        content = (response_content.get('content', '') or '')
         runtime_node_id = response_content.get('runtime_node_id', '')
         chat_record_id = response_content.get('chat_record_id', '')
         child_node = response_content.get('child_node')
```
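The context lines above strip the 6-character `data: ` prefix from each server-sent chunk and parse the JSON (the Chinese comment reads "first convert the stream into a string"). The actual fix wraps the lookup in `(... or '')`: `dict.get('content', '')` only falls back to `''` when the key is missing, so a sub-application chunk that carries an explicit `"content": null` still returns `None` and later breaks `answer += content`. A minimal, self-contained sketch of the failure and the guard (the sample chunk is invented for illustration):

```python
import json

# A streamed chunk from an embedded sub application, in SSE form:
# the JSON payload starts after the 6-character "data: " prefix.
chunk = b'data: {"content": null, "runtime_node_id": "n1", "chat_record_id": "c1"}'

response_content = json.loads(chunk.decode('utf-8')[6:])

# dict.get only applies the default when the key is *missing*;
# an explicit null in the JSON still comes back as None.
broken = response_content.get('content', '')         # -> None
fixed = (response_content.get('content', '') or '')  # -> ''

answer = ''
answer += fixed  # safe; `answer += broken` would raise TypeError
```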
```diff
@@ -63,7 +63,7 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode
         node_type = response_content.get('node_type')
         real_node_id = response_content.get('real_node_id')
         node_is_end = response_content.get('node_is_end', False)
-        _reasoning_content = response_content.get('reasoning_content', '')
+        _reasoning_content = (response_content.get('reasoning_content', '') or '')
         if node_type == 'form-node':
             is_interrupt_exec = True
         answer += content
```
```diff
@@ -159,8 +159,8 @@ def loop(workflow_manage_new_instance, node: INode, generate_loop):
                      'runtime_node_id': runtime_node_id,
                      'chat_record_id': chat_record_id,
                      'child_node': child_node}
-            content_chunk = chunk.get('content', '')
-            reasoning_content_chunk = chunk.get('reasoning_content', '')
+            content_chunk = (chunk.get('content', '') or '')
+            reasoning_content_chunk = (chunk.get('reasoning_content', '') or '')
             reasoning_content += reasoning_content_chunk
             answer += content_chunk
             yield chunk
```
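In `loop(...)`, chunks produced by the embedded sub application get the same `or ''` guard and are re-yielded, while the loop node accumulates the combined answer and reasoning text. A minimal sketch of that relay pattern, assuming plain dict chunks and a hypothetical `relay_chunks` helper (the real chunks also carry `runtime_node_id`, `chat_record_id` and `child_node`, as shown above):

```python
from typing import Dict, Iterator


def relay_chunks(sub_chunks: Iterator[Dict]) -> Iterator[Dict]:
    """Re-yield sub-application chunks while accumulating the loop node's answer."""
    answer = ''
    reasoning_content = ''
    for chunk in sub_chunks:
        # Normalize fields that may be missing *or* explicitly None.
        content_chunk = (chunk.get('content', '') or '')
        reasoning_content_chunk = (chunk.get('reasoning_content', '') or '')
        answer += content_chunk
        reasoning_content += reasoning_content_chunk
        yield chunk  # pass the chunk through to the outer stream unchanged
    # `answer` / `reasoning_content` would then be written to the node context.


# Usage: iterate the relay exactly like the original stream.
for c in relay_chunks(iter([{'content': 'Hello'},
                            {'content': None, 'reasoning_content': 'step 1'}])):
    pass
```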
```diff
@@ -381,6 +381,7 @@ class WorkflowManage:
                 child_node = {}
+                node_is_end = False
                 view_type = current_node.view_type
                 node_type = current_node.type
                 if isinstance(r, dict):
                     content = r.get('content')
                     child_node = {'runtime_node_id': r.get('runtime_node_id'),
```
```diff
@@ -390,6 +391,8 @@ class WorkflowManage:
                     real_node_id = r.get('real_node_id')
+                    if r.__contains__('node_is_end'):
+                        node_is_end = r.get('node_is_end')
                     if r.__contains__('node_type'):
                         node_type = r.get("node_type")
                     view_type = r.get('view_type')
                     reasoning_content = r.get('reasoning_content')
                 chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
```
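These two hunks are the core of the error fix: `node_is_end` used to be read only when a chunk happened to carry it, so a loop node embedding a sub application could hit an undefined or stale value. The change initializes the flag to `False` and only overrides it when the chunk explicitly provides it (`r.__contains__('node_is_end')` is equivalent to the more idiomatic `'node_is_end' in r`). A minimal sketch of the pattern, with a hypothetical `resolve_node_is_end` helper and invented chunks:

```python
def resolve_node_is_end(chunks):
    """Track whether the child node currently streaming has finished."""
    for r in chunks:
        node_is_end = False  # safe default for chunks that omit the flag
        if isinstance(r, dict):
            if 'node_is_end' in r:  # same test as r.__contains__('node_is_end')
                node_is_end = r.get('node_is_end')
        yield r, node_is_end


for _chunk, is_end in resolve_node_is_end([{'content': 'a'},
                                           {'content': 'b', 'node_is_end': True}]):
    print(is_end)  # False, then True
```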
```diff
@@ -397,7 +400,7 @@ class WorkflowManage:
                                                        current_node.id,
                                                        current_node.up_node_id_list,
                                                        content, False, 0, 0,
-                                                       {'node_type': current_node.type,
+                                                       {'node_type': node_type,
                                                         'runtime_node_id': runtime_node_id,
                                                         'view_type': view_type,
                                                         'child_node': child_node,
```
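With `node_type` and `node_is_end` now taken from the streamed chunk when available, the metadata packed into each stream response reports the node actually running inside the sub application instead of always reporting the loop node itself. A small sketch of that selection with a hypothetical `build_chunk_metadata` helper and illustrative values (the real code passes the dict straight into `self.base_to_response.to_stream_chunk_response`, whose full signature is not shown in this diff):

```python
def build_chunk_metadata(current_node_type, r, runtime_node_id, view_type, child_node):
    """Prefer the streamed chunk's node_type over the outer (loop) node's own type."""
    node_type = current_node_type
    if isinstance(r, dict) and 'node_type' in r:
        node_type = r['node_type']
    return {
        'node_type': node_type,  # was hard-coded to current_node.type before this fix
        'runtime_node_id': runtime_node_id,
        'view_type': view_type,
        'child_node': child_node,
    }


meta = build_chunk_metadata('loop-node', {'node_type': 'ai-chat-node'}, 'rt-1', 'many_view', {})
print(meta['node_type'])  # -> 'ai-chat-node'
```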
```diff
@@ -383,6 +383,9 @@ class AppNodeModel extends HtmlResize.model {
     this.sourceRules.push({
       message: t('views.applicationWorkflow.tip.notRecyclable'),
       validate: (sourceNode: any, targetNode: any, sourceAnchor: any, targetAnchor: any) => {
+        if (targetNode.id == sourceNode.id) {
+          return false
+        }
         const up_node_list = this.graphModel.getNodeIncomingNode(targetNode.id)
         const is_c = up_node_list.find((up_node) => up_node.id == sourceNode.id)
         return !is_c && !isLoop(sourceNode.id, targetNode.id)
```
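On the frontend, the connection rule now also rejects edges whose target is their own source, in addition to the existing duplicate-edge and cycle checks. The rule itself is TypeScript; the predicate is sketched below in Python for consistency with the earlier examples, with `would_create_cycle` standing in for the frontend's `isLoop` helper (both function names here are illustrative):

```python
def would_create_cycle(source_id, target_id):
    # Stand-in for the frontend's isLoop(sourceNode.id, targetNode.id) helper.
    return False


def is_valid_connection(source_id, target_id, incoming_ids):
    """Illustrative mirror of the sourceRules validate() check."""
    if target_id == source_id:       # new guard added by this commit: no self-connections
        return False
    if source_id in incoming_ids:    # an identical incoming edge already exists
        return False
    return not would_create_cycle(source_id, target_id)


print(is_valid_connection('n1', 'n1', []))      # False: self-connection rejected
print(is_valid_connection('n1', 'n2', ['n1']))  # False: duplicate edge
print(is_valid_connection('n1', 'n2', []))      # True
```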
```diff
@@ -466,9 +466,9 @@ export const loopBodyNode = {
   type: WorkflowType.LoopBodyNode,
   text: t('views.applicationWorkflow.nodes.loopBodyNode.text', '循环体'),
   label: t('views.applicationWorkflow.nodes.loopBodyNode.label', '循环体'),
-  height: 600,
+  height: 1080,
   properties: {
-    width: 1800,
+    width: 1920,
     stepName: t('views.applicationWorkflow.nodes.loopBodyNode.label', '循环体'),
     config: {
       fields: [],
```
```diff
@@ -550,11 +550,14 @@ export const applicationLoopMenuNodes = [
     label: t('views.applicationWorkflow.nodes.classify.aiCapability'),
     list: [
       aiChatNode,
+      intentNode,
       questionNode,
       imageGenerateNode,
       imageUnderstandNode,
       textToSpeechNode,
       speechToTextNode,
+      textToVideoNode,
+      imageToVideoNode,
     ],
   },
   { label: t('views.knowledge.title'), list: [searchKnowledgeNode, rerankerNode] },
```
```diff
@@ -85,6 +85,7 @@ const renderGraphData = (data?: any) => {
     AppEdge,
+    loopEdge,
   ])

  lf.value.setDefaultEdgeType('app-edge')

  lf.value.render(data ? data : {})
```
```diff
@@ -7,7 +7,7 @@
   >
     <div v-resize="resizeStepContainer">
       <div class="flex-between">
-        <div class="flex align-center" style="width: 600px">
+        <div class="flex align-center">
           <component
             :is="iconComponent(`${nodeModel.type}-icon`)"
             class="mr-8"
```
```diff
@@ -1,6 +1,6 @@
 <template>
   <LoopBodyContainer :nodeModel="nodeModel">
-    <div ref="containerRef" @wheel.stop style="height: 550px"></div>
+    <div ref="containerRef" @wheel.stop style="height: 600px"></div>
   </LoopBodyContainer>
 </template>
 <script setup lang="ts">
```